//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
  unsigned argCount = call->getNumArgs();
  if (argCount == desiredArgCount) return false;

  if (argCount < desiredArgCount)
    return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /*function call*/ << desiredArgCount << argCount
           << call->getSourceRange();

  // Highlight all the excess arguments.
  SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
                    call->getArg(argCount - 1)->getEndLoc());

  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << desiredArgCount << argCount
         << call->getArg(1)->getSourceRange();
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
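  // For example, __builtin_annotation(value, "my tag") is accepted, while a
  // wide literal such as L"my tag" is rejected by the check below.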
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check that the argument to __builtin_function_start is a function.
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
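  // For example, given 'char buf[64];', __builtin_is_aligned(buf, 16) decays
  // 'buf' to 'char *' here before the pointer/integer check below.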
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
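  // For example, __builtin_add_overflow(a, b, &res) is accepted when 'res' is
  // a non-const integer, but rejected below if 'res' is declared 'const int'.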
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed bit-precise integer args larger than 128 bits to mul
  // function until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_bit_int_max_size)
               << 128;
    }
  }

  return false;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of the
  // argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<Optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size.
  using DiagnoseFunction =
      llvm::function_ref<void(unsigned, unsigned, unsigned)>;

  ComputeSizeFunction ComputeSizeArgument;
  DiagnoseFunction Diagnose;

public:
  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
                               DiagnoseFunction Diagnose)
      : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *StartSpecifier,
                            unsigned specifierLen) override {
    if (!FS.consumesDataArgument())
      return true;

    unsigned NulByte = 0;
    switch ((FS.getConversionSpecifier().getKind())) {
    default:
      return true;
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::ScanListArg:
      NulByte = 1;
      break;
    case analyze_format_string::ConversionSpecifier::cArg:
      break;
    }

    analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
    if (FW.getHowSpecified() !=
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    unsigned SourceSize = FW.getConstantAmount() + NulByte;

    Optional<llvm::APSInt> DestSizeAPS = ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    return true;
  }
};

class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen,
                             const TargetInfo &) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
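    // For example, "%f" with no explicit precision prints six digits after the
    // decimal point, so Precision defaults to 6 below.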
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
    assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
    UseDABAttr = true;
  }

  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);

  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto TranslateIndex = [&](unsigned Index) -> Optional<unsigned> {
    // If we refer to a diagnose_as_builtin attribute, we need to change the
    // argument index to refer to the arguments of the called function. Unless
    // the index is out of bounds, which presumably means it's a variadic
    // function.
    if (!UseDABAttr)
      return Index;
    unsigned DABIndices = DABAttr->argIndices_size();
    unsigned NewIndex = Index < DABIndices
                            ? DABAttr->argIndices_begin()[Index]
                            : Index - DABIndices + FD->getNumParams();
    if (NewIndex >= TheCall->getNumArgs())
      return llvm::None;
    return NewIndex;
  };

  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> Optional<llvm::APSInt> {
    Optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return llvm::None;
    unsigned NewIndex = IndexOptional.getValue();
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(NewIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return llvm::None;
    llvm::APSInt Integer = Result.Val.getInt();
    Integer.setIsUnsigned(true);
    return Integer;
  };

  auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    // This check can fail for variadic functions.
    if (Index < FD->getNumParams()) {
      if (const auto *POS =
              FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
        BOSType = POS->getType();
    }

    Optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return llvm::None;
    unsigned NewIndex = IndexOptional.getValue();

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return llvm::None;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    Optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return llvm::None;
    unsigned NewIndex = IndexOptional.getValue();

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return llvm::None;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  Optional<llvm::APSInt> SourceSize;
  Optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  auto GetFunctionName = [&]() {
    StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
    // Skim off the details of whichever builtin was called to produce a better
    // diagnostic, as it's unlikely that the user wrote the __builtin
    // explicitly.
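    // For example, "__builtin___memcpy_chk" is reported as plain "memcpy" and
    // "__builtin_strcpy" as "strcpy".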
    if (IsChkVariant) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
      FunctionName = FunctionName.drop_back(std::strlen("_chk"));
    } else if (FunctionName.startswith("__builtin_")) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
    }
    return FunctionName;
  };

  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIscanf:
  case Builtin::BIfscanf:
  case Builtin::BIsscanf: {
    unsigned FormatIndex = 1;
    unsigned DataIndex = 2;
    if (BuiltinID == Builtin::BIscanf) {
      FormatIndex = 0;
      DataIndex = 1;
    }

    const auto *FormatExpr =
        TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
    if (!Format)
      return;

    if (!Format->isAscii() && !Format->isUTF8())
      return;

    auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
                        unsigned SourceSize) {
      DiagID = diag::warn_fortify_scanf_overflow;
      unsigned Index = ArgIndex + DataIndex;
      StringRef FunctionName = GetFunctionName();
      DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
                          PDiag(DiagID) << FunctionName << (Index + 1)
                                        << DestSize << SourceSize);
    };

    StringRef FormatStrRef = Format->getString();
    auto ShiftedComputeSizeArgument = [&](unsigned Index) {
      return ComputeSizeArgument(Index + DataIndex);
    };
    ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
    const char *FormatBytes = FormatStrRef.data();
    const ConstantArrayType *T =
        Context.getAsConstantArrayType(Format->getType());
    assert(T && "String literal not of constant array type!");
    size_t TypeSize = T->getSize().getZExtValue();

    // In case there's a null byte somewhere.
    size_t StrLen =
        std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));

    analyze_format_string::ParseScanfString(H, FormatBytes,
                                            FormatBytes + StrLen, getLangOpts(),
                                            Context.getTargetInfo());

    // Unlike the other cases, in this one we have already issued the diagnostic
    // here, so no need to continue (because unlike the other cases, here the
    // diagnostic refers to the argument number).
    return;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isAscii() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
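    // For example, 'char buf[16]; strncpy(buf, src, 100);' passes a size
    // larger than the destination object and is diagnosed below.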
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  }

  if (!SourceSize || !DestinationSize ||
      llvm::APSInt::compareValues(SourceSize.getValue(),
                                  DestinationSize.getValue()) <= 0)
    return;

  StringRef FunctionName = GetFunctionName();

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  // OpenCL device can support extension but not the feature as extension
  // requires subgroup independent forward progress, but subgroup independent
  // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature.
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
      !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
                                        S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee()
        << "cl_khr_subgroups or __opencl_c_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
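/// For example, a block taking two 'local void*' parameters must be followed
/// by exactly two size arguments in the enqueue_kernel call.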
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected, give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Checks that the first argument of a pipe builtin call is a pipe and that
/// its access qualifier is compatible with the builtin.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
1299 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be 1300 // read_only and write_only, and assumed to be read_only if no qualifier is 1301 // specified. 1302 switch (Call->getDirectCallee()->getBuiltinID()) { 1303 case Builtin::BIread_pipe: 1304 case Builtin::BIreserve_read_pipe: 1305 case Builtin::BIcommit_read_pipe: 1306 case Builtin::BIwork_group_reserve_read_pipe: 1307 case Builtin::BIsub_group_reserve_read_pipe: 1308 case Builtin::BIwork_group_commit_read_pipe: 1309 case Builtin::BIsub_group_commit_read_pipe: 1310 if (!(!AccessQual || AccessQual->isReadOnly())) { 1311 S.Diag(Arg0->getBeginLoc(), 1312 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1313 << "read_only" << Arg0->getSourceRange(); 1314 return true; 1315 } 1316 break; 1317 case Builtin::BIwrite_pipe: 1318 case Builtin::BIreserve_write_pipe: 1319 case Builtin::BIcommit_write_pipe: 1320 case Builtin::BIwork_group_reserve_write_pipe: 1321 case Builtin::BIsub_group_reserve_write_pipe: 1322 case Builtin::BIwork_group_commit_write_pipe: 1323 case Builtin::BIsub_group_commit_write_pipe: 1324 if (!(AccessQual && AccessQual->isWriteOnly())) { 1325 S.Diag(Arg0->getBeginLoc(), 1326 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1327 << "write_only" << Arg0->getSourceRange(); 1328 return true; 1329 } 1330 break; 1331 default: 1332 break; 1333 } 1334 return false; 1335 } 1336 1337 /// Returns true if pipe element type is different from the pointer. 1338 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) { 1339 const Expr *Arg0 = Call->getArg(0); 1340 const Expr *ArgIdx = Call->getArg(Idx); 1341 const PipeType *PipeTy = cast<PipeType>(Arg0->getType()); 1342 const QualType EltTy = PipeTy->getElementType(); 1343 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>(); 1344 // The Idx argument should be a pointer and the type of the pointer and 1345 // the type of pipe element should also be the same. 1346 if (!ArgTy || 1347 !S.Context.hasSameType( 1348 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) { 1349 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1350 << Call->getDirectCallee() << S.Context.getPointerType(EltTy) 1351 << ArgIdx->getType() << ArgIdx->getSourceRange(); 1352 return true; 1353 } 1354 return false; 1355 } 1356 1357 // Performs semantic analysis for the read/write_pipe call. 1358 // \param S Reference to the semantic analyzer. 1359 // \param Call A pointer to the builtin call. 1360 // \return True if a semantic error has been found, false otherwise. 1361 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) { 1362 // OpenCL v2.0 s6.13.16.2 - The built-in read/write 1363 // functions have two forms. 1364 switch (Call->getNumArgs()) { 1365 case 2: 1366 if (checkOpenCLPipeArg(S, Call)) 1367 return true; 1368 // The call with 2 arguments should be 1369 // read/write_pipe(pipe T, T*). 1370 // Check packet type T. 1371 if (checkOpenCLPipePacketType(S, Call, 1)) 1372 return true; 1373 break; 1374 1375 case 4: { 1376 if (checkOpenCLPipeArg(S, Call)) 1377 return true; 1378 // The call with 4 arguments should be 1379 // read/write_pipe(pipe T, reserve_id_t, uint, T*). 1380 // Check reserve_id_t. 1381 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1382 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1383 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1384 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1385 return true; 1386 } 1387 1388 // Check the index. 
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we use int as
  // the return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced.
Example: 1566 // 1567 // template <class T> struct Foo { T value; }; 1568 // Foo<int> *p = nullptr; 1569 // auto *d = __builtin_launder(p); 1570 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(), 1571 diag::err_incomplete_type)) 1572 return ExprError(); 1573 1574 assert(ParamTy->getPointeeType()->isObjectType() && 1575 "Unhandled non-object pointer case"); 1576 1577 InitializedEntity Entity = 1578 InitializedEntity::InitializeParameter(S.Context, ParamTy, false); 1579 ExprResult Arg = 1580 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0)); 1581 if (Arg.isInvalid()) 1582 return ExprError(); 1583 TheCall->setArg(0, Arg.get()); 1584 1585 return TheCall; 1586 } 1587 1588 // Emit an error and return true if the current object format type is in the 1589 // list of unsupported types. 1590 static bool CheckBuiltinTargetNotInUnsupported( 1591 Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1592 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) { 1593 llvm::Triple::ObjectFormatType CurObjFormat = 1594 S.getASTContext().getTargetInfo().getTriple().getObjectFormat(); 1595 if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) { 1596 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1597 << TheCall->getSourceRange(); 1598 return true; 1599 } 1600 return false; 1601 } 1602 1603 // Emit an error and return true if the current architecture is not in the list 1604 // of supported architectures. 1605 static bool 1606 CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1607 ArrayRef<llvm::Triple::ArchType> SupportedArchs) { 1608 llvm::Triple::ArchType CurArch = 1609 S.getASTContext().getTargetInfo().getTriple().getArch(); 1610 if (llvm::is_contained(SupportedArchs, CurArch)) 1611 return false; 1612 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1613 << TheCall->getSourceRange(); 1614 return true; 1615 } 1616 1617 static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr, 1618 SourceLocation CallSiteLoc); 1619 1620 bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 1621 CallExpr *TheCall) { 1622 switch (TI.getTriple().getArch()) { 1623 default: 1624 // Some builtins don't require additional checking, so just consider these 1625 // acceptable. 
1626 return false; 1627 case llvm::Triple::arm: 1628 case llvm::Triple::armeb: 1629 case llvm::Triple::thumb: 1630 case llvm::Triple::thumbeb: 1631 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall); 1632 case llvm::Triple::aarch64: 1633 case llvm::Triple::aarch64_32: 1634 case llvm::Triple::aarch64_be: 1635 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall); 1636 case llvm::Triple::bpfeb: 1637 case llvm::Triple::bpfel: 1638 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall); 1639 case llvm::Triple::hexagon: 1640 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall); 1641 case llvm::Triple::mips: 1642 case llvm::Triple::mipsel: 1643 case llvm::Triple::mips64: 1644 case llvm::Triple::mips64el: 1645 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall); 1646 case llvm::Triple::systemz: 1647 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall); 1648 case llvm::Triple::x86: 1649 case llvm::Triple::x86_64: 1650 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall); 1651 case llvm::Triple::ppc: 1652 case llvm::Triple::ppcle: 1653 case llvm::Triple::ppc64: 1654 case llvm::Triple::ppc64le: 1655 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall); 1656 case llvm::Triple::amdgcn: 1657 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall); 1658 case llvm::Triple::riscv32: 1659 case llvm::Triple::riscv64: 1660 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall); 1661 } 1662 } 1663 1664 ExprResult 1665 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, 1666 CallExpr *TheCall) { 1667 ExprResult TheCallResult(TheCall); 1668 1669 // Find out if any arguments are required to be integer constant expressions. 1670 unsigned ICEArguments = 0; 1671 ASTContext::GetBuiltinTypeError Error; 1672 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments); 1673 if (Error != ASTContext::GE_None) 1674 ICEArguments = 0; // Don't diagnose previously diagnosed errors. 1675 1676 // If any arguments are required to be ICE's, check and diagnose. 1677 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { 1678 // Skip arguments not required to be ICE's. 1679 if ((ICEArguments & (1 << ArgNo)) == 0) continue; 1680 1681 llvm::APSInt Result; 1682 if (SemaBuiltinConstantArg(TheCall, ArgNo, Result)) 1683 return true; 1684 ICEArguments &= ~(1 << ArgNo); 1685 } 1686 1687 switch (BuiltinID) { 1688 case Builtin::BI__builtin___CFStringMakeConstantString: 1689 // CFStringMakeConstantString is currently not implemented for GOFF (i.e., 1690 // on z/OS) and for XCOFF (i.e., on AIX). 
Emit unsupported 1691 if (CheckBuiltinTargetNotInUnsupported( 1692 *this, BuiltinID, TheCall, 1693 {llvm::Triple::GOFF, llvm::Triple::XCOFF})) 1694 return ExprError(); 1695 assert(TheCall->getNumArgs() == 1 && 1696 "Wrong # arguments to builtin CFStringMakeConstantString"); 1697 if (CheckObjCString(TheCall->getArg(0))) 1698 return ExprError(); 1699 break; 1700 case Builtin::BI__builtin_ms_va_start: 1701 case Builtin::BI__builtin_stdarg_start: 1702 case Builtin::BI__builtin_va_start: 1703 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 1704 return ExprError(); 1705 break; 1706 case Builtin::BI__va_start: { 1707 switch (Context.getTargetInfo().getTriple().getArch()) { 1708 case llvm::Triple::aarch64: 1709 case llvm::Triple::arm: 1710 case llvm::Triple::thumb: 1711 if (SemaBuiltinVAStartARMMicrosoft(TheCall)) 1712 return ExprError(); 1713 break; 1714 default: 1715 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 1716 return ExprError(); 1717 break; 1718 } 1719 break; 1720 } 1721 1722 // The acquire, release, and no fence variants are ARM and AArch64 only. 1723 case Builtin::BI_interlockedbittestandset_acq: 1724 case Builtin::BI_interlockedbittestandset_rel: 1725 case Builtin::BI_interlockedbittestandset_nf: 1726 case Builtin::BI_interlockedbittestandreset_acq: 1727 case Builtin::BI_interlockedbittestandreset_rel: 1728 case Builtin::BI_interlockedbittestandreset_nf: 1729 if (CheckBuiltinTargetInSupported( 1730 *this, BuiltinID, TheCall, 1731 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64})) 1732 return ExprError(); 1733 break; 1734 1735 // The 64-bit bittest variants are x64, ARM, and AArch64 only. 1736 case Builtin::BI_bittest64: 1737 case Builtin::BI_bittestandcomplement64: 1738 case Builtin::BI_bittestandreset64: 1739 case Builtin::BI_bittestandset64: 1740 case Builtin::BI_interlockedbittestandreset64: 1741 case Builtin::BI_interlockedbittestandset64: 1742 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 1743 {llvm::Triple::x86_64, llvm::Triple::arm, 1744 llvm::Triple::thumb, 1745 llvm::Triple::aarch64})) 1746 return ExprError(); 1747 break; 1748 1749 case Builtin::BI__builtin_isgreater: 1750 case Builtin::BI__builtin_isgreaterequal: 1751 case Builtin::BI__builtin_isless: 1752 case Builtin::BI__builtin_islessequal: 1753 case Builtin::BI__builtin_islessgreater: 1754 case Builtin::BI__builtin_isunordered: 1755 if (SemaBuiltinUnorderedCompare(TheCall)) 1756 return ExprError(); 1757 break; 1758 case Builtin::BI__builtin_fpclassify: 1759 if (SemaBuiltinFPClassification(TheCall, 6)) 1760 return ExprError(); 1761 break; 1762 case Builtin::BI__builtin_isfinite: 1763 case Builtin::BI__builtin_isinf: 1764 case Builtin::BI__builtin_isinf_sign: 1765 case Builtin::BI__builtin_isnan: 1766 case Builtin::BI__builtin_isnormal: 1767 case Builtin::BI__builtin_signbit: 1768 case Builtin::BI__builtin_signbitf: 1769 case Builtin::BI__builtin_signbitl: 1770 if (SemaBuiltinFPClassification(TheCall, 1)) 1771 return ExprError(); 1772 break; 1773 case Builtin::BI__builtin_shufflevector: 1774 return SemaBuiltinShuffleVector(TheCall); 1775 // TheCall will be freed by the smart pointer here, but that's fine, since 1776 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 
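  // As a usage sketch (the detailed checking lives in SemaBuiltinPrefetch):
  // the optional second and third arguments must be integer constant
  // expressions, e.g. __builtin_prefetch(Ptr) or
  // __builtin_prefetch(Ptr, /*rw=*/1, /*locality=*/3); a non-constant or
  // out-of-range rw/locality argument is rejected.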
1777 case Builtin::BI__builtin_prefetch: 1778 if (SemaBuiltinPrefetch(TheCall)) 1779 return ExprError(); 1780 break; 1781 case Builtin::BI__builtin_alloca_with_align: 1782 case Builtin::BI__builtin_alloca_with_align_uninitialized: 1783 if (SemaBuiltinAllocaWithAlign(TheCall)) 1784 return ExprError(); 1785 LLVM_FALLTHROUGH; 1786 case Builtin::BI__builtin_alloca: 1787 case Builtin::BI__builtin_alloca_uninitialized: 1788 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 1789 << TheCall->getDirectCallee(); 1790 break; 1791 case Builtin::BI__arithmetic_fence: 1792 if (SemaBuiltinArithmeticFence(TheCall)) 1793 return ExprError(); 1794 break; 1795 case Builtin::BI__assume: 1796 case Builtin::BI__builtin_assume: 1797 if (SemaBuiltinAssume(TheCall)) 1798 return ExprError(); 1799 break; 1800 case Builtin::BI__builtin_assume_aligned: 1801 if (SemaBuiltinAssumeAligned(TheCall)) 1802 return ExprError(); 1803 break; 1804 case Builtin::BI__builtin_dynamic_object_size: 1805 case Builtin::BI__builtin_object_size: 1806 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 1807 return ExprError(); 1808 break; 1809 case Builtin::BI__builtin_longjmp: 1810 if (SemaBuiltinLongjmp(TheCall)) 1811 return ExprError(); 1812 break; 1813 case Builtin::BI__builtin_setjmp: 1814 if (SemaBuiltinSetjmp(TheCall)) 1815 return ExprError(); 1816 break; 1817 case Builtin::BI__builtin_classify_type: 1818 if (checkArgCount(*this, TheCall, 1)) return true; 1819 TheCall->setType(Context.IntTy); 1820 break; 1821 case Builtin::BI__builtin_complex: 1822 if (SemaBuiltinComplex(TheCall)) 1823 return ExprError(); 1824 break; 1825 case Builtin::BI__builtin_constant_p: { 1826 if (checkArgCount(*this, TheCall, 1)) return true; 1827 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 1828 if (Arg.isInvalid()) return true; 1829 TheCall->setArg(0, Arg.get()); 1830 TheCall->setType(Context.IntTy); 1831 break; 1832 } 1833 case Builtin::BI__builtin_launder: 1834 return SemaBuiltinLaunder(*this, TheCall); 1835 case Builtin::BI__sync_fetch_and_add: 1836 case Builtin::BI__sync_fetch_and_add_1: 1837 case Builtin::BI__sync_fetch_and_add_2: 1838 case Builtin::BI__sync_fetch_and_add_4: 1839 case Builtin::BI__sync_fetch_and_add_8: 1840 case Builtin::BI__sync_fetch_and_add_16: 1841 case Builtin::BI__sync_fetch_and_sub: 1842 case Builtin::BI__sync_fetch_and_sub_1: 1843 case Builtin::BI__sync_fetch_and_sub_2: 1844 case Builtin::BI__sync_fetch_and_sub_4: 1845 case Builtin::BI__sync_fetch_and_sub_8: 1846 case Builtin::BI__sync_fetch_and_sub_16: 1847 case Builtin::BI__sync_fetch_and_or: 1848 case Builtin::BI__sync_fetch_and_or_1: 1849 case Builtin::BI__sync_fetch_and_or_2: 1850 case Builtin::BI__sync_fetch_and_or_4: 1851 case Builtin::BI__sync_fetch_and_or_8: 1852 case Builtin::BI__sync_fetch_and_or_16: 1853 case Builtin::BI__sync_fetch_and_and: 1854 case Builtin::BI__sync_fetch_and_and_1: 1855 case Builtin::BI__sync_fetch_and_and_2: 1856 case Builtin::BI__sync_fetch_and_and_4: 1857 case Builtin::BI__sync_fetch_and_and_8: 1858 case Builtin::BI__sync_fetch_and_and_16: 1859 case Builtin::BI__sync_fetch_and_xor: 1860 case Builtin::BI__sync_fetch_and_xor_1: 1861 case Builtin::BI__sync_fetch_and_xor_2: 1862 case Builtin::BI__sync_fetch_and_xor_4: 1863 case Builtin::BI__sync_fetch_and_xor_8: 1864 case Builtin::BI__sync_fetch_and_xor_16: 1865 case Builtin::BI__sync_fetch_and_nand: 1866 case Builtin::BI__sync_fetch_and_nand_1: 1867 case Builtin::BI__sync_fetch_and_nand_2: 1868 case Builtin::BI__sync_fetch_and_nand_4: 1869 case 
Builtin::BI__sync_fetch_and_nand_8: 1870 case Builtin::BI__sync_fetch_and_nand_16: 1871 case Builtin::BI__sync_add_and_fetch: 1872 case Builtin::BI__sync_add_and_fetch_1: 1873 case Builtin::BI__sync_add_and_fetch_2: 1874 case Builtin::BI__sync_add_and_fetch_4: 1875 case Builtin::BI__sync_add_and_fetch_8: 1876 case Builtin::BI__sync_add_and_fetch_16: 1877 case Builtin::BI__sync_sub_and_fetch: 1878 case Builtin::BI__sync_sub_and_fetch_1: 1879 case Builtin::BI__sync_sub_and_fetch_2: 1880 case Builtin::BI__sync_sub_and_fetch_4: 1881 case Builtin::BI__sync_sub_and_fetch_8: 1882 case Builtin::BI__sync_sub_and_fetch_16: 1883 case Builtin::BI__sync_and_and_fetch: 1884 case Builtin::BI__sync_and_and_fetch_1: 1885 case Builtin::BI__sync_and_and_fetch_2: 1886 case Builtin::BI__sync_and_and_fetch_4: 1887 case Builtin::BI__sync_and_and_fetch_8: 1888 case Builtin::BI__sync_and_and_fetch_16: 1889 case Builtin::BI__sync_or_and_fetch: 1890 case Builtin::BI__sync_or_and_fetch_1: 1891 case Builtin::BI__sync_or_and_fetch_2: 1892 case Builtin::BI__sync_or_and_fetch_4: 1893 case Builtin::BI__sync_or_and_fetch_8: 1894 case Builtin::BI__sync_or_and_fetch_16: 1895 case Builtin::BI__sync_xor_and_fetch: 1896 case Builtin::BI__sync_xor_and_fetch_1: 1897 case Builtin::BI__sync_xor_and_fetch_2: 1898 case Builtin::BI__sync_xor_and_fetch_4: 1899 case Builtin::BI__sync_xor_and_fetch_8: 1900 case Builtin::BI__sync_xor_and_fetch_16: 1901 case Builtin::BI__sync_nand_and_fetch: 1902 case Builtin::BI__sync_nand_and_fetch_1: 1903 case Builtin::BI__sync_nand_and_fetch_2: 1904 case Builtin::BI__sync_nand_and_fetch_4: 1905 case Builtin::BI__sync_nand_and_fetch_8: 1906 case Builtin::BI__sync_nand_and_fetch_16: 1907 case Builtin::BI__sync_val_compare_and_swap: 1908 case Builtin::BI__sync_val_compare_and_swap_1: 1909 case Builtin::BI__sync_val_compare_and_swap_2: 1910 case Builtin::BI__sync_val_compare_and_swap_4: 1911 case Builtin::BI__sync_val_compare_and_swap_8: 1912 case Builtin::BI__sync_val_compare_and_swap_16: 1913 case Builtin::BI__sync_bool_compare_and_swap: 1914 case Builtin::BI__sync_bool_compare_and_swap_1: 1915 case Builtin::BI__sync_bool_compare_and_swap_2: 1916 case Builtin::BI__sync_bool_compare_and_swap_4: 1917 case Builtin::BI__sync_bool_compare_and_swap_8: 1918 case Builtin::BI__sync_bool_compare_and_swap_16: 1919 case Builtin::BI__sync_lock_test_and_set: 1920 case Builtin::BI__sync_lock_test_and_set_1: 1921 case Builtin::BI__sync_lock_test_and_set_2: 1922 case Builtin::BI__sync_lock_test_and_set_4: 1923 case Builtin::BI__sync_lock_test_and_set_8: 1924 case Builtin::BI__sync_lock_test_and_set_16: 1925 case Builtin::BI__sync_lock_release: 1926 case Builtin::BI__sync_lock_release_1: 1927 case Builtin::BI__sync_lock_release_2: 1928 case Builtin::BI__sync_lock_release_4: 1929 case Builtin::BI__sync_lock_release_8: 1930 case Builtin::BI__sync_lock_release_16: 1931 case Builtin::BI__sync_swap: 1932 case Builtin::BI__sync_swap_1: 1933 case Builtin::BI__sync_swap_2: 1934 case Builtin::BI__sync_swap_4: 1935 case Builtin::BI__sync_swap_8: 1936 case Builtin::BI__sync_swap_16: 1937 return SemaBuiltinAtomicOverloaded(TheCallResult); 1938 case Builtin::BI__sync_synchronize: 1939 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 1940 << TheCall->getCallee()->getSourceRange(); 1941 break; 1942 case Builtin::BI__builtin_nontemporal_load: 1943 case Builtin::BI__builtin_nontemporal_store: 1944 return SemaBuiltinNontemporalOverloaded(TheCallResult); 1945 case Builtin::BI__builtin_memcpy_inline: { 1946 clang::Expr 
*SizeOp = TheCall->getArg(2); 1947 // We warn about copying to or from `nullptr` pointers when `size` is 1948 // greater than 0. When `size` is value dependent we cannot evaluate its 1949 // value so we bail out. 1950 if (SizeOp->isValueDependent()) 1951 break; 1952 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) { 1953 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 1954 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 1955 } 1956 break; 1957 } 1958 #define BUILTIN(ID, TYPE, ATTRS) 1959 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 1960 case Builtin::BI##ID: \ 1961 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 1962 #include "clang/Basic/Builtins.def" 1963 case Builtin::BI__annotation: 1964 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 1965 return ExprError(); 1966 break; 1967 case Builtin::BI__builtin_annotation: 1968 if (SemaBuiltinAnnotation(*this, TheCall)) 1969 return ExprError(); 1970 break; 1971 case Builtin::BI__builtin_addressof: 1972 if (SemaBuiltinAddressof(*this, TheCall)) 1973 return ExprError(); 1974 break; 1975 case Builtin::BI__builtin_function_start: 1976 if (SemaBuiltinFunctionStart(*this, TheCall)) 1977 return ExprError(); 1978 break; 1979 case Builtin::BI__builtin_is_aligned: 1980 case Builtin::BI__builtin_align_up: 1981 case Builtin::BI__builtin_align_down: 1982 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 1983 return ExprError(); 1984 break; 1985 case Builtin::BI__builtin_add_overflow: 1986 case Builtin::BI__builtin_sub_overflow: 1987 case Builtin::BI__builtin_mul_overflow: 1988 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 1989 return ExprError(); 1990 break; 1991 case Builtin::BI__builtin_operator_new: 1992 case Builtin::BI__builtin_operator_delete: { 1993 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 1994 ExprResult Res = 1995 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 1996 if (Res.isInvalid()) 1997 CorrectDelayedTyposInExpr(TheCallResult.get()); 1998 return Res; 1999 } 2000 case Builtin::BI__builtin_dump_struct: { 2001 // We first want to ensure we are called with 2 arguments 2002 if (checkArgCount(*this, TheCall, 2)) 2003 return ExprError(); 2004 // Ensure that the first argument is of type 'struct XX *' 2005 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts(); 2006 const QualType PtrArgType = PtrArg->getType(); 2007 if (!PtrArgType->isPointerType() || 2008 !PtrArgType->getPointeeType()->isRecordType()) { 2009 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2010 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType 2011 << "structure pointer"; 2012 return ExprError(); 2013 } 2014 2015 // Ensure that the second argument is of type 'FunctionType' 2016 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts(); 2017 const QualType FnPtrArgType = FnPtrArg->getType(); 2018 if (!FnPtrArgType->isPointerType()) { 2019 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2020 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 2021 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2022 return ExprError(); 2023 } 2024 2025 const auto *FuncType = 2026 FnPtrArgType->getPointeeType()->getAs<FunctionType>(); 2027 2028 if (!FuncType) { 2029 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2030 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 2031 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2032 return ExprError(); 
2033 } 2034 2035 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) { 2036 if (!FT->getNumParams()) { 2037 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2038 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 2039 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2040 return ExprError(); 2041 } 2042 QualType PT = FT->getParamType(0); 2043 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy || 2044 !PT->isPointerType() || !PT->getPointeeType()->isCharType() || 2045 !PT->getPointeeType().isConstQualified()) { 2046 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2047 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 2048 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2049 return ExprError(); 2050 } 2051 } 2052 2053 TheCall->setType(Context.IntTy); 2054 break; 2055 } 2056 case Builtin::BI__builtin_expect_with_probability: { 2057 // We first want to ensure we are called with 3 arguments 2058 if (checkArgCount(*this, TheCall, 3)) 2059 return ExprError(); 2060 // then check probability is constant float in range [0.0, 1.0] 2061 const Expr *ProbArg = TheCall->getArg(2); 2062 SmallVector<PartialDiagnosticAt, 8> Notes; 2063 Expr::EvalResult Eval; 2064 Eval.Diag = &Notes; 2065 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 2066 !Eval.Val.isFloat()) { 2067 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 2068 << ProbArg->getSourceRange(); 2069 for (const PartialDiagnosticAt &PDiag : Notes) 2070 Diag(PDiag.first, PDiag.second); 2071 return ExprError(); 2072 } 2073 llvm::APFloat Probability = Eval.Val.getFloat(); 2074 bool LoseInfo = false; 2075 Probability.convert(llvm::APFloat::IEEEdouble(), 2076 llvm::RoundingMode::Dynamic, &LoseInfo); 2077 if (!(Probability >= llvm::APFloat(0.0) && 2078 Probability <= llvm::APFloat(1.0))) { 2079 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 2080 << ProbArg->getSourceRange(); 2081 return ExprError(); 2082 } 2083 break; 2084 } 2085 case Builtin::BI__builtin_preserve_access_index: 2086 if (SemaBuiltinPreserveAI(*this, TheCall)) 2087 return ExprError(); 2088 break; 2089 case Builtin::BI__builtin_call_with_static_chain: 2090 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 2091 return ExprError(); 2092 break; 2093 case Builtin::BI__exception_code: 2094 case Builtin::BI_exception_code: 2095 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 2096 diag::err_seh___except_block)) 2097 return ExprError(); 2098 break; 2099 case Builtin::BI__exception_info: 2100 case Builtin::BI_exception_info: 2101 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 2102 diag::err_seh___except_filter)) 2103 return ExprError(); 2104 break; 2105 case Builtin::BI__GetExceptionInfo: 2106 if (checkArgCount(*this, TheCall, 1)) 2107 return ExprError(); 2108 2109 if (CheckCXXThrowOperand( 2110 TheCall->getBeginLoc(), 2111 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 2112 TheCall)) 2113 return ExprError(); 2114 2115 TheCall->setType(Context.VoidPtrTy); 2116 break; 2117 // OpenCL v2.0, s6.13.16 - Pipe functions 2118 case Builtin::BIread_pipe: 2119 case Builtin::BIwrite_pipe: 2120 // Since those two functions are declared with var args, we need a semantic 2121 // check for the argument. 
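  // For reference, the forms being validated are roughly the OpenCL C 2.0
  // prototypes
  //   int read_pipe (pipe gentype p, gentype *ptr);
  //   int write_pipe(pipe gentype p, const gentype *ptr);
  // and their 4-argument reserve_id_t/index overloads; the packet pointer
  // argument must match the pipe's packet type.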
2122 if (SemaBuiltinRWPipe(*this, TheCall)) 2123 return ExprError(); 2124 break; 2125 case Builtin::BIreserve_read_pipe: 2126 case Builtin::BIreserve_write_pipe: 2127 case Builtin::BIwork_group_reserve_read_pipe: 2128 case Builtin::BIwork_group_reserve_write_pipe: 2129 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 2130 return ExprError(); 2131 break; 2132 case Builtin::BIsub_group_reserve_read_pipe: 2133 case Builtin::BIsub_group_reserve_write_pipe: 2134 if (checkOpenCLSubgroupExt(*this, TheCall) || 2135 SemaBuiltinReserveRWPipe(*this, TheCall)) 2136 return ExprError(); 2137 break; 2138 case Builtin::BIcommit_read_pipe: 2139 case Builtin::BIcommit_write_pipe: 2140 case Builtin::BIwork_group_commit_read_pipe: 2141 case Builtin::BIwork_group_commit_write_pipe: 2142 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 2143 return ExprError(); 2144 break; 2145 case Builtin::BIsub_group_commit_read_pipe: 2146 case Builtin::BIsub_group_commit_write_pipe: 2147 if (checkOpenCLSubgroupExt(*this, TheCall) || 2148 SemaBuiltinCommitRWPipe(*this, TheCall)) 2149 return ExprError(); 2150 break; 2151 case Builtin::BIget_pipe_num_packets: 2152 case Builtin::BIget_pipe_max_packets: 2153 if (SemaBuiltinPipePackets(*this, TheCall)) 2154 return ExprError(); 2155 break; 2156 case Builtin::BIto_global: 2157 case Builtin::BIto_local: 2158 case Builtin::BIto_private: 2159 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 2160 return ExprError(); 2161 break; 2162 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 2163 case Builtin::BIenqueue_kernel: 2164 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 2165 return ExprError(); 2166 break; 2167 case Builtin::BIget_kernel_work_group_size: 2168 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 2169 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 2170 return ExprError(); 2171 break; 2172 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 2173 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 2174 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 2175 return ExprError(); 2176 break; 2177 case Builtin::BI__builtin_os_log_format: 2178 Cleanup.setExprNeedsCleanups(true); 2179 LLVM_FALLTHROUGH; 2180 case Builtin::BI__builtin_os_log_format_buffer_size: 2181 if (SemaBuiltinOSLogFormat(TheCall)) 2182 return ExprError(); 2183 break; 2184 case Builtin::BI__builtin_frame_address: 2185 case Builtin::BI__builtin_return_address: { 2186 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 2187 return ExprError(); 2188 2189 // -Wframe-address warning if non-zero passed to builtin 2190 // return/frame address. 2191 Expr::EvalResult Result; 2192 if (!TheCall->getArg(0)->isValueDependent() && 2193 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 2194 Result.Val.getInt() != 0) 2195 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 2196 << ((BuiltinID == Builtin::BI__builtin_return_address) 2197 ? "__builtin_return_address" 2198 : "__builtin_frame_address") 2199 << TheCall->getSourceRange(); 2200 break; 2201 } 2202 2203 // __builtin_elementwise_abs restricts the element type to signed integers or 2204 // floating point types only. 
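  // For example, assuming a vector type such as
  //   typedef float float4 __attribute__((ext_vector_type(4)));
  // __builtin_elementwise_abs is accepted on a 'float4' or a plain 'int',
  // while an 'unsigned int' (or a vector of unsigned) operand is diagnosed.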
2205 case Builtin::BI__builtin_elementwise_abs: { 2206 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2207 return ExprError(); 2208 2209 QualType ArgTy = TheCall->getArg(0)->getType(); 2210 QualType EltTy = ArgTy; 2211 2212 if (auto *VecTy = EltTy->getAs<VectorType>()) 2213 EltTy = VecTy->getElementType(); 2214 if (EltTy->isUnsignedIntegerType()) { 2215 Diag(TheCall->getArg(0)->getBeginLoc(), 2216 diag::err_builtin_invalid_arg_type) 2217 << 1 << /* signed integer or float ty*/ 3 << ArgTy; 2218 return ExprError(); 2219 } 2220 break; 2221 } 2222 2223 // These builtins restrict the element type to floating point 2224 // types only. 2225 case Builtin::BI__builtin_elementwise_ceil: 2226 case Builtin::BI__builtin_elementwise_floor: 2227 case Builtin::BI__builtin_elementwise_roundeven: 2228 case Builtin::BI__builtin_elementwise_trunc: { 2229 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2230 return ExprError(); 2231 2232 QualType ArgTy = TheCall->getArg(0)->getType(); 2233 QualType EltTy = ArgTy; 2234 2235 if (auto *VecTy = EltTy->getAs<VectorType>()) 2236 EltTy = VecTy->getElementType(); 2237 if (!EltTy->isFloatingType()) { 2238 Diag(TheCall->getArg(0)->getBeginLoc(), 2239 diag::err_builtin_invalid_arg_type) 2240 << 1 << /* float ty*/ 5 << ArgTy; 2241 2242 return ExprError(); 2243 } 2244 break; 2245 } 2246 2247 case Builtin::BI__builtin_elementwise_min: 2248 case Builtin::BI__builtin_elementwise_max: 2249 if (SemaBuiltinElementwiseMath(TheCall)) 2250 return ExprError(); 2251 break; 2252 case Builtin::BI__builtin_reduce_max: 2253 case Builtin::BI__builtin_reduce_min: { 2254 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2255 return ExprError(); 2256 2257 const Expr *Arg = TheCall->getArg(0); 2258 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2259 if (!TyA) { 2260 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2261 << 1 << /* vector ty*/ 4 << Arg->getType(); 2262 return ExprError(); 2263 } 2264 2265 TheCall->setType(TyA->getElementType()); 2266 break; 2267 } 2268 2269 // These builtins support vectors of integers only. 
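  // For example, with
  //   typedef int int4 __attribute__((ext_vector_type(4)));
  // __builtin_reduce_or on an 'int4' yields an 'int' (the element type is
  // assigned below); a scalar or floating-point vector operand is rejected.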
2270 case Builtin::BI__builtin_reduce_xor: 2271 case Builtin::BI__builtin_reduce_or: 2272 case Builtin::BI__builtin_reduce_and: { 2273 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2274 return ExprError(); 2275 2276 const Expr *Arg = TheCall->getArg(0); 2277 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2278 if (!TyA || !TyA->getElementType()->isIntegerType()) { 2279 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2280 << 1 << /* vector of integers */ 6 << Arg->getType(); 2281 return ExprError(); 2282 } 2283 TheCall->setType(TyA->getElementType()); 2284 break; 2285 } 2286 2287 case Builtin::BI__builtin_matrix_transpose: 2288 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 2289 2290 case Builtin::BI__builtin_matrix_column_major_load: 2291 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 2292 2293 case Builtin::BI__builtin_matrix_column_major_store: 2294 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 2295 2296 case Builtin::BI__builtin_get_device_side_mangled_name: { 2297 auto Check = [](CallExpr *TheCall) { 2298 if (TheCall->getNumArgs() != 1) 2299 return false; 2300 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 2301 if (!DRE) 2302 return false; 2303 auto *D = DRE->getDecl(); 2304 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 2305 return false; 2306 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 2307 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 2308 }; 2309 if (!Check(TheCall)) { 2310 Diag(TheCall->getBeginLoc(), 2311 diag::err_hip_invalid_args_builtin_mangled_name); 2312 return ExprError(); 2313 } 2314 } 2315 } 2316 2317 // Since the target specific builtins for each arch overlap, only check those 2318 // of the arch we are compiling for. 2319 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 2320 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 2321 assert(Context.getAuxTargetInfo() && 2322 "Aux Target Builtin, but not an aux target?"); 2323 2324 if (CheckTSBuiltinFunctionCall( 2325 *Context.getAuxTargetInfo(), 2326 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 2327 return ExprError(); 2328 } else { 2329 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 2330 TheCall)) 2331 return ExprError(); 2332 } 2333 } 2334 2335 return TheCallResult; 2336 } 2337 2338 // Get the valid immediate range for the specified NEON type code. 2339 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 2340 NeonTypeFlags Type(t); 2341 int IsQuad = ForceQuad ? true : Type.isQuad(); 2342 switch (Type.getEltType()) { 2343 case NeonTypeFlags::Int8: 2344 case NeonTypeFlags::Poly8: 2345 return shift ? 7 : (8 << IsQuad) - 1; 2346 case NeonTypeFlags::Int16: 2347 case NeonTypeFlags::Poly16: 2348 return shift ? 15 : (4 << IsQuad) - 1; 2349 case NeonTypeFlags::Int32: 2350 return shift ? 31 : (2 << IsQuad) - 1; 2351 case NeonTypeFlags::Int64: 2352 case NeonTypeFlags::Poly64: 2353 return shift ? 63 : (1 << IsQuad) - 1; 2354 case NeonTypeFlags::Poly128: 2355 return shift ? 
127 : (1 << IsQuad) - 1; 2356 case NeonTypeFlags::Float16: 2357 assert(!shift && "cannot shift float types!"); 2358 return (4 << IsQuad) - 1; 2359 case NeonTypeFlags::Float32: 2360 assert(!shift && "cannot shift float types!"); 2361 return (2 << IsQuad) - 1; 2362 case NeonTypeFlags::Float64: 2363 assert(!shift && "cannot shift float types!"); 2364 return (1 << IsQuad) - 1; 2365 case NeonTypeFlags::BFloat16: 2366 assert(!shift && "cannot shift float types!"); 2367 return (4 << IsQuad) - 1; 2368 } 2369 llvm_unreachable("Invalid NeonTypeFlag!"); 2370 } 2371 2372 /// getNeonEltType - Return the QualType corresponding to the elements of 2373 /// the vector type specified by the NeonTypeFlags. This is used to check 2374 /// the pointer arguments for Neon load/store intrinsics. 2375 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2376 bool IsPolyUnsigned, bool IsInt64Long) { 2377 switch (Flags.getEltType()) { 2378 case NeonTypeFlags::Int8: 2379 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2380 case NeonTypeFlags::Int16: 2381 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2382 case NeonTypeFlags::Int32: 2383 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2384 case NeonTypeFlags::Int64: 2385 if (IsInt64Long) 2386 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2387 else 2388 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2389 : Context.LongLongTy; 2390 case NeonTypeFlags::Poly8: 2391 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2392 case NeonTypeFlags::Poly16: 2393 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 2394 case NeonTypeFlags::Poly64: 2395 if (IsInt64Long) 2396 return Context.UnsignedLongTy; 2397 else 2398 return Context.UnsignedLongLongTy; 2399 case NeonTypeFlags::Poly128: 2400 break; 2401 case NeonTypeFlags::Float16: 2402 return Context.HalfTy; 2403 case NeonTypeFlags::Float32: 2404 return Context.FloatTy; 2405 case NeonTypeFlags::Float64: 2406 return Context.DoubleTy; 2407 case NeonTypeFlags::BFloat16: 2408 return Context.BFloat16Ty; 2409 } 2410 llvm_unreachable("Invalid NeonTypeFlag!"); 2411 } 2412 2413 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2414 // Range check SVE intrinsics that take immediate values. 2415 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2416 2417 switch (BuiltinID) { 2418 default: 2419 return false; 2420 #define GET_SVE_IMMEDIATE_CHECK 2421 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2422 #undef GET_SVE_IMMEDIATE_CHECK 2423 } 2424 2425 // Perform all the immediate checks for this builtin call. 2426 bool HasError = false; 2427 for (auto &I : ImmChecks) { 2428 int ArgNum, CheckTy, ElementSizeInBits; 2429 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2430 2431 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2432 2433 // Function that checks whether the operand (ArgNum) is an immediate 2434 // that is one of the predefined values. 2435 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2436 int ErrDiag) -> bool { 2437 // We can't check the value of a dependent argument. 2438 Expr *Arg = TheCall->getArg(ArgNum); 2439 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2440 return false; 2441 2442 // Check constant-ness first. 
2443 llvm::APSInt Imm; 2444 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2445 return true; 2446 2447 if (!CheckImm(Imm.getSExtValue())) 2448 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2449 return false; 2450 }; 2451 2452 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2453 case SVETypeFlags::ImmCheck0_31: 2454 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2455 HasError = true; 2456 break; 2457 case SVETypeFlags::ImmCheck0_13: 2458 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2459 HasError = true; 2460 break; 2461 case SVETypeFlags::ImmCheck1_16: 2462 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2463 HasError = true; 2464 break; 2465 case SVETypeFlags::ImmCheck0_7: 2466 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2467 HasError = true; 2468 break; 2469 case SVETypeFlags::ImmCheckExtract: 2470 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2471 (2048 / ElementSizeInBits) - 1)) 2472 HasError = true; 2473 break; 2474 case SVETypeFlags::ImmCheckShiftRight: 2475 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2476 HasError = true; 2477 break; 2478 case SVETypeFlags::ImmCheckShiftRightNarrow: 2479 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2480 ElementSizeInBits / 2)) 2481 HasError = true; 2482 break; 2483 case SVETypeFlags::ImmCheckShiftLeft: 2484 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2485 ElementSizeInBits - 1)) 2486 HasError = true; 2487 break; 2488 case SVETypeFlags::ImmCheckLaneIndex: 2489 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2490 (128 / (1 * ElementSizeInBits)) - 1)) 2491 HasError = true; 2492 break; 2493 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2494 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2495 (128 / (2 * ElementSizeInBits)) - 1)) 2496 HasError = true; 2497 break; 2498 case SVETypeFlags::ImmCheckLaneIndexDot: 2499 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2500 (128 / (4 * ElementSizeInBits)) - 1)) 2501 HasError = true; 2502 break; 2503 case SVETypeFlags::ImmCheckComplexRot90_270: 2504 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2505 diag::err_rotation_argument_to_cadd)) 2506 HasError = true; 2507 break; 2508 case SVETypeFlags::ImmCheckComplexRotAll90: 2509 if (CheckImmediateInSet( 2510 [](int64_t V) { 2511 return V == 0 || V == 90 || V == 180 || V == 270; 2512 }, 2513 diag::err_rotation_argument_to_cmla)) 2514 HasError = true; 2515 break; 2516 case SVETypeFlags::ImmCheck0_1: 2517 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2518 HasError = true; 2519 break; 2520 case SVETypeFlags::ImmCheck0_2: 2521 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2522 HasError = true; 2523 break; 2524 case SVETypeFlags::ImmCheck0_3: 2525 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2526 HasError = true; 2527 break; 2528 } 2529 } 2530 2531 return HasError; 2532 } 2533 2534 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2535 unsigned BuiltinID, CallExpr *TheCall) { 2536 llvm::APSInt Result; 2537 uint64_t mask = 0; 2538 unsigned TV = 0; 2539 int PtrArgNum = -1; 2540 bool HasConstPtr = false; 2541 switch (BuiltinID) { 2542 #define GET_NEON_OVERLOAD_CHECK 2543 #include "clang/Basic/arm_neon.inc" 2544 #include "clang/Basic/arm_fp16.inc" 2545 #undef GET_NEON_OVERLOAD_CHECK 2546 } 2547 2548 // For NEON intrinsics which are overloaded on vector element type, validate 2549 // the immediate which specifies which variant to emit. 
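  // By convention that immediate is the last argument of the call, and 'mask'
  // (populated from the .inc files above) has one bit set for every
  // NeonTypeFlags value that is valid for this particular builtin.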
2550 unsigned ImmArg = TheCall->getNumArgs()-1; 2551 if (mask) { 2552 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2553 return true; 2554 2555 TV = Result.getLimitedValue(64); 2556 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2557 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2558 << TheCall->getArg(ImmArg)->getSourceRange(); 2559 } 2560 2561 if (PtrArgNum >= 0) { 2562 // Check that pointer arguments have the specified type. 2563 Expr *Arg = TheCall->getArg(PtrArgNum); 2564 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2565 Arg = ICE->getSubExpr(); 2566 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2567 QualType RHSTy = RHS.get()->getType(); 2568 2569 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2570 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2571 Arch == llvm::Triple::aarch64_32 || 2572 Arch == llvm::Triple::aarch64_be; 2573 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2574 QualType EltTy = 2575 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2576 if (HasConstPtr) 2577 EltTy = EltTy.withConst(); 2578 QualType LHSTy = Context.getPointerType(EltTy); 2579 AssignConvertType ConvTy; 2580 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2581 if (RHS.isInvalid()) 2582 return true; 2583 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2584 RHS.get(), AA_Assigning)) 2585 return true; 2586 } 2587 2588 // For NEON intrinsics which take an immediate value as part of the 2589 // instruction, range check them here. 2590 unsigned i = 0, l = 0, u = 0; 2591 switch (BuiltinID) { 2592 default: 2593 return false; 2594 #define GET_NEON_IMMEDIATE_CHECK 2595 #include "clang/Basic/arm_neon.inc" 2596 #include "clang/Basic/arm_fp16.inc" 2597 #undef GET_NEON_IMMEDIATE_CHECK 2598 } 2599 2600 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2601 } 2602 2603 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2604 switch (BuiltinID) { 2605 default: 2606 return false; 2607 #include "clang/Basic/arm_mve_builtin_sema.inc" 2608 } 2609 } 2610 2611 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2612 CallExpr *TheCall) { 2613 bool Err = false; 2614 switch (BuiltinID) { 2615 default: 2616 return false; 2617 #include "clang/Basic/arm_cde_builtin_sema.inc" 2618 } 2619 2620 if (Err) 2621 return true; 2622 2623 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2624 } 2625 2626 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2627 const Expr *CoprocArg, bool WantCDE) { 2628 if (isConstantEvaluated()) 2629 return false; 2630 2631 // We can't check the value of a dependent argument. 
2632 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2633 return false; 2634 2635 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 2636 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2637 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2638 2639 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2640 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2641 2642 if (IsCDECoproc != WantCDE) 2643 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2644 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2645 2646 return false; 2647 } 2648 2649 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2650 unsigned MaxWidth) { 2651 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2652 BuiltinID == ARM::BI__builtin_arm_ldaex || 2653 BuiltinID == ARM::BI__builtin_arm_strex || 2654 BuiltinID == ARM::BI__builtin_arm_stlex || 2655 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2656 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2657 BuiltinID == AArch64::BI__builtin_arm_strex || 2658 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2659 "unexpected ARM builtin"); 2660 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2661 BuiltinID == ARM::BI__builtin_arm_ldaex || 2662 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2663 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2664 2665 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 2666 2667 // Ensure that we have the proper number of arguments. 2668 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 2669 return true; 2670 2671 // Inspect the pointer argument of the atomic builtin. This should always be 2672 // a pointer type, whose element is an integral scalar or pointer type. 2673 // Because it is a pointer type, we don't have to worry about any implicit 2674 // casts here. 2675 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); 2676 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 2677 if (PointerArgRes.isInvalid()) 2678 return true; 2679 PointerArg = PointerArgRes.get(); 2680 2681 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 2682 if (!pointerType) { 2683 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 2684 << PointerArg->getType() << PointerArg->getSourceRange(); 2685 return true; 2686 } 2687 2688 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 2689 // task is to insert the appropriate casts into the AST. First work out just 2690 // what the appropriate type is. 2691 QualType ValType = pointerType->getPointeeType(); 2692 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 2693 if (IsLdrex) 2694 AddrType.addConst(); 2695 2696 // Issue a warning if the cast is dodgy. 2697 CastKind CastNeeded = CK_NoOp; 2698 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 2699 CastNeeded = CK_BitCast; 2700 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 2701 << PointerArg->getType() << Context.getPointerType(AddrType) 2702 << AA_Passing << PointerArg->getSourceRange(); 2703 } 2704 2705 // Finally, do the cast and replace the argument with the corrected version. 2706 AddrType = Context.getPointerType(AddrType); 2707 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 2708 if (PointerArgRes.isInvalid()) 2709 return true; 2710 PointerArg = PointerArgRes.get(); 2711 2712 TheCall->setArg(IsLdrex ? 
0 : 1, PointerArg); 2713 2714 // In general, we allow ints, floats and pointers to be loaded and stored. 2715 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 2716 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 2717 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 2718 << PointerArg->getType() << PointerArg->getSourceRange(); 2719 return true; 2720 } 2721 2722 // But ARM doesn't have instructions to deal with 128-bit versions. 2723 if (Context.getTypeSize(ValType) > MaxWidth) { 2724 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 2725 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 2726 << PointerArg->getType() << PointerArg->getSourceRange(); 2727 return true; 2728 } 2729 2730 switch (ValType.getObjCLifetime()) { 2731 case Qualifiers::OCL_None: 2732 case Qualifiers::OCL_ExplicitNone: 2733 // okay 2734 break; 2735 2736 case Qualifiers::OCL_Weak: 2737 case Qualifiers::OCL_Strong: 2738 case Qualifiers::OCL_Autoreleasing: 2739 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 2740 << ValType << PointerArg->getSourceRange(); 2741 return true; 2742 } 2743 2744 if (IsLdrex) { 2745 TheCall->setType(ValType); 2746 return false; 2747 } 2748 2749 // Initialize the argument to be stored. 2750 ExprResult ValArg = TheCall->getArg(0); 2751 InitializedEntity Entity = InitializedEntity::InitializeParameter( 2752 Context, ValType, /*consume*/ false); 2753 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 2754 if (ValArg.isInvalid()) 2755 return true; 2756 TheCall->setArg(0, ValArg.get()); 2757 2758 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 2759 // but the custom checker bypasses all default analysis. 2760 TheCall->setType(Context.IntTy); 2761 return false; 2762 } 2763 2764 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2765 CallExpr *TheCall) { 2766 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 2767 BuiltinID == ARM::BI__builtin_arm_ldaex || 2768 BuiltinID == ARM::BI__builtin_arm_strex || 2769 BuiltinID == ARM::BI__builtin_arm_stlex) { 2770 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 2771 } 2772 2773 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 2774 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2775 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 2776 } 2777 2778 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 2779 BuiltinID == ARM::BI__builtin_arm_wsr64) 2780 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 2781 2782 if (BuiltinID == ARM::BI__builtin_arm_rsr || 2783 BuiltinID == ARM::BI__builtin_arm_rsrp || 2784 BuiltinID == ARM::BI__builtin_arm_wsr || 2785 BuiltinID == ARM::BI__builtin_arm_wsrp) 2786 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2787 2788 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2789 return true; 2790 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 2791 return true; 2792 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2793 return true; 2794 2795 // For intrinsics which take an immediate value as part of the instruction, 2796 // range check them here. 2797 // FIXME: VFP Intrinsics should error if VFP not present. 
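  // For example, __builtin_arm_ssat(X, N) requires the saturation position N
  // to be an integer constant expression in [1, 32], and __builtin_arm_usat
  // one in [0, 31], mirroring the immediate fields of the underlying
  // instructions.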
2798 switch (BuiltinID) { 2799 default: return false; 2800 case ARM::BI__builtin_arm_ssat: 2801 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 2802 case ARM::BI__builtin_arm_usat: 2803 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 2804 case ARM::BI__builtin_arm_ssat16: 2805 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 2806 case ARM::BI__builtin_arm_usat16: 2807 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 2808 case ARM::BI__builtin_arm_vcvtr_f: 2809 case ARM::BI__builtin_arm_vcvtr_d: 2810 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 2811 case ARM::BI__builtin_arm_dmb: 2812 case ARM::BI__builtin_arm_dsb: 2813 case ARM::BI__builtin_arm_isb: 2814 case ARM::BI__builtin_arm_dbg: 2815 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 2816 case ARM::BI__builtin_arm_cdp: 2817 case ARM::BI__builtin_arm_cdp2: 2818 case ARM::BI__builtin_arm_mcr: 2819 case ARM::BI__builtin_arm_mcr2: 2820 case ARM::BI__builtin_arm_mrc: 2821 case ARM::BI__builtin_arm_mrc2: 2822 case ARM::BI__builtin_arm_mcrr: 2823 case ARM::BI__builtin_arm_mcrr2: 2824 case ARM::BI__builtin_arm_mrrc: 2825 case ARM::BI__builtin_arm_mrrc2: 2826 case ARM::BI__builtin_arm_ldc: 2827 case ARM::BI__builtin_arm_ldcl: 2828 case ARM::BI__builtin_arm_ldc2: 2829 case ARM::BI__builtin_arm_ldc2l: 2830 case ARM::BI__builtin_arm_stc: 2831 case ARM::BI__builtin_arm_stcl: 2832 case ARM::BI__builtin_arm_stc2: 2833 case ARM::BI__builtin_arm_stc2l: 2834 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 2835 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 2836 /*WantCDE*/ false); 2837 } 2838 } 2839 2840 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 2841 unsigned BuiltinID, 2842 CallExpr *TheCall) { 2843 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 2844 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2845 BuiltinID == AArch64::BI__builtin_arm_strex || 2846 BuiltinID == AArch64::BI__builtin_arm_stlex) { 2847 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 2848 } 2849 2850 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 2851 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2852 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 2853 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 2854 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 2855 } 2856 2857 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 2858 BuiltinID == AArch64::BI__builtin_arm_wsr64) 2859 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2860 2861 // Memory Tagging Extensions (MTE) Intrinsics 2862 if (BuiltinID == AArch64::BI__builtin_arm_irg || 2863 BuiltinID == AArch64::BI__builtin_arm_addg || 2864 BuiltinID == AArch64::BI__builtin_arm_gmi || 2865 BuiltinID == AArch64::BI__builtin_arm_ldg || 2866 BuiltinID == AArch64::BI__builtin_arm_stg || 2867 BuiltinID == AArch64::BI__builtin_arm_subp) { 2868 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 2869 } 2870 2871 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 2872 BuiltinID == AArch64::BI__builtin_arm_rsrp || 2873 BuiltinID == AArch64::BI__builtin_arm_wsr || 2874 BuiltinID == AArch64::BI__builtin_arm_wsrp) 2875 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2876 2877 // Only check the valid encoding range. Any constant in this range would be 2878 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 2879 // an exception for incorrect registers. This matches MSVC behavior. 
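  // The immediate is the packed op0/op1/CRn/CRm/op2 system-register encoding
  // (e.g. TPIDR_EL0 is S3_3_C13_C0_2); anything within the 15-bit range
  // accepted below is passed through to the backend unchanged.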
2880 if (BuiltinID == AArch64::BI_ReadStatusReg || 2881 BuiltinID == AArch64::BI_WriteStatusReg) 2882 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 2883 2884 if (BuiltinID == AArch64::BI__getReg) 2885 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 2886 2887 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2888 return true; 2889 2890 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 2891 return true; 2892 2893 // For intrinsics which take an immediate value as part of the instruction, 2894 // range check them here. 2895 unsigned i = 0, l = 0, u = 0; 2896 switch (BuiltinID) { 2897 default: return false; 2898 case AArch64::BI__builtin_arm_dmb: 2899 case AArch64::BI__builtin_arm_dsb: 2900 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 2901 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 2902 } 2903 2904 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2905 } 2906 2907 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 2908 if (Arg->getType()->getAsPlaceholderType()) 2909 return false; 2910 2911 // The first argument needs to be a record field access. 2912 // If it is an array element access, we delay decision 2913 // to BPF backend to check whether the access is a 2914 // field access or not. 2915 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 2916 isa<MemberExpr>(Arg->IgnoreParens()) || 2917 isa<ArraySubscriptExpr>(Arg->IgnoreParens())); 2918 } 2919 2920 static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S, 2921 QualType VectorTy, QualType EltTy) { 2922 QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType(); 2923 if (!Context.hasSameType(VectorEltTy, EltTy)) { 2924 S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types) 2925 << Call->getSourceRange() << VectorEltTy << EltTy; 2926 return false; 2927 } 2928 return true; 2929 } 2930 2931 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 2932 QualType ArgType = Arg->getType(); 2933 if (ArgType->getAsPlaceholderType()) 2934 return false; 2935 2936 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type 2937 // format: 2938 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 2939 // 2. <type> var; 2940 // __builtin_preserve_type_info(var, flag); 2941 if (!isa<DeclRefExpr>(Arg->IgnoreParens()) && 2942 !isa<UnaryOperator>(Arg->IgnoreParens())) 2943 return false; 2944 2945 // Typedef type. 2946 if (ArgType->getAs<TypedefType>()) 2947 return true; 2948 2949 // Record type or Enum type. 
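  // Only named records and enums qualify here, e.g.
  //   struct S { int a; } s; __builtin_preserve_type_info(s, flag);
  // whereas an unnamed 'struct { ... }' falls through and is later rejected
  // as an invalid argument.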
2950 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2951 if (const auto *RT = Ty->getAs<RecordType>()) { 2952 if (!RT->getDecl()->getDeclName().isEmpty()) 2953 return true; 2954 } else if (const auto *ET = Ty->getAs<EnumType>()) { 2955 if (!ET->getDecl()->getDeclName().isEmpty()) 2956 return true; 2957 } 2958 2959 return false; 2960 } 2961 2962 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 2963 QualType ArgType = Arg->getType(); 2964 if (ArgType->getAsPlaceholderType()) 2965 return false; 2966 2967 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 2968 // format: 2969 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 2970 // flag); 2971 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 2972 if (!UO) 2973 return false; 2974 2975 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 2976 if (!CE) 2977 return false; 2978 if (CE->getCastKind() != CK_IntegralToPointer && 2979 CE->getCastKind() != CK_NullToPointer) 2980 return false; 2981 2982 // The integer must be from an EnumConstantDecl. 2983 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 2984 if (!DR) 2985 return false; 2986 2987 const EnumConstantDecl *Enumerator = 2988 dyn_cast<EnumConstantDecl>(DR->getDecl()); 2989 if (!Enumerator) 2990 return false; 2991 2992 // The type must be EnumType. 2993 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2994 const auto *ET = Ty->getAs<EnumType>(); 2995 if (!ET) 2996 return false; 2997 2998 // The enum value must be supported. 2999 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator); 3000 } 3001 3002 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 3003 CallExpr *TheCall) { 3004 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 3005 BuiltinID == BPF::BI__builtin_btf_type_id || 3006 BuiltinID == BPF::BI__builtin_preserve_type_info || 3007 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 3008 "unexpected BPF builtin"); 3009 3010 if (checkArgCount(*this, TheCall, 2)) 3011 return true; 3012 3013 // The second argument needs to be a constant int 3014 Expr *Arg = TheCall->getArg(1); 3015 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 3016 diag::kind kind; 3017 if (!Value) { 3018 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 3019 kind = diag::err_preserve_field_info_not_const; 3020 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 3021 kind = diag::err_btf_type_id_not_const; 3022 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 3023 kind = diag::err_preserve_type_info_not_const; 3024 else 3025 kind = diag::err_preserve_enum_value_not_const; 3026 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 3027 return true; 3028 } 3029 3030 // The first argument 3031 Arg = TheCall->getArg(0); 3032 bool InvalidArg = false; 3033 bool ReturnUnsignedInt = true; 3034 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 3035 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 3036 InvalidArg = true; 3037 kind = diag::err_preserve_field_info_not_field; 3038 } 3039 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 3040 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 3041 InvalidArg = true; 3042 kind = diag::err_preserve_type_info_invalid; 3043 } 3044 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 3045 if (!isValidBPFPreserveEnumValueArg(Arg)) { 3046 InvalidArg = true; 3047 kind = diag::err_preserve_enum_value_invalid; 3048 } 3049 ReturnUnsignedInt = false; 3050 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) { 3051 ReturnUnsignedInt 
= false; 3052 } 3053 3054 if (InvalidArg) { 3055 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 3056 return true; 3057 } 3058 3059 if (ReturnUnsignedInt) 3060 TheCall->setType(Context.UnsignedIntTy); 3061 else 3062 TheCall->setType(Context.UnsignedLongTy); 3063 return false; 3064 } 3065 3066 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3067 struct ArgInfo { 3068 uint8_t OpNum; 3069 bool IsSigned; 3070 uint8_t BitWidth; 3071 uint8_t Align; 3072 }; 3073 struct BuiltinInfo { 3074 unsigned BuiltinID; 3075 ArgInfo Infos[2]; 3076 }; 3077 3078 static BuiltinInfo Infos[] = { 3079 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 3080 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 3081 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 3082 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 3083 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 3084 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 3085 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 3086 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 3087 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 3088 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 3089 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 3090 3091 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 3092 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 3093 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 3094 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 3095 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 3096 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 3097 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 3098 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 3099 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 3100 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 3101 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 3102 3103 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 3104 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 3105 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 3106 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 3107 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 3108 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 3109 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 3110 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 3111 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 3112 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 3113 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 3114 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 3115 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 3116 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 3117 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 3118 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 3119 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 3120 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 3121 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 3122 { 
Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 3123 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 3124 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 3125 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 3126 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 3127 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 3128 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 3129 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 3130 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 3131 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 3132 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 3133 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 3134 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 3135 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 3136 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 3137 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 3138 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 3139 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 3140 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 3141 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 3142 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 3143 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 3144 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 3145 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 3146 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 3147 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 3148 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 3149 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 3150 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 3151 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 3152 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 3153 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 3154 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 3155 {{ 1, false, 6, 0 }} }, 3156 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 3157 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 3158 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 3159 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 3160 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 3161 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 3162 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 3163 {{ 1, false, 5, 0 }} }, 3164 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 3165 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 3166 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 3167 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 3168 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 3169 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 3170 { 2, false, 5, 0 }} }, 3171 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 
3172 { 2, false, 6, 0 }} }, 3173 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 3174 { 3, false, 5, 0 }} }, 3175 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 3176 { 3, false, 6, 0 }} }, 3177 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 3178 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 3179 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 3180 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 3181 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 3182 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 3183 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 3184 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 3185 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 3186 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 3187 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 3188 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 3189 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 3190 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 3191 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 3192 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 3193 {{ 2, false, 4, 0 }, 3194 { 3, false, 5, 0 }} }, 3195 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 3196 {{ 2, false, 4, 0 }, 3197 { 3, false, 5, 0 }} }, 3198 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 3199 {{ 2, false, 4, 0 }, 3200 { 3, false, 5, 0 }} }, 3201 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 3202 {{ 2, false, 4, 0 }, 3203 { 3, false, 5, 0 }} }, 3204 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 3205 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 3206 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 3207 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 3208 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 3209 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 3210 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 3211 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 3212 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 3213 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 3214 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 3215 { 2, false, 5, 0 }} }, 3216 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 3217 { 2, false, 6, 0 }} }, 3218 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 3219 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 3220 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 3221 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 3222 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 3223 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 3224 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 3225 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 3226 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 3227 {{ 1, false, 4, 0 }} }, 3228 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 3229 { 
Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 3230 {{ 1, false, 4, 0 }} }, 3231 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 3232 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 3233 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 3234 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 3235 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 3236 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 3237 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 3238 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 3239 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 3240 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 3241 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 3242 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 3243 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 3244 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 3245 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 3246 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 3247 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 3248 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 3249 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 3250 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 3251 {{ 3, false, 1, 0 }} }, 3252 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 3253 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 3254 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 3255 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 3256 {{ 3, false, 1, 0 }} }, 3257 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 3258 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 3259 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 3260 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 3261 {{ 3, false, 1, 0 }} }, 3262 }; 3263 3264 // Use a dynamically initialized static to sort the table exactly once on 3265 // first run. 3266 static const bool SortOnce = 3267 (llvm::sort(Infos, 3268 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 3269 return LHS.BuiltinID < RHS.BuiltinID; 3270 }), 3271 true); 3272 (void)SortOnce; 3273 3274 const BuiltinInfo *F = llvm::partition_point( 3275 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 3276 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 3277 return false; 3278 3279 bool Error = false; 3280 3281 for (const ArgInfo &A : F->Infos) { 3282 // Ignore empty ArgInfo elements. 3283 if (A.BitWidth == 0) 3284 continue; 3285 3286 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 3287 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1; 3288 if (!A.Align) { 3289 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3290 } else { 3291 unsigned M = 1 << A.Align; 3292 Min *= M; 3293 Max *= M; 3294 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3295 Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 3296 } 3297 } 3298 return Error; 3299 } 3300 3301 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 3302 CallExpr *TheCall) { 3303 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 3304 } 3305 3306 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 3307 unsigned BuiltinID, CallExpr *TheCall) { 3308 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 3309 CheckMipsBuiltinArgument(BuiltinID, TheCall); 3310 } 3311 3312 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 3313 CallExpr *TheCall) { 3314 3315 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 3316 BuiltinID <= Mips::BI__builtin_mips_lwx) { 3317 if (!TI.hasFeature("dsp")) 3318 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 3319 } 3320 3321 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 3322 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 3323 if (!TI.hasFeature("dspr2")) 3324 return Diag(TheCall->getBeginLoc(), 3325 diag::err_mips_builtin_requires_dspr2); 3326 } 3327 3328 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 3329 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 3330 if (!TI.hasFeature("msa")) 3331 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 3332 } 3333 3334 return false; 3335 } 3336 3337 // CheckMipsBuiltinArgument - Checks that the constant value passed to the 3338 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 3339 // ordering for DSP is unspecified. MSA is ordered by the data format used 3340 // by the underlying instruction i.e., df/m, df/n and then by size. 3341 // 3342 // FIXME: The size tests here should instead be tablegen'd along with the 3343 // definitions from include/clang/Basic/BuiltinsMips.def. 3344 // FIXME: GCC is strict on signedness for some of these intrinsics; we should 3345 // be too. 3346 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3347 unsigned i = 0, l = 0, u = 0, m = 0; 3348 switch (BuiltinID) { 3349 default: return false; 3350 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 3351 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 3352 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 3353 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 3354 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 3355 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 3356 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 3357 // MSA intrinsics. Instructions (which the intrinsics map to) that use the 3358 // df/m field. 3359 // These intrinsics take an unsigned 3 bit immediate.
3360 case Mips::BI__builtin_msa_bclri_b: 3361 case Mips::BI__builtin_msa_bnegi_b: 3362 case Mips::BI__builtin_msa_bseti_b: 3363 case Mips::BI__builtin_msa_sat_s_b: 3364 case Mips::BI__builtin_msa_sat_u_b: 3365 case Mips::BI__builtin_msa_slli_b: 3366 case Mips::BI__builtin_msa_srai_b: 3367 case Mips::BI__builtin_msa_srari_b: 3368 case Mips::BI__builtin_msa_srli_b: 3369 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3370 case Mips::BI__builtin_msa_binsli_b: 3371 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3372 // These intrinsics take an unsigned 4 bit immediate. 3373 case Mips::BI__builtin_msa_bclri_h: 3374 case Mips::BI__builtin_msa_bnegi_h: 3375 case Mips::BI__builtin_msa_bseti_h: 3376 case Mips::BI__builtin_msa_sat_s_h: 3377 case Mips::BI__builtin_msa_sat_u_h: 3378 case Mips::BI__builtin_msa_slli_h: 3379 case Mips::BI__builtin_msa_srai_h: 3380 case Mips::BI__builtin_msa_srari_h: 3381 case Mips::BI__builtin_msa_srli_h: 3382 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3383 case Mips::BI__builtin_msa_binsli_h: 3384 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3385 // These intrinsics take an unsigned 5 bit immediate. 3386 // The first block of intrinsics actually have an unsigned 5 bit field, 3387 // not a df/n field. 3388 case Mips::BI__builtin_msa_cfcmsa: 3389 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3390 case Mips::BI__builtin_msa_clei_u_b: 3391 case Mips::BI__builtin_msa_clei_u_h: 3392 case Mips::BI__builtin_msa_clei_u_w: 3393 case Mips::BI__builtin_msa_clei_u_d: 3394 case Mips::BI__builtin_msa_clti_u_b: 3395 case Mips::BI__builtin_msa_clti_u_h: 3396 case Mips::BI__builtin_msa_clti_u_w: 3397 case Mips::BI__builtin_msa_clti_u_d: 3398 case Mips::BI__builtin_msa_maxi_u_b: 3399 case Mips::BI__builtin_msa_maxi_u_h: 3400 case Mips::BI__builtin_msa_maxi_u_w: 3401 case Mips::BI__builtin_msa_maxi_u_d: 3402 case Mips::BI__builtin_msa_mini_u_b: 3403 case Mips::BI__builtin_msa_mini_u_h: 3404 case Mips::BI__builtin_msa_mini_u_w: 3405 case Mips::BI__builtin_msa_mini_u_d: 3406 case Mips::BI__builtin_msa_addvi_b: 3407 case Mips::BI__builtin_msa_addvi_h: 3408 case Mips::BI__builtin_msa_addvi_w: 3409 case Mips::BI__builtin_msa_addvi_d: 3410 case Mips::BI__builtin_msa_bclri_w: 3411 case Mips::BI__builtin_msa_bnegi_w: 3412 case Mips::BI__builtin_msa_bseti_w: 3413 case Mips::BI__builtin_msa_sat_s_w: 3414 case Mips::BI__builtin_msa_sat_u_w: 3415 case Mips::BI__builtin_msa_slli_w: 3416 case Mips::BI__builtin_msa_srai_w: 3417 case Mips::BI__builtin_msa_srari_w: 3418 case Mips::BI__builtin_msa_srli_w: 3419 case Mips::BI__builtin_msa_srlri_w: 3420 case Mips::BI__builtin_msa_subvi_b: 3421 case Mips::BI__builtin_msa_subvi_h: 3422 case Mips::BI__builtin_msa_subvi_w: 3423 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3424 case Mips::BI__builtin_msa_binsli_w: 3425 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3426 // These intrinsics take an unsigned 6 bit immediate. 
3427 case Mips::BI__builtin_msa_bclri_d: 3428 case Mips::BI__builtin_msa_bnegi_d: 3429 case Mips::BI__builtin_msa_bseti_d: 3430 case Mips::BI__builtin_msa_sat_s_d: 3431 case Mips::BI__builtin_msa_sat_u_d: 3432 case Mips::BI__builtin_msa_slli_d: 3433 case Mips::BI__builtin_msa_srai_d: 3434 case Mips::BI__builtin_msa_srari_d: 3435 case Mips::BI__builtin_msa_srli_d: 3436 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3437 case Mips::BI__builtin_msa_binsli_d: 3438 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3439 // These intrinsics take a signed 5 bit immediate. 3440 case Mips::BI__builtin_msa_ceqi_b: 3441 case Mips::BI__builtin_msa_ceqi_h: 3442 case Mips::BI__builtin_msa_ceqi_w: 3443 case Mips::BI__builtin_msa_ceqi_d: 3444 case Mips::BI__builtin_msa_clti_s_b: 3445 case Mips::BI__builtin_msa_clti_s_h: 3446 case Mips::BI__builtin_msa_clti_s_w: 3447 case Mips::BI__builtin_msa_clti_s_d: 3448 case Mips::BI__builtin_msa_clei_s_b: 3449 case Mips::BI__builtin_msa_clei_s_h: 3450 case Mips::BI__builtin_msa_clei_s_w: 3451 case Mips::BI__builtin_msa_clei_s_d: 3452 case Mips::BI__builtin_msa_maxi_s_b: 3453 case Mips::BI__builtin_msa_maxi_s_h: 3454 case Mips::BI__builtin_msa_maxi_s_w: 3455 case Mips::BI__builtin_msa_maxi_s_d: 3456 case Mips::BI__builtin_msa_mini_s_b: 3457 case Mips::BI__builtin_msa_mini_s_h: 3458 case Mips::BI__builtin_msa_mini_s_w: 3459 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3460 // These intrinsics take an unsigned 8 bit immediate. 3461 case Mips::BI__builtin_msa_andi_b: 3462 case Mips::BI__builtin_msa_nori_b: 3463 case Mips::BI__builtin_msa_ori_b: 3464 case Mips::BI__builtin_msa_shf_b: 3465 case Mips::BI__builtin_msa_shf_h: 3466 case Mips::BI__builtin_msa_shf_w: 3467 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3468 case Mips::BI__builtin_msa_bseli_b: 3469 case Mips::BI__builtin_msa_bmnzi_b: 3470 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3471 // df/n format 3472 // These intrinsics take an unsigned 4 bit immediate. 3473 case Mips::BI__builtin_msa_copy_s_b: 3474 case Mips::BI__builtin_msa_copy_u_b: 3475 case Mips::BI__builtin_msa_insve_b: 3476 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3477 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3478 // These intrinsics take an unsigned 3 bit immediate. 3479 case Mips::BI__builtin_msa_copy_s_h: 3480 case Mips::BI__builtin_msa_copy_u_h: 3481 case Mips::BI__builtin_msa_insve_h: 3482 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3483 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3484 // These intrinsics take an unsigned 2 bit immediate. 3485 case Mips::BI__builtin_msa_copy_s_w: 3486 case Mips::BI__builtin_msa_copy_u_w: 3487 case Mips::BI__builtin_msa_insve_w: 3488 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3489 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3490 // These intrinsics take an unsigned 1 bit immediate. 3491 case Mips::BI__builtin_msa_copy_s_d: 3492 case Mips::BI__builtin_msa_copy_u_d: 3493 case Mips::BI__builtin_msa_insve_d: 3494 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3495 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3496 // Memory offsets and immediate loads. 3497 // These intrinsics take a signed 10 bit immediate. 
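// (ldi_b below is given the wider range [-128, 255], presumably so a byte
// constant can be written either as a signed or as an unsigned 8-bit value;
// the other ldi variants use the signed 10-bit range [-512, 511].)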
3498 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3499 case Mips::BI__builtin_msa_ldi_h: 3500 case Mips::BI__builtin_msa_ldi_w: 3501 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3502 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3503 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3504 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3505 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3506 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3507 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3508 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3509 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3510 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3511 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3512 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3513 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3514 } 3515 3516 if (!m) 3517 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3518 3519 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3520 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3521 } 3522 3523 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str, 3524 /// advancing the pointer over the consumed characters. The decoded type is 3525 /// returned. If the decoded type represents a constant integer with a 3526 /// constraint on its value then Mask is set to that value. The type descriptors 3527 /// used in Str are specific to PPC MMA builtins and are documented in the file 3528 /// defining the PPC builtins. 3529 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 3530 unsigned &Mask) { 3531 bool RequireICE = false; 3532 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 3533 switch (*Str++) { 3534 case 'V': 3535 return Context.getVectorType(Context.UnsignedCharTy, 16, 3536 VectorType::VectorKind::AltiVecVector); 3537 case 'i': { 3538 char *End; 3539 unsigned size = strtoul(Str, &End, 10); 3540 assert(End != Str && "Missing constant parameter constraint"); 3541 Str = End; 3542 Mask = size; 3543 return Context.IntTy; 3544 } 3545 case 'W': { 3546 char *End; 3547 unsigned size = strtoul(Str, &End, 10); 3548 assert(End != Str && "Missing PowerPC MMA type size"); 3549 Str = End; 3550 QualType Type; 3551 switch (size) { 3552 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 3553 case size: Type = Context.Id##Ty; break; 3554 #include "clang/Basic/PPCTypes.def" 3555 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 3556 } 3557 bool CheckVectorArgs = false; 3558 while (!CheckVectorArgs) { 3559 switch (*Str++) { 3560 case '*': 3561 Type = Context.getPointerType(Type); 3562 break; 3563 case 'C': 3564 Type = Type.withConst(); 3565 break; 3566 default: 3567 CheckVectorArgs = true; 3568 --Str; 3569 break; 3570 } 3571 } 3572 return Type; 3573 } 3574 default: 3575 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 3576 } 3577 } 3578 3579 static bool isPPC_64Builtin(unsigned BuiltinID) { 3580 // These builtins only work on PPC 64bit targets. 
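// (CheckPPCBuiltinFunctionCall below rejects these with
// err_64_bit_builtin_32_bit_tgt when the target's intptr width is not 64
// bits.)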
3581 switch (BuiltinID) { 3582 case PPC::BI__builtin_divde: 3583 case PPC::BI__builtin_divdeu: 3584 case PPC::BI__builtin_bpermd: 3585 case PPC::BI__builtin_ppc_ldarx: 3586 case PPC::BI__builtin_ppc_stdcx: 3587 case PPC::BI__builtin_ppc_tdw: 3588 case PPC::BI__builtin_ppc_trapd: 3589 case PPC::BI__builtin_ppc_cmpeqb: 3590 case PPC::BI__builtin_ppc_setb: 3591 case PPC::BI__builtin_ppc_mulhd: 3592 case PPC::BI__builtin_ppc_mulhdu: 3593 case PPC::BI__builtin_ppc_maddhd: 3594 case PPC::BI__builtin_ppc_maddhdu: 3595 case PPC::BI__builtin_ppc_maddld: 3596 case PPC::BI__builtin_ppc_load8r: 3597 case PPC::BI__builtin_ppc_store8r: 3598 case PPC::BI__builtin_ppc_insert_exp: 3599 case PPC::BI__builtin_ppc_extract_sig: 3600 case PPC::BI__builtin_ppc_addex: 3601 case PPC::BI__builtin_darn: 3602 case PPC::BI__builtin_darn_raw: 3603 case PPC::BI__builtin_ppc_compare_and_swaplp: 3604 case PPC::BI__builtin_ppc_fetch_and_addlp: 3605 case PPC::BI__builtin_ppc_fetch_and_andlp: 3606 case PPC::BI__builtin_ppc_fetch_and_orlp: 3607 case PPC::BI__builtin_ppc_fetch_and_swaplp: 3608 return true; 3609 } 3610 return false; 3611 } 3612 3613 static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall, 3614 StringRef FeatureToCheck, unsigned DiagID, 3615 StringRef DiagArg = "") { 3616 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck)) 3617 return false; 3618 3619 if (DiagArg.empty()) 3620 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange(); 3621 else 3622 S.Diag(TheCall->getBeginLoc(), DiagID) 3623 << DiagArg << TheCall->getSourceRange(); 3624 3625 return true; 3626 } 3627 3628 /// Returns true if the argument consists of one contiguous run of 1s with any 3629 /// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so 3630 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not, 3631 /// since all 1s are not contiguous. 3632 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) { 3633 llvm::APSInt Result; 3634 // We can't check the value of a dependent argument. 3635 Expr *Arg = TheCall->getArg(ArgNum); 3636 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3637 return false; 3638 3639 // Check constant-ness first. 3640 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3641 return true; 3642 3643 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s. 
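// Note on the check below: a non-wrapping run such as 0x0000FFF0 satisfies
// isShiftedMask() directly, while a wrap-around run such as 0xFF0000FF is the
// complement of a non-wrapping run (0x00FFFF00 here), so testing ~Result
// covers the wrapped case. Together the two tests also accept 0x0 and the
// all-ones value, matching the documentation above.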
3644 if (Result.isShiftedMask() || (~Result).isShiftedMask()) 3645 return false; 3646 3647 return Diag(TheCall->getBeginLoc(), 3648 diag::err_argument_not_contiguous_bit_field) 3649 << ArgNum << Arg->getSourceRange(); 3650 } 3651 3652 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3653 CallExpr *TheCall) { 3654 unsigned i = 0, l = 0, u = 0; 3655 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3656 llvm::APSInt Result; 3657 3658 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit) 3659 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3660 << TheCall->getSourceRange(); 3661 3662 switch (BuiltinID) { 3663 default: return false; 3664 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3665 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3666 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3667 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3668 case PPC::BI__builtin_altivec_dss: 3669 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3670 case PPC::BI__builtin_tbegin: 3671 case PPC::BI__builtin_tend: 3672 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) || 3673 SemaFeatureCheck(*this, TheCall, "htm", 3674 diag::err_ppc_builtin_requires_htm); 3675 case PPC::BI__builtin_tsr: 3676 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 3677 SemaFeatureCheck(*this, TheCall, "htm", 3678 diag::err_ppc_builtin_requires_htm); 3679 case PPC::BI__builtin_tabortwc: 3680 case PPC::BI__builtin_tabortdc: 3681 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3682 SemaFeatureCheck(*this, TheCall, "htm", 3683 diag::err_ppc_builtin_requires_htm); 3684 case PPC::BI__builtin_tabortwci: 3685 case PPC::BI__builtin_tabortdci: 3686 return SemaFeatureCheck(*this, TheCall, "htm", 3687 diag::err_ppc_builtin_requires_htm) || 3688 (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3689 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31)); 3690 case PPC::BI__builtin_tabort: 3691 case PPC::BI__builtin_tcheck: 3692 case PPC::BI__builtin_treclaim: 3693 case PPC::BI__builtin_trechkpt: 3694 case PPC::BI__builtin_tendall: 3695 case PPC::BI__builtin_tresume: 3696 case PPC::BI__builtin_tsuspend: 3697 case PPC::BI__builtin_get_texasr: 3698 case PPC::BI__builtin_get_texasru: 3699 case PPC::BI__builtin_get_tfhar: 3700 case PPC::BI__builtin_get_tfiar: 3701 case PPC::BI__builtin_set_texasr: 3702 case PPC::BI__builtin_set_texasru: 3703 case PPC::BI__builtin_set_tfhar: 3704 case PPC::BI__builtin_set_tfiar: 3705 case PPC::BI__builtin_ttest: 3706 return SemaFeatureCheck(*this, TheCall, "htm", 3707 diag::err_ppc_builtin_requires_htm); 3708 // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05', 3709 // __builtin_(un)pack_longdouble are available only if long double uses IBM 3710 // extended double representation. 
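// Illustrative usage, assuming the ibmlongdouble ABI (the variable names are
// hypothetical): the constant second argument of __builtin_unpack_longdouble
// selects which of the two underlying doubles is returned.
//   double D0 = __builtin_unpack_longdouble(LD, 0);
//   double D1 = __builtin_unpack_longdouble(LD, 1);
//   long double Repacked = __builtin_pack_longdouble(D0, D1);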
3711 case PPC::BI__builtin_unpack_longdouble: 3712 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1)) 3713 return true; 3714 LLVM_FALLTHROUGH; 3715 case PPC::BI__builtin_pack_longdouble: 3716 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble()) 3717 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi) 3718 << "ibmlongdouble"; 3719 return false; 3720 case PPC::BI__builtin_altivec_dst: 3721 case PPC::BI__builtin_altivec_dstt: 3722 case PPC::BI__builtin_altivec_dstst: 3723 case PPC::BI__builtin_altivec_dststt: 3724 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 3725 case PPC::BI__builtin_vsx_xxpermdi: 3726 case PPC::BI__builtin_vsx_xxsldwi: 3727 return SemaBuiltinVSX(TheCall); 3728 case PPC::BI__builtin_divwe: 3729 case PPC::BI__builtin_divweu: 3730 case PPC::BI__builtin_divde: 3731 case PPC::BI__builtin_divdeu: 3732 return SemaFeatureCheck(*this, TheCall, "extdiv", 3733 diag::err_ppc_builtin_only_on_arch, "7"); 3734 case PPC::BI__builtin_bpermd: 3735 return SemaFeatureCheck(*this, TheCall, "bpermd", 3736 diag::err_ppc_builtin_only_on_arch, "7"); 3737 case PPC::BI__builtin_unpack_vector_int128: 3738 return SemaFeatureCheck(*this, TheCall, "vsx", 3739 diag::err_ppc_builtin_only_on_arch, "7") || 3740 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3741 case PPC::BI__builtin_pack_vector_int128: 3742 return SemaFeatureCheck(*this, TheCall, "vsx", 3743 diag::err_ppc_builtin_only_on_arch, "7"); 3744 case PPC::BI__builtin_altivec_vgnb: 3745 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 3746 case PPC::BI__builtin_altivec_vec_replace_elt: 3747 case PPC::BI__builtin_altivec_vec_replace_unaligned: { 3748 QualType VecTy = TheCall->getArg(0)->getType(); 3749 QualType EltTy = TheCall->getArg(1)->getType(); 3750 unsigned Width = Context.getIntWidth(EltTy); 3751 return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) || 3752 !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy); 3753 } 3754 case PPC::BI__builtin_vsx_xxeval: 3755 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 3756 case PPC::BI__builtin_altivec_vsldbi: 3757 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3758 case PPC::BI__builtin_altivec_vsrdbi: 3759 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3760 case PPC::BI__builtin_vsx_xxpermx: 3761 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 3762 case PPC::BI__builtin_ppc_tw: 3763 case PPC::BI__builtin_ppc_tdw: 3764 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); 3765 case PPC::BI__builtin_ppc_cmpeqb: 3766 case PPC::BI__builtin_ppc_setb: 3767 case PPC::BI__builtin_ppc_maddhd: 3768 case PPC::BI__builtin_ppc_maddhdu: 3769 case PPC::BI__builtin_ppc_maddld: 3770 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3771 diag::err_ppc_builtin_only_on_arch, "9"); 3772 case PPC::BI__builtin_ppc_cmprb: 3773 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3774 diag::err_ppc_builtin_only_on_arch, "9") || 3775 SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 3776 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must 3777 // be a constant that represents a contiguous bit field. 
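// For example, 0x0000FFFF and the wrap-around mask 0xFF0000FF are accepted
// here, while a split pattern such as 0x0F0F0000 is rejected by
// SemaValueIsRunOfOnes.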
3778 case PPC::BI__builtin_ppc_rlwnm: 3779 return SemaValueIsRunOfOnes(TheCall, 2); 3780 case PPC::BI__builtin_ppc_rlwimi: 3781 case PPC::BI__builtin_ppc_rldimi: 3782 return SemaBuiltinConstantArg(TheCall, 2, Result) || 3783 SemaValueIsRunOfOnes(TheCall, 3); 3784 case PPC::BI__builtin_ppc_extract_exp: 3785 case PPC::BI__builtin_ppc_extract_sig: 3786 case PPC::BI__builtin_ppc_insert_exp: 3787 return SemaFeatureCheck(*this, TheCall, "power9-vector", 3788 diag::err_ppc_builtin_only_on_arch, "9"); 3789 case PPC::BI__builtin_ppc_addex: { 3790 if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3791 diag::err_ppc_builtin_only_on_arch, "9") || 3792 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 3793 return true; 3794 // Output warning for reserved values 1 to 3. 3795 int ArgValue = 3796 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue(); 3797 if (ArgValue != 0) 3798 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour) 3799 << ArgValue; 3800 return false; 3801 } 3802 case PPC::BI__builtin_ppc_mtfsb0: 3803 case PPC::BI__builtin_ppc_mtfsb1: 3804 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 3805 case PPC::BI__builtin_ppc_mtfsf: 3806 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255); 3807 case PPC::BI__builtin_ppc_mtfsfi: 3808 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 3809 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 3810 case PPC::BI__builtin_ppc_alignx: 3811 return SemaBuiltinConstantArgPower2(TheCall, 0); 3812 case PPC::BI__builtin_ppc_rdlam: 3813 return SemaValueIsRunOfOnes(TheCall, 2); 3814 case PPC::BI__builtin_ppc_icbt: 3815 case PPC::BI__builtin_ppc_sthcx: 3816 case PPC::BI__builtin_ppc_stbcx: 3817 case PPC::BI__builtin_ppc_lharx: 3818 case PPC::BI__builtin_ppc_lbarx: 3819 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 3820 diag::err_ppc_builtin_only_on_arch, "8"); 3821 case PPC::BI__builtin_vsx_ldrmb: 3822 case PPC::BI__builtin_vsx_strmb: 3823 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 3824 diag::err_ppc_builtin_only_on_arch, "8") || 3825 SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 3826 case PPC::BI__builtin_altivec_vcntmbb: 3827 case PPC::BI__builtin_altivec_vcntmbh: 3828 case PPC::BI__builtin_altivec_vcntmbw: 3829 case PPC::BI__builtin_altivec_vcntmbd: 3830 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3831 case PPC::BI__builtin_darn: 3832 case PPC::BI__builtin_darn_raw: 3833 case PPC::BI__builtin_darn_32: 3834 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3835 diag::err_ppc_builtin_only_on_arch, "9"); 3836 case PPC::BI__builtin_vsx_xxgenpcvbm: 3837 case PPC::BI__builtin_vsx_xxgenpcvhm: 3838 case PPC::BI__builtin_vsx_xxgenpcvwm: 3839 case PPC::BI__builtin_vsx_xxgenpcvdm: 3840 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 3841 case PPC::BI__builtin_ppc_compare_exp_uo: 3842 case PPC::BI__builtin_ppc_compare_exp_lt: 3843 case PPC::BI__builtin_ppc_compare_exp_gt: 3844 case PPC::BI__builtin_ppc_compare_exp_eq: 3845 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3846 diag::err_ppc_builtin_only_on_arch, "9") || 3847 SemaFeatureCheck(*this, TheCall, "vsx", 3848 diag::err_ppc_builtin_requires_vsx); 3849 case PPC::BI__builtin_ppc_test_data_class: { 3850 // Check if the first argument of the __builtin_ppc_test_data_class call is 3851 // valid. The argument must be either a 'float' or a 'double'. 
3852 QualType ArgType = TheCall->getArg(0)->getType(); 3853 if (ArgType != QualType(Context.FloatTy) && 3854 ArgType != QualType(Context.DoubleTy)) 3855 return Diag(TheCall->getBeginLoc(), 3856 diag::err_ppc_invalid_test_data_class_type); 3857 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3858 diag::err_ppc_builtin_only_on_arch, "9") || 3859 SemaFeatureCheck(*this, TheCall, "vsx", 3860 diag::err_ppc_builtin_requires_vsx) || 3861 SemaBuiltinConstantArgRange(TheCall, 1, 0, 127); 3862 } 3863 case PPC::BI__builtin_ppc_load8r: 3864 case PPC::BI__builtin_ppc_store8r: 3865 return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions", 3866 diag::err_ppc_builtin_only_on_arch, "7"); 3867 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \ 3868 case PPC::BI__builtin_##Name: \ 3869 return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types); 3870 #include "clang/Basic/BuiltinsPPC.def" 3871 } 3872 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3873 } 3874 3875 // Check if the given type is a non-pointer PPC MMA type. This function is used 3876 // in Sema to prevent invalid uses of restricted PPC MMA types. 3877 bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) { 3878 if (Type->isPointerType() || Type->isArrayType()) 3879 return false; 3880 3881 QualType CoreType = Type.getCanonicalType().getUnqualifiedType(); 3882 #define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty 3883 if (false 3884 #include "clang/Basic/PPCTypes.def" 3885 ) { 3886 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type); 3887 return true; 3888 } 3889 return false; 3890 } 3891 3892 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, 3893 CallExpr *TheCall) { 3894 // Position of the memory order and scope arguments in the builtin. 3895 unsigned OrderIndex, ScopeIndex; 3896 switch (BuiltinID) { 3897 case AMDGPU::BI__builtin_amdgcn_atomic_inc32: 3898 case AMDGPU::BI__builtin_amdgcn_atomic_inc64: 3899 case AMDGPU::BI__builtin_amdgcn_atomic_dec32: 3900 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: 3901 OrderIndex = 2; 3902 ScopeIndex = 3; 3903 break; 3904 case AMDGPU::BI__builtin_amdgcn_fence: 3905 OrderIndex = 0; 3906 ScopeIndex = 1; 3907 break; 3908 default: 3909 return false; 3910 } 3911 3912 ExprResult Arg = TheCall->getArg(OrderIndex); 3913 auto ArgExpr = Arg.get(); 3914 Expr::EvalResult ArgResult; 3915 3916 if (!ArgExpr->EvaluateAsInt(ArgResult, Context)) 3917 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int) 3918 << ArgExpr->getType(); 3919 auto Ord = ArgResult.Val.getInt().getZExtValue(); 3920 3921 // Check validity of memory ordering as per C11 / C++11's memory model. 3922 // Only the fence builtin needs this check; atomic dec/inc allow all memory orders.
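// For reference, the C ABI ordering values follow the __ATOMIC_* constants
// used by llvm::AtomicOrderingCABI: relaxed = 0, consume = 1, acquire = 2,
// release = 3, acq_rel = 4, seq_cst = 5. The fence builtin additionally
// rejects relaxed and consume below; the atomic inc/dec builtins accept all
// six.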
3923 if (!llvm::isValidAtomicOrderingCABI(Ord)) 3924 return Diag(ArgExpr->getBeginLoc(), 3925 diag::warn_atomic_op_has_invalid_memory_order) 3926 << ArgExpr->getSourceRange(); 3927 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) { 3928 case llvm::AtomicOrderingCABI::relaxed: 3929 case llvm::AtomicOrderingCABI::consume: 3930 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence) 3931 return Diag(ArgExpr->getBeginLoc(), 3932 diag::warn_atomic_op_has_invalid_memory_order) 3933 << ArgExpr->getSourceRange(); 3934 break; 3935 case llvm::AtomicOrderingCABI::acquire: 3936 case llvm::AtomicOrderingCABI::release: 3937 case llvm::AtomicOrderingCABI::acq_rel: 3938 case llvm::AtomicOrderingCABI::seq_cst: 3939 break; 3940 } 3941 3942 Arg = TheCall->getArg(ScopeIndex); 3943 ArgExpr = Arg.get(); 3944 Expr::EvalResult ArgResult1; 3945 // Check that sync scope is a constant literal 3946 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 3947 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 3948 << ArgExpr->getType(); 3949 3950 return false; 3951 } 3952 3953 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) { 3954 llvm::APSInt Result; 3955 3956 // We can't check the value of a dependent argument. 3957 Expr *Arg = TheCall->getArg(ArgNum); 3958 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3959 return false; 3960 3961 // Check constant-ness first. 3962 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3963 return true; 3964 3965 int64_t Val = Result.getSExtValue(); 3966 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7)) 3967 return false; 3968 3969 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul) 3970 << Arg->getSourceRange(); 3971 } 3972 3973 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 3974 unsigned BuiltinID, 3975 CallExpr *TheCall) { 3976 // CodeGenFunction can also detect this, but this gives a better error 3977 // message. 3978 bool FeatureMissing = false; 3979 SmallVector<StringRef> ReqFeatures; 3980 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 3981 Features.split(ReqFeatures, ','); 3982 3983 // Check if each required feature is included 3984 for (StringRef F : ReqFeatures) { 3985 SmallVector<StringRef> ReqOpFeatures; 3986 F.split(ReqOpFeatures, '|'); 3987 bool HasFeature = false; 3988 for (StringRef OF : ReqOpFeatures) { 3989 if (TI.hasFeature(OF)) { 3990 HasFeature = true; 3991 continue; 3992 } 3993 } 3994 3995 if (!HasFeature) { 3996 std::string FeatureStrs; 3997 for (StringRef OF : ReqOpFeatures) { 3998 // If the feature is 64bit, alter the string so it will print better in 3999 // the diagnostic. 4000 if (OF == "64bit") 4001 OF = "RV64"; 4002 4003 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 4004 OF.consume_front("experimental-"); 4005 std::string FeatureStr = OF.str(); 4006 FeatureStr[0] = std::toupper(FeatureStr[0]); 4007 // Combine strings. 4008 FeatureStrs += FeatureStrs == "" ? 
"" : ", "; 4009 FeatureStrs += "'"; 4010 FeatureStrs += FeatureStr; 4011 FeatureStrs += "'"; 4012 } 4013 // Error message 4014 FeatureMissing = true; 4015 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 4016 << TheCall->getSourceRange() << StringRef(FeatureStrs); 4017 } 4018 } 4019 4020 if (FeatureMissing) 4021 return true; 4022 4023 switch (BuiltinID) { 4024 case RISCVVector::BI__builtin_rvv_vsetvli: 4025 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 4026 CheckRISCVLMUL(TheCall, 2); 4027 case RISCVVector::BI__builtin_rvv_vsetvlimax: 4028 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4029 CheckRISCVLMUL(TheCall, 1); 4030 } 4031 4032 return false; 4033 } 4034 4035 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 4036 CallExpr *TheCall) { 4037 if (BuiltinID == SystemZ::BI__builtin_tabort) { 4038 Expr *Arg = TheCall->getArg(0); 4039 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 4040 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 4041 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 4042 << Arg->getSourceRange(); 4043 } 4044 4045 // For intrinsics which take an immediate value as part of the instruction, 4046 // range check them here. 4047 unsigned i = 0, l = 0, u = 0; 4048 switch (BuiltinID) { 4049 default: return false; 4050 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 4051 case SystemZ::BI__builtin_s390_verimb: 4052 case SystemZ::BI__builtin_s390_verimh: 4053 case SystemZ::BI__builtin_s390_verimf: 4054 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 4055 case SystemZ::BI__builtin_s390_vfaeb: 4056 case SystemZ::BI__builtin_s390_vfaeh: 4057 case SystemZ::BI__builtin_s390_vfaef: 4058 case SystemZ::BI__builtin_s390_vfaebs: 4059 case SystemZ::BI__builtin_s390_vfaehs: 4060 case SystemZ::BI__builtin_s390_vfaefs: 4061 case SystemZ::BI__builtin_s390_vfaezb: 4062 case SystemZ::BI__builtin_s390_vfaezh: 4063 case SystemZ::BI__builtin_s390_vfaezf: 4064 case SystemZ::BI__builtin_s390_vfaezbs: 4065 case SystemZ::BI__builtin_s390_vfaezhs: 4066 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 4067 case SystemZ::BI__builtin_s390_vfisb: 4068 case SystemZ::BI__builtin_s390_vfidb: 4069 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 4070 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4071 case SystemZ::BI__builtin_s390_vftcisb: 4072 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 4073 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 4074 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 4075 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 4076 case SystemZ::BI__builtin_s390_vstrcb: 4077 case SystemZ::BI__builtin_s390_vstrch: 4078 case SystemZ::BI__builtin_s390_vstrcf: 4079 case SystemZ::BI__builtin_s390_vstrczb: 4080 case SystemZ::BI__builtin_s390_vstrczh: 4081 case SystemZ::BI__builtin_s390_vstrczf: 4082 case SystemZ::BI__builtin_s390_vstrcbs: 4083 case SystemZ::BI__builtin_s390_vstrchs: 4084 case SystemZ::BI__builtin_s390_vstrcfs: 4085 case SystemZ::BI__builtin_s390_vstrczbs: 4086 case SystemZ::BI__builtin_s390_vstrczhs: 4087 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 4088 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 4089 case SystemZ::BI__builtin_s390_vfminsb: 4090 case SystemZ::BI__builtin_s390_vfmaxsb: 4091 case SystemZ::BI__builtin_s390_vfmindb: 4092 case 
SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 4093 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 4094 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 4095 case SystemZ::BI__builtin_s390_vclfnhs: 4096 case SystemZ::BI__builtin_s390_vclfnls: 4097 case SystemZ::BI__builtin_s390_vcfn: 4098 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; 4099 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; 4100 } 4101 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4102 } 4103 4104 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 4105 /// This checks that the target supports __builtin_cpu_supports and 4106 /// that the string argument is constant and valid. 4107 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 4108 CallExpr *TheCall) { 4109 Expr *Arg = TheCall->getArg(0); 4110 4111 // Check if the argument is a string literal. 4112 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4113 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4114 << Arg->getSourceRange(); 4115 4116 // Check the contents of the string. 4117 StringRef Feature = 4118 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4119 if (!TI.validateCpuSupports(Feature)) 4120 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 4121 << Arg->getSourceRange(); 4122 return false; 4123 } 4124 4125 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 4126 /// This checks that the target supports __builtin_cpu_is and 4127 /// that the string argument is constant and valid. 4128 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 4129 Expr *Arg = TheCall->getArg(0); 4130 4131 // Check if the argument is a string literal. 4132 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4133 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4134 << Arg->getSourceRange(); 4135 4136 // Check the contents of the string. 4137 StringRef Feature = 4138 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4139 if (!TI.validateCpuIs(Feature)) 4140 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 4141 << Arg->getSourceRange(); 4142 return false; 4143 } 4144 4145 // Check if the rounding mode is legal. 4146 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 4147 // Indicates if this instruction has rounding control or just SAE. 
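// Sketch of the immediate layout validated below (it mirrors the public
// _MM_FROUND_* macros): bits 1:0 select the rounding mode, bit 2 (value 4)
// means "use the current MXCSR rounding direction", and bit 3 (value 8) is
// the suppress-all-exceptions (SAE) flag. Hence the accepted immediates are
// 4, 8 or 12 for SAE-only intrinsics, and 4 or 8..11 for intrinsics with
// embedded rounding control.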
4148 bool HasRC = false; 4149 4150 unsigned ArgNum = 0; 4151 switch (BuiltinID) { 4152 default: 4153 return false; 4154 case X86::BI__builtin_ia32_vcvttsd2si32: 4155 case X86::BI__builtin_ia32_vcvttsd2si64: 4156 case X86::BI__builtin_ia32_vcvttsd2usi32: 4157 case X86::BI__builtin_ia32_vcvttsd2usi64: 4158 case X86::BI__builtin_ia32_vcvttss2si32: 4159 case X86::BI__builtin_ia32_vcvttss2si64: 4160 case X86::BI__builtin_ia32_vcvttss2usi32: 4161 case X86::BI__builtin_ia32_vcvttss2usi64: 4162 case X86::BI__builtin_ia32_vcvttsh2si32: 4163 case X86::BI__builtin_ia32_vcvttsh2si64: 4164 case X86::BI__builtin_ia32_vcvttsh2usi32: 4165 case X86::BI__builtin_ia32_vcvttsh2usi64: 4166 ArgNum = 1; 4167 break; 4168 case X86::BI__builtin_ia32_maxpd512: 4169 case X86::BI__builtin_ia32_maxps512: 4170 case X86::BI__builtin_ia32_minpd512: 4171 case X86::BI__builtin_ia32_minps512: 4172 case X86::BI__builtin_ia32_maxph512: 4173 case X86::BI__builtin_ia32_minph512: 4174 ArgNum = 2; 4175 break; 4176 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 4177 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 4178 case X86::BI__builtin_ia32_cvtps2pd512_mask: 4179 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 4180 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 4181 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 4182 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 4183 case X86::BI__builtin_ia32_cvttps2dq512_mask: 4184 case X86::BI__builtin_ia32_cvttps2qq512_mask: 4185 case X86::BI__builtin_ia32_cvttps2udq512_mask: 4186 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 4187 case X86::BI__builtin_ia32_vcvttph2w512_mask: 4188 case X86::BI__builtin_ia32_vcvttph2uw512_mask: 4189 case X86::BI__builtin_ia32_vcvttph2dq512_mask: 4190 case X86::BI__builtin_ia32_vcvttph2udq512_mask: 4191 case X86::BI__builtin_ia32_vcvttph2qq512_mask: 4192 case X86::BI__builtin_ia32_vcvttph2uqq512_mask: 4193 case X86::BI__builtin_ia32_exp2pd_mask: 4194 case X86::BI__builtin_ia32_exp2ps_mask: 4195 case X86::BI__builtin_ia32_getexppd512_mask: 4196 case X86::BI__builtin_ia32_getexpps512_mask: 4197 case X86::BI__builtin_ia32_getexpph512_mask: 4198 case X86::BI__builtin_ia32_rcp28pd_mask: 4199 case X86::BI__builtin_ia32_rcp28ps_mask: 4200 case X86::BI__builtin_ia32_rsqrt28pd_mask: 4201 case X86::BI__builtin_ia32_rsqrt28ps_mask: 4202 case X86::BI__builtin_ia32_vcomisd: 4203 case X86::BI__builtin_ia32_vcomiss: 4204 case X86::BI__builtin_ia32_vcomish: 4205 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 4206 ArgNum = 3; 4207 break; 4208 case X86::BI__builtin_ia32_cmppd512_mask: 4209 case X86::BI__builtin_ia32_cmpps512_mask: 4210 case X86::BI__builtin_ia32_cmpsd_mask: 4211 case X86::BI__builtin_ia32_cmpss_mask: 4212 case X86::BI__builtin_ia32_cmpsh_mask: 4213 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask: 4214 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask: 4215 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 4216 case X86::BI__builtin_ia32_getexpsd128_round_mask: 4217 case X86::BI__builtin_ia32_getexpss128_round_mask: 4218 case X86::BI__builtin_ia32_getexpsh128_round_mask: 4219 case X86::BI__builtin_ia32_getmantpd512_mask: 4220 case X86::BI__builtin_ia32_getmantps512_mask: 4221 case X86::BI__builtin_ia32_getmantph512_mask: 4222 case X86::BI__builtin_ia32_maxsd_round_mask: 4223 case X86::BI__builtin_ia32_maxss_round_mask: 4224 case X86::BI__builtin_ia32_maxsh_round_mask: 4225 case X86::BI__builtin_ia32_minsd_round_mask: 4226 case X86::BI__builtin_ia32_minss_round_mask: 4227 case X86::BI__builtin_ia32_minsh_round_mask: 4228 case X86::BI__builtin_ia32_rcp28sd_round_mask: 
4229 case X86::BI__builtin_ia32_rcp28ss_round_mask: 4230 case X86::BI__builtin_ia32_reducepd512_mask: 4231 case X86::BI__builtin_ia32_reduceps512_mask: 4232 case X86::BI__builtin_ia32_reduceph512_mask: 4233 case X86::BI__builtin_ia32_rndscalepd_mask: 4234 case X86::BI__builtin_ia32_rndscaleps_mask: 4235 case X86::BI__builtin_ia32_rndscaleph_mask: 4236 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 4237 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 4238 ArgNum = 4; 4239 break; 4240 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4241 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4242 case X86::BI__builtin_ia32_fixupimmps512_mask: 4243 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4244 case X86::BI__builtin_ia32_fixupimmsd_mask: 4245 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4246 case X86::BI__builtin_ia32_fixupimmss_mask: 4247 case X86::BI__builtin_ia32_fixupimmss_maskz: 4248 case X86::BI__builtin_ia32_getmantsd_round_mask: 4249 case X86::BI__builtin_ia32_getmantss_round_mask: 4250 case X86::BI__builtin_ia32_getmantsh_round_mask: 4251 case X86::BI__builtin_ia32_rangepd512_mask: 4252 case X86::BI__builtin_ia32_rangeps512_mask: 4253 case X86::BI__builtin_ia32_rangesd128_round_mask: 4254 case X86::BI__builtin_ia32_rangess128_round_mask: 4255 case X86::BI__builtin_ia32_reducesd_mask: 4256 case X86::BI__builtin_ia32_reducess_mask: 4257 case X86::BI__builtin_ia32_reducesh_mask: 4258 case X86::BI__builtin_ia32_rndscalesd_round_mask: 4259 case X86::BI__builtin_ia32_rndscaless_round_mask: 4260 case X86::BI__builtin_ia32_rndscalesh_round_mask: 4261 ArgNum = 5; 4262 break; 4263 case X86::BI__builtin_ia32_vcvtsd2si64: 4264 case X86::BI__builtin_ia32_vcvtsd2si32: 4265 case X86::BI__builtin_ia32_vcvtsd2usi32: 4266 case X86::BI__builtin_ia32_vcvtsd2usi64: 4267 case X86::BI__builtin_ia32_vcvtss2si32: 4268 case X86::BI__builtin_ia32_vcvtss2si64: 4269 case X86::BI__builtin_ia32_vcvtss2usi32: 4270 case X86::BI__builtin_ia32_vcvtss2usi64: 4271 case X86::BI__builtin_ia32_vcvtsh2si32: 4272 case X86::BI__builtin_ia32_vcvtsh2si64: 4273 case X86::BI__builtin_ia32_vcvtsh2usi32: 4274 case X86::BI__builtin_ia32_vcvtsh2usi64: 4275 case X86::BI__builtin_ia32_sqrtpd512: 4276 case X86::BI__builtin_ia32_sqrtps512: 4277 case X86::BI__builtin_ia32_sqrtph512: 4278 ArgNum = 1; 4279 HasRC = true; 4280 break; 4281 case X86::BI__builtin_ia32_addph512: 4282 case X86::BI__builtin_ia32_divph512: 4283 case X86::BI__builtin_ia32_mulph512: 4284 case X86::BI__builtin_ia32_subph512: 4285 case X86::BI__builtin_ia32_addpd512: 4286 case X86::BI__builtin_ia32_addps512: 4287 case X86::BI__builtin_ia32_divpd512: 4288 case X86::BI__builtin_ia32_divps512: 4289 case X86::BI__builtin_ia32_mulpd512: 4290 case X86::BI__builtin_ia32_mulps512: 4291 case X86::BI__builtin_ia32_subpd512: 4292 case X86::BI__builtin_ia32_subps512: 4293 case X86::BI__builtin_ia32_cvtsi2sd64: 4294 case X86::BI__builtin_ia32_cvtsi2ss32: 4295 case X86::BI__builtin_ia32_cvtsi2ss64: 4296 case X86::BI__builtin_ia32_cvtusi2sd64: 4297 case X86::BI__builtin_ia32_cvtusi2ss32: 4298 case X86::BI__builtin_ia32_cvtusi2ss64: 4299 case X86::BI__builtin_ia32_vcvtusi2sh: 4300 case X86::BI__builtin_ia32_vcvtusi642sh: 4301 case X86::BI__builtin_ia32_vcvtsi2sh: 4302 case X86::BI__builtin_ia32_vcvtsi642sh: 4303 ArgNum = 2; 4304 HasRC = true; 4305 break; 4306 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 4307 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 4308 case X86::BI__builtin_ia32_vcvtpd2ph512_mask: 4309 case X86::BI__builtin_ia32_vcvtps2phx512_mask: 4310 case 
X86::BI__builtin_ia32_cvtpd2ps512_mask: 4311 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 4312 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 4313 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 4314 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 4315 case X86::BI__builtin_ia32_cvtps2dq512_mask: 4316 case X86::BI__builtin_ia32_cvtps2qq512_mask: 4317 case X86::BI__builtin_ia32_cvtps2udq512_mask: 4318 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 4319 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 4320 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 4321 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 4322 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 4323 case X86::BI__builtin_ia32_vcvtdq2ph512_mask: 4324 case X86::BI__builtin_ia32_vcvtudq2ph512_mask: 4325 case X86::BI__builtin_ia32_vcvtw2ph512_mask: 4326 case X86::BI__builtin_ia32_vcvtuw2ph512_mask: 4327 case X86::BI__builtin_ia32_vcvtph2w512_mask: 4328 case X86::BI__builtin_ia32_vcvtph2uw512_mask: 4329 case X86::BI__builtin_ia32_vcvtph2dq512_mask: 4330 case X86::BI__builtin_ia32_vcvtph2udq512_mask: 4331 case X86::BI__builtin_ia32_vcvtph2qq512_mask: 4332 case X86::BI__builtin_ia32_vcvtph2uqq512_mask: 4333 case X86::BI__builtin_ia32_vcvtqq2ph512_mask: 4334 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: 4335 ArgNum = 3; 4336 HasRC = true; 4337 break; 4338 case X86::BI__builtin_ia32_addsh_round_mask: 4339 case X86::BI__builtin_ia32_addss_round_mask: 4340 case X86::BI__builtin_ia32_addsd_round_mask: 4341 case X86::BI__builtin_ia32_divsh_round_mask: 4342 case X86::BI__builtin_ia32_divss_round_mask: 4343 case X86::BI__builtin_ia32_divsd_round_mask: 4344 case X86::BI__builtin_ia32_mulsh_round_mask: 4345 case X86::BI__builtin_ia32_mulss_round_mask: 4346 case X86::BI__builtin_ia32_mulsd_round_mask: 4347 case X86::BI__builtin_ia32_subsh_round_mask: 4348 case X86::BI__builtin_ia32_subss_round_mask: 4349 case X86::BI__builtin_ia32_subsd_round_mask: 4350 case X86::BI__builtin_ia32_scalefph512_mask: 4351 case X86::BI__builtin_ia32_scalefpd512_mask: 4352 case X86::BI__builtin_ia32_scalefps512_mask: 4353 case X86::BI__builtin_ia32_scalefsd_round_mask: 4354 case X86::BI__builtin_ia32_scalefss_round_mask: 4355 case X86::BI__builtin_ia32_scalefsh_round_mask: 4356 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 4357 case X86::BI__builtin_ia32_vcvtss2sh_round_mask: 4358 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask: 4359 case X86::BI__builtin_ia32_sqrtsd_round_mask: 4360 case X86::BI__builtin_ia32_sqrtss_round_mask: 4361 case X86::BI__builtin_ia32_sqrtsh_round_mask: 4362 case X86::BI__builtin_ia32_vfmaddsd3_mask: 4363 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 4364 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 4365 case X86::BI__builtin_ia32_vfmaddss3_mask: 4366 case X86::BI__builtin_ia32_vfmaddss3_maskz: 4367 case X86::BI__builtin_ia32_vfmaddss3_mask3: 4368 case X86::BI__builtin_ia32_vfmaddsh3_mask: 4369 case X86::BI__builtin_ia32_vfmaddsh3_maskz: 4370 case X86::BI__builtin_ia32_vfmaddsh3_mask3: 4371 case X86::BI__builtin_ia32_vfmaddpd512_mask: 4372 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 4373 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 4374 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 4375 case X86::BI__builtin_ia32_vfmaddps512_mask: 4376 case X86::BI__builtin_ia32_vfmaddps512_maskz: 4377 case X86::BI__builtin_ia32_vfmaddps512_mask3: 4378 case X86::BI__builtin_ia32_vfmsubps512_mask3: 4379 case X86::BI__builtin_ia32_vfmaddph512_mask: 4380 case X86::BI__builtin_ia32_vfmaddph512_maskz: 4381 case X86::BI__builtin_ia32_vfmaddph512_mask3: 4382 case 
X86::BI__builtin_ia32_vfmsubph512_mask3: 4383 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 4384 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 4385 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 4386 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 4387 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 4388 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 4389 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 4390 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 4391 case X86::BI__builtin_ia32_vfmaddsubph512_mask: 4392 case X86::BI__builtin_ia32_vfmaddsubph512_maskz: 4393 case X86::BI__builtin_ia32_vfmaddsubph512_mask3: 4394 case X86::BI__builtin_ia32_vfmsubaddph512_mask3: 4395 case X86::BI__builtin_ia32_vfmaddcsh_mask: 4396 case X86::BI__builtin_ia32_vfmaddcsh_round_mask: 4397 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: 4398 case X86::BI__builtin_ia32_vfmaddcph512_mask: 4399 case X86::BI__builtin_ia32_vfmaddcph512_maskz: 4400 case X86::BI__builtin_ia32_vfmaddcph512_mask3: 4401 case X86::BI__builtin_ia32_vfcmaddcsh_mask: 4402 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask: 4403 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3: 4404 case X86::BI__builtin_ia32_vfcmaddcph512_mask: 4405 case X86::BI__builtin_ia32_vfcmaddcph512_maskz: 4406 case X86::BI__builtin_ia32_vfcmaddcph512_mask3: 4407 case X86::BI__builtin_ia32_vfmulcsh_mask: 4408 case X86::BI__builtin_ia32_vfmulcph512_mask: 4409 case X86::BI__builtin_ia32_vfcmulcsh_mask: 4410 case X86::BI__builtin_ia32_vfcmulcph512_mask: 4411 ArgNum = 4; 4412 HasRC = true; 4413 break; 4414 } 4415 4416 llvm::APSInt Result; 4417 4418 // We can't check the value of a dependent argument. 4419 Expr *Arg = TheCall->getArg(ArgNum); 4420 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4421 return false; 4422 4423 // Check constant-ness first. 4424 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4425 return true; 4426 4427 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 4428 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 4429 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding 4430 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 4431 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 4432 Result == 8/*ROUND_NO_EXC*/ || 4433 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || 4434 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 4435 return false; 4436 4437 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 4438 << Arg->getSourceRange(); 4439 } 4440 4441 // Check if the gather/scatter scale is legal. 
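// The scale immediate for these gather/scatter/prefetch builtins must be one
// of 1, 2, 4 or 8. As a hedged illustration (assuming the usual <immintrin.h>
// wrappers, which forward the scale unchanged into the immediate checked
// below):
//
//   __m256i Idx; double Base[64];
//   __m512d V = _mm512_i32gather_pd(Idx, Base, 8); // OK: legal scale
//   __m512d W = _mm512_i32gather_pd(Idx, Base, 3); // err_x86_builtin_invalid_scale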
4442 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 4443 CallExpr *TheCall) { 4444 unsigned ArgNum = 0; 4445 switch (BuiltinID) { 4446 default: 4447 return false; 4448 case X86::BI__builtin_ia32_gatherpfdpd: 4449 case X86::BI__builtin_ia32_gatherpfdps: 4450 case X86::BI__builtin_ia32_gatherpfqpd: 4451 case X86::BI__builtin_ia32_gatherpfqps: 4452 case X86::BI__builtin_ia32_scatterpfdpd: 4453 case X86::BI__builtin_ia32_scatterpfdps: 4454 case X86::BI__builtin_ia32_scatterpfqpd: 4455 case X86::BI__builtin_ia32_scatterpfqps: 4456 ArgNum = 3; 4457 break; 4458 case X86::BI__builtin_ia32_gatherd_pd: 4459 case X86::BI__builtin_ia32_gatherd_pd256: 4460 case X86::BI__builtin_ia32_gatherq_pd: 4461 case X86::BI__builtin_ia32_gatherq_pd256: 4462 case X86::BI__builtin_ia32_gatherd_ps: 4463 case X86::BI__builtin_ia32_gatherd_ps256: 4464 case X86::BI__builtin_ia32_gatherq_ps: 4465 case X86::BI__builtin_ia32_gatherq_ps256: 4466 case X86::BI__builtin_ia32_gatherd_q: 4467 case X86::BI__builtin_ia32_gatherd_q256: 4468 case X86::BI__builtin_ia32_gatherq_q: 4469 case X86::BI__builtin_ia32_gatherq_q256: 4470 case X86::BI__builtin_ia32_gatherd_d: 4471 case X86::BI__builtin_ia32_gatherd_d256: 4472 case X86::BI__builtin_ia32_gatherq_d: 4473 case X86::BI__builtin_ia32_gatherq_d256: 4474 case X86::BI__builtin_ia32_gather3div2df: 4475 case X86::BI__builtin_ia32_gather3div2di: 4476 case X86::BI__builtin_ia32_gather3div4df: 4477 case X86::BI__builtin_ia32_gather3div4di: 4478 case X86::BI__builtin_ia32_gather3div4sf: 4479 case X86::BI__builtin_ia32_gather3div4si: 4480 case X86::BI__builtin_ia32_gather3div8sf: 4481 case X86::BI__builtin_ia32_gather3div8si: 4482 case X86::BI__builtin_ia32_gather3siv2df: 4483 case X86::BI__builtin_ia32_gather3siv2di: 4484 case X86::BI__builtin_ia32_gather3siv4df: 4485 case X86::BI__builtin_ia32_gather3siv4di: 4486 case X86::BI__builtin_ia32_gather3siv4sf: 4487 case X86::BI__builtin_ia32_gather3siv4si: 4488 case X86::BI__builtin_ia32_gather3siv8sf: 4489 case X86::BI__builtin_ia32_gather3siv8si: 4490 case X86::BI__builtin_ia32_gathersiv8df: 4491 case X86::BI__builtin_ia32_gathersiv16sf: 4492 case X86::BI__builtin_ia32_gatherdiv8df: 4493 case X86::BI__builtin_ia32_gatherdiv16sf: 4494 case X86::BI__builtin_ia32_gathersiv8di: 4495 case X86::BI__builtin_ia32_gathersiv16si: 4496 case X86::BI__builtin_ia32_gatherdiv8di: 4497 case X86::BI__builtin_ia32_gatherdiv16si: 4498 case X86::BI__builtin_ia32_scatterdiv2df: 4499 case X86::BI__builtin_ia32_scatterdiv2di: 4500 case X86::BI__builtin_ia32_scatterdiv4df: 4501 case X86::BI__builtin_ia32_scatterdiv4di: 4502 case X86::BI__builtin_ia32_scatterdiv4sf: 4503 case X86::BI__builtin_ia32_scatterdiv4si: 4504 case X86::BI__builtin_ia32_scatterdiv8sf: 4505 case X86::BI__builtin_ia32_scatterdiv8si: 4506 case X86::BI__builtin_ia32_scattersiv2df: 4507 case X86::BI__builtin_ia32_scattersiv2di: 4508 case X86::BI__builtin_ia32_scattersiv4df: 4509 case X86::BI__builtin_ia32_scattersiv4di: 4510 case X86::BI__builtin_ia32_scattersiv4sf: 4511 case X86::BI__builtin_ia32_scattersiv4si: 4512 case X86::BI__builtin_ia32_scattersiv8sf: 4513 case X86::BI__builtin_ia32_scattersiv8si: 4514 case X86::BI__builtin_ia32_scattersiv8df: 4515 case X86::BI__builtin_ia32_scattersiv16sf: 4516 case X86::BI__builtin_ia32_scatterdiv8df: 4517 case X86::BI__builtin_ia32_scatterdiv16sf: 4518 case X86::BI__builtin_ia32_scattersiv8di: 4519 case X86::BI__builtin_ia32_scattersiv16si: 4520 case X86::BI__builtin_ia32_scatterdiv8di: 4521 case X86::BI__builtin_ia32_scatterdiv16si: 4522 
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

enum { TileRegLow = 0, TileRegHigh = 7 };

bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
                                             ArrayRef<int> ArgNums) {
  for (int ArgNum : ArgNums) {
    if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
      return true;
  }
  return false;
}

bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
                                        ArrayRef<int> ArgNums) {
  // The maximum number of tile registers is TileRegHigh + 1, so use one bit
  // per register in the bitset to track which registers have been used.
  std::bitset<TileRegHigh + 1> ArgValues;
  for (int ArgNum : ArgNums) {
    Expr *Arg = TheCall->getArg(ArgNum);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
      return true;
    int ArgExtValue = Result.getExtValue();
    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
           "Incorrect tile register num.");
    if (ArgValues.test(ArgExtValue))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_x86_builtin_tile_arg_duplicate)
             << TheCall->getArg(ArgNum)->getSourceRange();
    ArgValues.set(ArgExtValue);
  }
  return false;
}

bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
                                                ArrayRef<int> ArgNums) {
  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
         CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
}

bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
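  // As a sketch of what this catches (for the builtins listed in
  // isX86_32Builtin above): on an i386 target the call below is accepted,
  // while on an x86-64 target it is rejected with
  // err_32_bit_builtin_64_bit_tgt.
  //
  //   unsigned Flags = __builtin_ia32_readeflags_u32();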
4623 const llvm::Triple &TT = TI.getTriple(); 4624 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 4625 return Diag(TheCall->getCallee()->getBeginLoc(), 4626 diag::err_32_bit_builtin_64_bit_tgt); 4627 4628 // If the intrinsic has rounding or SAE make sure its valid. 4629 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 4630 return true; 4631 4632 // If the intrinsic has a gather/scatter scale immediate make sure its valid. 4633 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 4634 return true; 4635 4636 // If the intrinsic has a tile arguments, make sure they are valid. 4637 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall)) 4638 return true; 4639 4640 // For intrinsics which take an immediate value as part of the instruction, 4641 // range check them here. 4642 int i = 0, l = 0, u = 0; 4643 switch (BuiltinID) { 4644 default: 4645 return false; 4646 case X86::BI__builtin_ia32_vec_ext_v2si: 4647 case X86::BI__builtin_ia32_vec_ext_v2di: 4648 case X86::BI__builtin_ia32_vextractf128_pd256: 4649 case X86::BI__builtin_ia32_vextractf128_ps256: 4650 case X86::BI__builtin_ia32_vextractf128_si256: 4651 case X86::BI__builtin_ia32_extract128i256: 4652 case X86::BI__builtin_ia32_extractf64x4_mask: 4653 case X86::BI__builtin_ia32_extracti64x4_mask: 4654 case X86::BI__builtin_ia32_extractf32x8_mask: 4655 case X86::BI__builtin_ia32_extracti32x8_mask: 4656 case X86::BI__builtin_ia32_extractf64x2_256_mask: 4657 case X86::BI__builtin_ia32_extracti64x2_256_mask: 4658 case X86::BI__builtin_ia32_extractf32x4_256_mask: 4659 case X86::BI__builtin_ia32_extracti32x4_256_mask: 4660 i = 1; l = 0; u = 1; 4661 break; 4662 case X86::BI__builtin_ia32_vec_set_v2di: 4663 case X86::BI__builtin_ia32_vinsertf128_pd256: 4664 case X86::BI__builtin_ia32_vinsertf128_ps256: 4665 case X86::BI__builtin_ia32_vinsertf128_si256: 4666 case X86::BI__builtin_ia32_insert128i256: 4667 case X86::BI__builtin_ia32_insertf32x8: 4668 case X86::BI__builtin_ia32_inserti32x8: 4669 case X86::BI__builtin_ia32_insertf64x4: 4670 case X86::BI__builtin_ia32_inserti64x4: 4671 case X86::BI__builtin_ia32_insertf64x2_256: 4672 case X86::BI__builtin_ia32_inserti64x2_256: 4673 case X86::BI__builtin_ia32_insertf32x4_256: 4674 case X86::BI__builtin_ia32_inserti32x4_256: 4675 i = 2; l = 0; u = 1; 4676 break; 4677 case X86::BI__builtin_ia32_vpermilpd: 4678 case X86::BI__builtin_ia32_vec_ext_v4hi: 4679 case X86::BI__builtin_ia32_vec_ext_v4si: 4680 case X86::BI__builtin_ia32_vec_ext_v4sf: 4681 case X86::BI__builtin_ia32_vec_ext_v4di: 4682 case X86::BI__builtin_ia32_extractf32x4_mask: 4683 case X86::BI__builtin_ia32_extracti32x4_mask: 4684 case X86::BI__builtin_ia32_extractf64x2_512_mask: 4685 case X86::BI__builtin_ia32_extracti64x2_512_mask: 4686 i = 1; l = 0; u = 3; 4687 break; 4688 case X86::BI_mm_prefetch: 4689 case X86::BI__builtin_ia32_vec_ext_v8hi: 4690 case X86::BI__builtin_ia32_vec_ext_v8si: 4691 i = 1; l = 0; u = 7; 4692 break; 4693 case X86::BI__builtin_ia32_sha1rnds4: 4694 case X86::BI__builtin_ia32_blendpd: 4695 case X86::BI__builtin_ia32_shufpd: 4696 case X86::BI__builtin_ia32_vec_set_v4hi: 4697 case X86::BI__builtin_ia32_vec_set_v4si: 4698 case X86::BI__builtin_ia32_vec_set_v4di: 4699 case X86::BI__builtin_ia32_shuf_f32x4_256: 4700 case X86::BI__builtin_ia32_shuf_f64x2_256: 4701 case X86::BI__builtin_ia32_shuf_i32x4_256: 4702 case X86::BI__builtin_ia32_shuf_i64x2_256: 4703 case X86::BI__builtin_ia32_insertf64x2_512: 4704 case X86::BI__builtin_ia32_inserti64x2_512: 4705 case X86::BI__builtin_ia32_insertf32x4: 4706 
case X86::BI__builtin_ia32_inserti32x4: 4707 i = 2; l = 0; u = 3; 4708 break; 4709 case X86::BI__builtin_ia32_vpermil2pd: 4710 case X86::BI__builtin_ia32_vpermil2pd256: 4711 case X86::BI__builtin_ia32_vpermil2ps: 4712 case X86::BI__builtin_ia32_vpermil2ps256: 4713 i = 3; l = 0; u = 3; 4714 break; 4715 case X86::BI__builtin_ia32_cmpb128_mask: 4716 case X86::BI__builtin_ia32_cmpw128_mask: 4717 case X86::BI__builtin_ia32_cmpd128_mask: 4718 case X86::BI__builtin_ia32_cmpq128_mask: 4719 case X86::BI__builtin_ia32_cmpb256_mask: 4720 case X86::BI__builtin_ia32_cmpw256_mask: 4721 case X86::BI__builtin_ia32_cmpd256_mask: 4722 case X86::BI__builtin_ia32_cmpq256_mask: 4723 case X86::BI__builtin_ia32_cmpb512_mask: 4724 case X86::BI__builtin_ia32_cmpw512_mask: 4725 case X86::BI__builtin_ia32_cmpd512_mask: 4726 case X86::BI__builtin_ia32_cmpq512_mask: 4727 case X86::BI__builtin_ia32_ucmpb128_mask: 4728 case X86::BI__builtin_ia32_ucmpw128_mask: 4729 case X86::BI__builtin_ia32_ucmpd128_mask: 4730 case X86::BI__builtin_ia32_ucmpq128_mask: 4731 case X86::BI__builtin_ia32_ucmpb256_mask: 4732 case X86::BI__builtin_ia32_ucmpw256_mask: 4733 case X86::BI__builtin_ia32_ucmpd256_mask: 4734 case X86::BI__builtin_ia32_ucmpq256_mask: 4735 case X86::BI__builtin_ia32_ucmpb512_mask: 4736 case X86::BI__builtin_ia32_ucmpw512_mask: 4737 case X86::BI__builtin_ia32_ucmpd512_mask: 4738 case X86::BI__builtin_ia32_ucmpq512_mask: 4739 case X86::BI__builtin_ia32_vpcomub: 4740 case X86::BI__builtin_ia32_vpcomuw: 4741 case X86::BI__builtin_ia32_vpcomud: 4742 case X86::BI__builtin_ia32_vpcomuq: 4743 case X86::BI__builtin_ia32_vpcomb: 4744 case X86::BI__builtin_ia32_vpcomw: 4745 case X86::BI__builtin_ia32_vpcomd: 4746 case X86::BI__builtin_ia32_vpcomq: 4747 case X86::BI__builtin_ia32_vec_set_v8hi: 4748 case X86::BI__builtin_ia32_vec_set_v8si: 4749 i = 2; l = 0; u = 7; 4750 break; 4751 case X86::BI__builtin_ia32_vpermilpd256: 4752 case X86::BI__builtin_ia32_roundps: 4753 case X86::BI__builtin_ia32_roundpd: 4754 case X86::BI__builtin_ia32_roundps256: 4755 case X86::BI__builtin_ia32_roundpd256: 4756 case X86::BI__builtin_ia32_getmantpd128_mask: 4757 case X86::BI__builtin_ia32_getmantpd256_mask: 4758 case X86::BI__builtin_ia32_getmantps128_mask: 4759 case X86::BI__builtin_ia32_getmantps256_mask: 4760 case X86::BI__builtin_ia32_getmantpd512_mask: 4761 case X86::BI__builtin_ia32_getmantps512_mask: 4762 case X86::BI__builtin_ia32_getmantph128_mask: 4763 case X86::BI__builtin_ia32_getmantph256_mask: 4764 case X86::BI__builtin_ia32_getmantph512_mask: 4765 case X86::BI__builtin_ia32_vec_ext_v16qi: 4766 case X86::BI__builtin_ia32_vec_ext_v16hi: 4767 i = 1; l = 0; u = 15; 4768 break; 4769 case X86::BI__builtin_ia32_pblendd128: 4770 case X86::BI__builtin_ia32_blendps: 4771 case X86::BI__builtin_ia32_blendpd256: 4772 case X86::BI__builtin_ia32_shufpd256: 4773 case X86::BI__builtin_ia32_roundss: 4774 case X86::BI__builtin_ia32_roundsd: 4775 case X86::BI__builtin_ia32_rangepd128_mask: 4776 case X86::BI__builtin_ia32_rangepd256_mask: 4777 case X86::BI__builtin_ia32_rangepd512_mask: 4778 case X86::BI__builtin_ia32_rangeps128_mask: 4779 case X86::BI__builtin_ia32_rangeps256_mask: 4780 case X86::BI__builtin_ia32_rangeps512_mask: 4781 case X86::BI__builtin_ia32_getmantsd_round_mask: 4782 case X86::BI__builtin_ia32_getmantss_round_mask: 4783 case X86::BI__builtin_ia32_getmantsh_round_mask: 4784 case X86::BI__builtin_ia32_vec_set_v16qi: 4785 case X86::BI__builtin_ia32_vec_set_v16hi: 4786 i = 2; l = 0; u = 15; 4787 break; 4788 case 
X86::BI__builtin_ia32_vec_ext_v32qi: 4789 i = 1; l = 0; u = 31; 4790 break; 4791 case X86::BI__builtin_ia32_cmpps: 4792 case X86::BI__builtin_ia32_cmpss: 4793 case X86::BI__builtin_ia32_cmppd: 4794 case X86::BI__builtin_ia32_cmpsd: 4795 case X86::BI__builtin_ia32_cmpps256: 4796 case X86::BI__builtin_ia32_cmppd256: 4797 case X86::BI__builtin_ia32_cmpps128_mask: 4798 case X86::BI__builtin_ia32_cmppd128_mask: 4799 case X86::BI__builtin_ia32_cmpps256_mask: 4800 case X86::BI__builtin_ia32_cmppd256_mask: 4801 case X86::BI__builtin_ia32_cmpps512_mask: 4802 case X86::BI__builtin_ia32_cmppd512_mask: 4803 case X86::BI__builtin_ia32_cmpsd_mask: 4804 case X86::BI__builtin_ia32_cmpss_mask: 4805 case X86::BI__builtin_ia32_vec_set_v32qi: 4806 i = 2; l = 0; u = 31; 4807 break; 4808 case X86::BI__builtin_ia32_permdf256: 4809 case X86::BI__builtin_ia32_permdi256: 4810 case X86::BI__builtin_ia32_permdf512: 4811 case X86::BI__builtin_ia32_permdi512: 4812 case X86::BI__builtin_ia32_vpermilps: 4813 case X86::BI__builtin_ia32_vpermilps256: 4814 case X86::BI__builtin_ia32_vpermilpd512: 4815 case X86::BI__builtin_ia32_vpermilps512: 4816 case X86::BI__builtin_ia32_pshufd: 4817 case X86::BI__builtin_ia32_pshufd256: 4818 case X86::BI__builtin_ia32_pshufd512: 4819 case X86::BI__builtin_ia32_pshufhw: 4820 case X86::BI__builtin_ia32_pshufhw256: 4821 case X86::BI__builtin_ia32_pshufhw512: 4822 case X86::BI__builtin_ia32_pshuflw: 4823 case X86::BI__builtin_ia32_pshuflw256: 4824 case X86::BI__builtin_ia32_pshuflw512: 4825 case X86::BI__builtin_ia32_vcvtps2ph: 4826 case X86::BI__builtin_ia32_vcvtps2ph_mask: 4827 case X86::BI__builtin_ia32_vcvtps2ph256: 4828 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 4829 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 4830 case X86::BI__builtin_ia32_rndscaleps_128_mask: 4831 case X86::BI__builtin_ia32_rndscalepd_128_mask: 4832 case X86::BI__builtin_ia32_rndscaleps_256_mask: 4833 case X86::BI__builtin_ia32_rndscalepd_256_mask: 4834 case X86::BI__builtin_ia32_rndscaleps_mask: 4835 case X86::BI__builtin_ia32_rndscalepd_mask: 4836 case X86::BI__builtin_ia32_rndscaleph_mask: 4837 case X86::BI__builtin_ia32_reducepd128_mask: 4838 case X86::BI__builtin_ia32_reducepd256_mask: 4839 case X86::BI__builtin_ia32_reducepd512_mask: 4840 case X86::BI__builtin_ia32_reduceps128_mask: 4841 case X86::BI__builtin_ia32_reduceps256_mask: 4842 case X86::BI__builtin_ia32_reduceps512_mask: 4843 case X86::BI__builtin_ia32_reduceph128_mask: 4844 case X86::BI__builtin_ia32_reduceph256_mask: 4845 case X86::BI__builtin_ia32_reduceph512_mask: 4846 case X86::BI__builtin_ia32_prold512: 4847 case X86::BI__builtin_ia32_prolq512: 4848 case X86::BI__builtin_ia32_prold128: 4849 case X86::BI__builtin_ia32_prold256: 4850 case X86::BI__builtin_ia32_prolq128: 4851 case X86::BI__builtin_ia32_prolq256: 4852 case X86::BI__builtin_ia32_prord512: 4853 case X86::BI__builtin_ia32_prorq512: 4854 case X86::BI__builtin_ia32_prord128: 4855 case X86::BI__builtin_ia32_prord256: 4856 case X86::BI__builtin_ia32_prorq128: 4857 case X86::BI__builtin_ia32_prorq256: 4858 case X86::BI__builtin_ia32_fpclasspd128_mask: 4859 case X86::BI__builtin_ia32_fpclasspd256_mask: 4860 case X86::BI__builtin_ia32_fpclassps128_mask: 4861 case X86::BI__builtin_ia32_fpclassps256_mask: 4862 case X86::BI__builtin_ia32_fpclassps512_mask: 4863 case X86::BI__builtin_ia32_fpclasspd512_mask: 4864 case X86::BI__builtin_ia32_fpclassph128_mask: 4865 case X86::BI__builtin_ia32_fpclassph256_mask: 4866 case X86::BI__builtin_ia32_fpclassph512_mask: 4867 case 
X86::BI__builtin_ia32_fpclasssd_mask: 4868 case X86::BI__builtin_ia32_fpclassss_mask: 4869 case X86::BI__builtin_ia32_fpclasssh_mask: 4870 case X86::BI__builtin_ia32_pslldqi128_byteshift: 4871 case X86::BI__builtin_ia32_pslldqi256_byteshift: 4872 case X86::BI__builtin_ia32_pslldqi512_byteshift: 4873 case X86::BI__builtin_ia32_psrldqi128_byteshift: 4874 case X86::BI__builtin_ia32_psrldqi256_byteshift: 4875 case X86::BI__builtin_ia32_psrldqi512_byteshift: 4876 case X86::BI__builtin_ia32_kshiftliqi: 4877 case X86::BI__builtin_ia32_kshiftlihi: 4878 case X86::BI__builtin_ia32_kshiftlisi: 4879 case X86::BI__builtin_ia32_kshiftlidi: 4880 case X86::BI__builtin_ia32_kshiftriqi: 4881 case X86::BI__builtin_ia32_kshiftrihi: 4882 case X86::BI__builtin_ia32_kshiftrisi: 4883 case X86::BI__builtin_ia32_kshiftridi: 4884 i = 1; l = 0; u = 255; 4885 break; 4886 case X86::BI__builtin_ia32_vperm2f128_pd256: 4887 case X86::BI__builtin_ia32_vperm2f128_ps256: 4888 case X86::BI__builtin_ia32_vperm2f128_si256: 4889 case X86::BI__builtin_ia32_permti256: 4890 case X86::BI__builtin_ia32_pblendw128: 4891 case X86::BI__builtin_ia32_pblendw256: 4892 case X86::BI__builtin_ia32_blendps256: 4893 case X86::BI__builtin_ia32_pblendd256: 4894 case X86::BI__builtin_ia32_palignr128: 4895 case X86::BI__builtin_ia32_palignr256: 4896 case X86::BI__builtin_ia32_palignr512: 4897 case X86::BI__builtin_ia32_alignq512: 4898 case X86::BI__builtin_ia32_alignd512: 4899 case X86::BI__builtin_ia32_alignd128: 4900 case X86::BI__builtin_ia32_alignd256: 4901 case X86::BI__builtin_ia32_alignq128: 4902 case X86::BI__builtin_ia32_alignq256: 4903 case X86::BI__builtin_ia32_vcomisd: 4904 case X86::BI__builtin_ia32_vcomiss: 4905 case X86::BI__builtin_ia32_shuf_f32x4: 4906 case X86::BI__builtin_ia32_shuf_f64x2: 4907 case X86::BI__builtin_ia32_shuf_i32x4: 4908 case X86::BI__builtin_ia32_shuf_i64x2: 4909 case X86::BI__builtin_ia32_shufpd512: 4910 case X86::BI__builtin_ia32_shufps: 4911 case X86::BI__builtin_ia32_shufps256: 4912 case X86::BI__builtin_ia32_shufps512: 4913 case X86::BI__builtin_ia32_dbpsadbw128: 4914 case X86::BI__builtin_ia32_dbpsadbw256: 4915 case X86::BI__builtin_ia32_dbpsadbw512: 4916 case X86::BI__builtin_ia32_vpshldd128: 4917 case X86::BI__builtin_ia32_vpshldd256: 4918 case X86::BI__builtin_ia32_vpshldd512: 4919 case X86::BI__builtin_ia32_vpshldq128: 4920 case X86::BI__builtin_ia32_vpshldq256: 4921 case X86::BI__builtin_ia32_vpshldq512: 4922 case X86::BI__builtin_ia32_vpshldw128: 4923 case X86::BI__builtin_ia32_vpshldw256: 4924 case X86::BI__builtin_ia32_vpshldw512: 4925 case X86::BI__builtin_ia32_vpshrdd128: 4926 case X86::BI__builtin_ia32_vpshrdd256: 4927 case X86::BI__builtin_ia32_vpshrdd512: 4928 case X86::BI__builtin_ia32_vpshrdq128: 4929 case X86::BI__builtin_ia32_vpshrdq256: 4930 case X86::BI__builtin_ia32_vpshrdq512: 4931 case X86::BI__builtin_ia32_vpshrdw128: 4932 case X86::BI__builtin_ia32_vpshrdw256: 4933 case X86::BI__builtin_ia32_vpshrdw512: 4934 i = 2; l = 0; u = 255; 4935 break; 4936 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4937 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4938 case X86::BI__builtin_ia32_fixupimmps512_mask: 4939 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4940 case X86::BI__builtin_ia32_fixupimmsd_mask: 4941 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4942 case X86::BI__builtin_ia32_fixupimmss_mask: 4943 case X86::BI__builtin_ia32_fixupimmss_maskz: 4944 case X86::BI__builtin_ia32_fixupimmpd128_mask: 4945 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 4946 case 
X86::BI__builtin_ia32_fixupimmpd256_mask: 4947 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 4948 case X86::BI__builtin_ia32_fixupimmps128_mask: 4949 case X86::BI__builtin_ia32_fixupimmps128_maskz: 4950 case X86::BI__builtin_ia32_fixupimmps256_mask: 4951 case X86::BI__builtin_ia32_fixupimmps256_maskz: 4952 case X86::BI__builtin_ia32_pternlogd512_mask: 4953 case X86::BI__builtin_ia32_pternlogd512_maskz: 4954 case X86::BI__builtin_ia32_pternlogq512_mask: 4955 case X86::BI__builtin_ia32_pternlogq512_maskz: 4956 case X86::BI__builtin_ia32_pternlogd128_mask: 4957 case X86::BI__builtin_ia32_pternlogd128_maskz: 4958 case X86::BI__builtin_ia32_pternlogd256_mask: 4959 case X86::BI__builtin_ia32_pternlogd256_maskz: 4960 case X86::BI__builtin_ia32_pternlogq128_mask: 4961 case X86::BI__builtin_ia32_pternlogq128_maskz: 4962 case X86::BI__builtin_ia32_pternlogq256_mask: 4963 case X86::BI__builtin_ia32_pternlogq256_maskz: 4964 i = 3; l = 0; u = 255; 4965 break; 4966 case X86::BI__builtin_ia32_gatherpfdpd: 4967 case X86::BI__builtin_ia32_gatherpfdps: 4968 case X86::BI__builtin_ia32_gatherpfqpd: 4969 case X86::BI__builtin_ia32_gatherpfqps: 4970 case X86::BI__builtin_ia32_scatterpfdpd: 4971 case X86::BI__builtin_ia32_scatterpfdps: 4972 case X86::BI__builtin_ia32_scatterpfqpd: 4973 case X86::BI__builtin_ia32_scatterpfqps: 4974 i = 4; l = 2; u = 3; 4975 break; 4976 case X86::BI__builtin_ia32_reducesd_mask: 4977 case X86::BI__builtin_ia32_reducess_mask: 4978 case X86::BI__builtin_ia32_rndscalesd_round_mask: 4979 case X86::BI__builtin_ia32_rndscaless_round_mask: 4980 case X86::BI__builtin_ia32_rndscalesh_round_mask: 4981 case X86::BI__builtin_ia32_reducesh_mask: 4982 i = 4; l = 0; u = 255; 4983 break; 4984 } 4985 4986 // Note that we don't force a hard error on the range check here, allowing 4987 // template-generated or macro-generated dead code to potentially have out-of- 4988 // range values. These need to code generate, but don't need to necessarily 4989 // make any sense. We use a warning that defaults to an error. 4990 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false); 4991 } 4992 4993 /// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo 4994 /// parameter with the FormatAttr's correct format_idx and firstDataArg. 4995 /// Returns true when the format fits the function and the FormatStringInfo has 4996 /// been populated. 4997 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, 4998 FormatStringInfo *FSI) { 4999 FSI->HasVAListArg = Format->getFirstArg() == 0; 5000 FSI->FormatIdx = Format->getFormatIdx() - 1; 5001 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1; 5002 5003 // The way the format attribute works in GCC, the implicit this argument 5004 // of member functions is counted. However, it doesn't appear in our own 5005 // lists, so decrement format_idx in that case. 5006 if (IsCXXMember) { 5007 if(FSI->FormatIdx == 0) 5008 return false; 5009 --FSI->FormatIdx; 5010 if (FSI->FirstDataArg != 0) 5011 --FSI->FirstDataArg; 5012 } 5013 return true; 5014 } 5015 5016 /// Checks if a the given expression evaluates to null. 5017 /// 5018 /// Returns true if the value evaluates to null. 5019 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) { 5020 // If the expression has non-null type, it doesn't evaluate to null. 
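  // For example (a hedged sketch using the Clang nullability qualifiers):
  //
  //   void sink(int * _Nonnull);
  //   void caller(int * _Nonnull p) {
  //     sink(p); // never warns: p's type already guarantees non-null
  //     sink(0); // warns (warn_null_arg): 0 evaluates to a null pointer
  //   }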
5021 if (auto nullability 5022 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) { 5023 if (*nullability == NullabilityKind::NonNull) 5024 return false; 5025 } 5026 5027 // As a special case, transparent unions initialized with zero are 5028 // considered null for the purposes of the nonnull attribute. 5029 if (const RecordType *UT = Expr->getType()->getAsUnionType()) { 5030 if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) 5031 if (const CompoundLiteralExpr *CLE = 5032 dyn_cast<CompoundLiteralExpr>(Expr)) 5033 if (const InitListExpr *ILE = 5034 dyn_cast<InitListExpr>(CLE->getInitializer())) 5035 Expr = ILE->getInit(0); 5036 } 5037 5038 bool Result; 5039 return (!Expr->isValueDependent() && 5040 Expr->EvaluateAsBooleanCondition(Result, S.Context) && 5041 !Result); 5042 } 5043 5044 static void CheckNonNullArgument(Sema &S, 5045 const Expr *ArgExpr, 5046 SourceLocation CallSiteLoc) { 5047 if (CheckNonNullExpr(S, ArgExpr)) 5048 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, 5049 S.PDiag(diag::warn_null_arg) 5050 << ArgExpr->getSourceRange()); 5051 } 5052 5053 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { 5054 FormatStringInfo FSI; 5055 if ((GetFormatStringType(Format) == FST_NSString) && 5056 getFormatStringInfo(Format, false, &FSI)) { 5057 Idx = FSI.FormatIdx; 5058 return true; 5059 } 5060 return false; 5061 } 5062 5063 /// Diagnose use of %s directive in an NSString which is being passed 5064 /// as formatting string to formatting method. 5065 static void 5066 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 5067 const NamedDecl *FDecl, 5068 Expr **Args, 5069 unsigned NumArgs) { 5070 unsigned Idx = 0; 5071 bool Format = false; 5072 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 5073 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 5074 Idx = 2; 5075 Format = true; 5076 } 5077 else 5078 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5079 if (S.GetFormatNSStringIdx(I, Idx)) { 5080 Format = true; 5081 break; 5082 } 5083 } 5084 if (!Format || NumArgs <= Idx) 5085 return; 5086 const Expr *FormatExpr = Args[Idx]; 5087 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 5088 FormatExpr = CSCE->getSubExpr(); 5089 const StringLiteral *FormatString; 5090 if (const ObjCStringLiteral *OSL = 5091 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 5092 FormatString = OSL->getString(); 5093 else 5094 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 5095 if (!FormatString) 5096 return; 5097 if (S.FormatStringHasSArg(FormatString)) { 5098 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 5099 << "%s" << 1 << 1; 5100 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 5101 << FDecl->getDeclName(); 5102 } 5103 } 5104 5105 /// Determine whether the given type has a non-null nullability annotation. 5106 static bool isNonNullType(ASTContext &ctx, QualType type) { 5107 if (auto nullability = type->getNullability(ctx)) 5108 return *nullability == NullabilityKind::NonNull; 5109 5110 return false; 5111 } 5112 5113 static void CheckNonNullArguments(Sema &S, 5114 const NamedDecl *FDecl, 5115 const FunctionProtoType *Proto, 5116 ArrayRef<const Expr *> Args, 5117 SourceLocation CallSiteLoc) { 5118 assert((FDecl || Proto) && "Need a function declaration or prototype"); 5119 5120 // Already checked by by constant evaluator. 5121 if (S.isConstantEvaluated()) 5122 return; 5123 // Check the attributes attached to the method/function itself. 
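  // For example (a hedged sketch of the two spellings handled below):
  //
  //   void f(int *, char *) __attribute__((nonnull));     // all pointer args
  //   void g(int *, char *) __attribute__((nonnull(2)));  // only the 2nd arg
  //
  //   f(p, 0); // warn_null_arg on the second argument
  //   g(0, s); // not diagnosed here: argument 1 is not in the nonnull list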
5124 llvm::SmallBitVector NonNullArgs; 5125 if (FDecl) { 5126 // Handle the nonnull attribute on the function/method declaration itself. 5127 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 5128 if (!NonNull->args_size()) { 5129 // Easy case: all pointer arguments are nonnull. 5130 for (const auto *Arg : Args) 5131 if (S.isValidPointerAttrType(Arg->getType())) 5132 CheckNonNullArgument(S, Arg, CallSiteLoc); 5133 return; 5134 } 5135 5136 for (const ParamIdx &Idx : NonNull->args()) { 5137 unsigned IdxAST = Idx.getASTIndex(); 5138 if (IdxAST >= Args.size()) 5139 continue; 5140 if (NonNullArgs.empty()) 5141 NonNullArgs.resize(Args.size()); 5142 NonNullArgs.set(IdxAST); 5143 } 5144 } 5145 } 5146 5147 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 5148 // Handle the nonnull attribute on the parameters of the 5149 // function/method. 5150 ArrayRef<ParmVarDecl*> parms; 5151 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 5152 parms = FD->parameters(); 5153 else 5154 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 5155 5156 unsigned ParamIndex = 0; 5157 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 5158 I != E; ++I, ++ParamIndex) { 5159 const ParmVarDecl *PVD = *I; 5160 if (PVD->hasAttr<NonNullAttr>() || 5161 isNonNullType(S.Context, PVD->getType())) { 5162 if (NonNullArgs.empty()) 5163 NonNullArgs.resize(Args.size()); 5164 5165 NonNullArgs.set(ParamIndex); 5166 } 5167 } 5168 } else { 5169 // If we have a non-function, non-method declaration but no 5170 // function prototype, try to dig out the function prototype. 5171 if (!Proto) { 5172 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 5173 QualType type = VD->getType().getNonReferenceType(); 5174 if (auto pointerType = type->getAs<PointerType>()) 5175 type = pointerType->getPointeeType(); 5176 else if (auto blockType = type->getAs<BlockPointerType>()) 5177 type = blockType->getPointeeType(); 5178 // FIXME: data member pointers? 5179 5180 // Dig out the function prototype, if there is one. 5181 Proto = type->getAs<FunctionProtoType>(); 5182 } 5183 } 5184 5185 // Fill in non-null argument information from the nullability 5186 // information on the parameter types (if we have them). 5187 if (Proto) { 5188 unsigned Index = 0; 5189 for (auto paramType : Proto->getParamTypes()) { 5190 if (isNonNullType(S.Context, paramType)) { 5191 if (NonNullArgs.empty()) 5192 NonNullArgs.resize(Args.size()); 5193 5194 NonNullArgs.set(Index); 5195 } 5196 5197 ++Index; 5198 } 5199 } 5200 } 5201 5202 // Check for non-null arguments. 5203 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 5204 ArgIndex != ArgIndexEnd; ++ArgIndex) { 5205 if (NonNullArgs[ArgIndex]) 5206 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 5207 } 5208 } 5209 5210 /// Warn if a pointer or reference argument passed to a function points to an 5211 /// object that is less aligned than the parameter. This can happen when 5212 /// creating a typedef with a lower alignment than the original type and then 5213 /// calling functions defined in terms of the original type. 5214 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, 5215 StringRef ParamName, QualType ArgTy, 5216 QualType ParamTy) { 5217 5218 // If a function accepts a pointer or reference type 5219 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType()) 5220 return; 5221 5222 // If the parameter is a pointer type, get the pointee type for the 5223 // argument too. 
If the parameter is a reference type, don't try to get 5224 // the pointee type for the argument. 5225 if (ParamTy->isPointerType()) 5226 ArgTy = ArgTy->getPointeeType(); 5227 5228 // Remove reference or pointer 5229 ParamTy = ParamTy->getPointeeType(); 5230 5231 // Find expected alignment, and the actual alignment of the passed object. 5232 // getTypeAlignInChars requires complete types 5233 if (ArgTy.isNull() || ParamTy->isIncompleteType() || 5234 ArgTy->isIncompleteType() || ParamTy->isUndeducedType() || 5235 ArgTy->isUndeducedType()) 5236 return; 5237 5238 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy); 5239 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy); 5240 5241 // If the argument is less aligned than the parameter, there is a 5242 // potential alignment issue. 5243 if (ArgAlign < ParamAlign) 5244 Diag(Loc, diag::warn_param_mismatched_alignment) 5245 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity() 5246 << ParamName << (FDecl != nullptr) << FDecl; 5247 } 5248 5249 /// Handles the checks for format strings, non-POD arguments to vararg 5250 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 5251 /// attributes. 5252 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 5253 const Expr *ThisArg, ArrayRef<const Expr *> Args, 5254 bool IsMemberFunction, SourceLocation Loc, 5255 SourceRange Range, VariadicCallType CallType) { 5256 // FIXME: We should check as much as we can in the template definition. 5257 if (CurContext->isDependentContext()) 5258 return; 5259 5260 // Printf and scanf checking. 5261 llvm::SmallBitVector CheckedVarArgs; 5262 if (FDecl) { 5263 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5264 // Only create vector if there are format attributes. 5265 CheckedVarArgs.resize(Args.size()); 5266 5267 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 5268 CheckedVarArgs); 5269 } 5270 } 5271 5272 // Refuse POD arguments that weren't caught by the format string 5273 // checks above. 5274 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 5275 if (CallType != VariadicDoesNotApply && 5276 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 5277 unsigned NumParams = Proto ? Proto->getNumParams() 5278 : FDecl && isa<FunctionDecl>(FDecl) 5279 ? cast<FunctionDecl>(FDecl)->getNumParams() 5280 : FDecl && isa<ObjCMethodDecl>(FDecl) 5281 ? cast<ObjCMethodDecl>(FDecl)->param_size() 5282 : 0; 5283 5284 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 5285 // Args[ArgIdx] can be null in malformed code. 5286 if (const Expr *Arg = Args[ArgIdx]) { 5287 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 5288 checkVariadicArgument(Arg, CallType); 5289 } 5290 } 5291 } 5292 5293 if (FDecl || Proto) { 5294 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 5295 5296 // Type safety checking. 5297 if (FDecl) { 5298 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 5299 CheckArgumentWithTypeTag(I, Args, Loc); 5300 } 5301 } 5302 5303 // Check that passed arguments match the alignment of original arguments. 5304 // Try to get the missing prototype from the declaration. 5305 if (!Proto && FDecl) { 5306 const auto *FT = FDecl->getFunctionType(); 5307 if (isa_and_nonnull<FunctionProtoType>(FT)) 5308 Proto = cast<FunctionProtoType>(FDecl->getFunctionType()); 5309 } 5310 if (Proto) { 5311 // For variadic functions, we may have more args than parameters. 5312 // For some K&R functions, we may have less args than parameters. 
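    // As a hedged sketch of the mismatch CheckArgAlignment diagnoses in this
    // loop (a typedef that lowers the original type's alignment, the scenario
    // described in its documentation above):
    //
    //   typedef double LowAlignDouble __attribute__((aligned(1)));
    //   void takesDouble(double *);
    //   void use(LowAlignDouble *P) {
    //     takesDouble(P); // warn_param_mismatched_alignment: the pointee is
    //                     // 1-byte aligned, the parameter expects alignof(double)
    //   }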
5313 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size()); 5314 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { 5315 // Args[ArgIdx] can be null in malformed code. 5316 if (const Expr *Arg = Args[ArgIdx]) { 5317 if (Arg->containsErrors()) 5318 continue; 5319 5320 QualType ParamTy = Proto->getParamType(ArgIdx); 5321 QualType ArgTy = Arg->getType(); 5322 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), 5323 ArgTy, ParamTy); 5324 } 5325 } 5326 } 5327 5328 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 5329 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 5330 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 5331 if (!Arg->isValueDependent()) { 5332 Expr::EvalResult Align; 5333 if (Arg->EvaluateAsInt(Align, Context)) { 5334 const llvm::APSInt &I = Align.Val.getInt(); 5335 if (!I.isPowerOf2()) 5336 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 5337 << Arg->getSourceRange(); 5338 5339 if (I > Sema::MaximumAlignment) 5340 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 5341 << Arg->getSourceRange() << Sema::MaximumAlignment; 5342 } 5343 } 5344 } 5345 5346 if (FD) 5347 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 5348 } 5349 5350 /// CheckConstructorCall - Check a constructor call for correctness and safety 5351 /// properties not enforced by the C type system. 5352 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, 5353 ArrayRef<const Expr *> Args, 5354 const FunctionProtoType *Proto, 5355 SourceLocation Loc) { 5356 VariadicCallType CallType = 5357 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 5358 5359 auto *Ctor = cast<CXXConstructorDecl>(FDecl); 5360 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType), 5361 Context.getPointerType(Ctor->getThisObjectType())); 5362 5363 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 5364 Loc, SourceRange(), CallType); 5365 } 5366 5367 /// CheckFunctionCall - Check a direct function call for various correctness 5368 /// and safety properties not strictly enforced by the C type system. 5369 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 5370 const FunctionProtoType *Proto) { 5371 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 5372 isa<CXXMethodDecl>(FDecl); 5373 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 5374 IsMemberOperatorCall; 5375 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 5376 TheCall->getCallee()); 5377 Expr** Args = TheCall->getArgs(); 5378 unsigned NumArgs = TheCall->getNumArgs(); 5379 5380 Expr *ImplicitThis = nullptr; 5381 if (IsMemberOperatorCall) { 5382 // If this is a call to a member operator, hide the first argument 5383 // from checkCall. 5384 // FIXME: Our choice of AST representation here is less than ideal. 5385 ImplicitThis = Args[0]; 5386 ++Args; 5387 --NumArgs; 5388 } else if (IsMemberFunction) 5389 ImplicitThis = 5390 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 5391 5392 if (ImplicitThis) { 5393 // ImplicitThis may or may not be a pointer, depending on whether . or -> is 5394 // used. 
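    // For example (sketch): for 'obj.method()' the implicit object argument
    // is the glvalue 'obj' (non-pointer type), while for 'ptr->method()' it
    // is 'ptr', which already has pointer type; both are normalized to a
    // pointer type below before the 'this' alignment check.
    //
    //   struct S { void m(); } s, *p = &s;
    //   s.m();  // ImplicitThis = 's'  -> wrapped in a pointer type here
    //   p->m(); // ImplicitThis = 'p'  -> already a pointer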
5395 QualType ThisType = ImplicitThis->getType(); 5396 if (!ThisType->isPointerType()) { 5397 assert(!ThisType->isReferenceType()); 5398 ThisType = Context.getPointerType(ThisType); 5399 } 5400 5401 QualType ThisTypeFromDecl = 5402 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType()); 5403 5404 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType, 5405 ThisTypeFromDecl); 5406 } 5407 5408 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 5409 IsMemberFunction, TheCall->getRParenLoc(), 5410 TheCall->getCallee()->getSourceRange(), CallType); 5411 5412 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 5413 // None of the checks below are needed for functions that don't have 5414 // simple names (e.g., C++ conversion functions). 5415 if (!FnInfo) 5416 return false; 5417 5418 CheckTCBEnforcement(TheCall, FDecl); 5419 5420 CheckAbsoluteValueFunction(TheCall, FDecl); 5421 CheckMaxUnsignedZero(TheCall, FDecl); 5422 5423 if (getLangOpts().ObjC) 5424 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 5425 5426 unsigned CMId = FDecl->getMemoryFunctionKind(); 5427 5428 // Handle memory setting and copying functions. 5429 switch (CMId) { 5430 case 0: 5431 return false; 5432 case Builtin::BIstrlcpy: // fallthrough 5433 case Builtin::BIstrlcat: 5434 CheckStrlcpycatArguments(TheCall, FnInfo); 5435 break; 5436 case Builtin::BIstrncat: 5437 CheckStrncatArguments(TheCall, FnInfo); 5438 break; 5439 case Builtin::BIfree: 5440 CheckFreeArguments(TheCall); 5441 break; 5442 default: 5443 CheckMemaccessArguments(TheCall, CMId, FnInfo); 5444 } 5445 5446 return false; 5447 } 5448 5449 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 5450 ArrayRef<const Expr *> Args) { 5451 VariadicCallType CallType = 5452 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 5453 5454 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 5455 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 5456 CallType); 5457 5458 return false; 5459 } 5460 5461 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 5462 const FunctionProtoType *Proto) { 5463 QualType Ty; 5464 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 5465 Ty = V->getType().getNonReferenceType(); 5466 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 5467 Ty = F->getType().getNonReferenceType(); 5468 else 5469 return false; 5470 5471 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 5472 !Ty->isFunctionProtoType()) 5473 return false; 5474 5475 VariadicCallType CallType; 5476 if (!Proto || !Proto->isVariadic()) { 5477 CallType = VariadicDoesNotApply; 5478 } else if (Ty->isBlockPointerType()) { 5479 CallType = VariadicBlock; 5480 } else { // Ty->isFunctionPointerType() 5481 CallType = VariadicFunction; 5482 } 5483 5484 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 5485 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5486 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5487 TheCall->getCallee()->getSourceRange(), CallType); 5488 5489 return false; 5490 } 5491 5492 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 5493 /// such as function pointers returned from functions. 
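/// For example (a hedged sketch; names are illustrative):
///
///   typedef void Handler(int * _Nonnull, ...);
///   Handler *getHandler(void);
///   getHandler()(nullptr, SomeNonPODObject);
///
/// Even without a declaration for the callee, the null first argument and the
/// non-POD object passed through the ellipsis can still be diagnosed from the
/// prototype alone.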
5494 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 5495 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 5496 TheCall->getCallee()); 5497 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 5498 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5499 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5500 TheCall->getCallee()->getSourceRange(), CallType); 5501 5502 return false; 5503 } 5504 5505 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 5506 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 5507 return false; 5508 5509 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 5510 switch (Op) { 5511 case AtomicExpr::AO__c11_atomic_init: 5512 case AtomicExpr::AO__opencl_atomic_init: 5513 llvm_unreachable("There is no ordering argument for an init"); 5514 5515 case AtomicExpr::AO__c11_atomic_load: 5516 case AtomicExpr::AO__opencl_atomic_load: 5517 case AtomicExpr::AO__hip_atomic_load: 5518 case AtomicExpr::AO__atomic_load_n: 5519 case AtomicExpr::AO__atomic_load: 5520 return OrderingCABI != llvm::AtomicOrderingCABI::release && 5521 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5522 5523 case AtomicExpr::AO__c11_atomic_store: 5524 case AtomicExpr::AO__opencl_atomic_store: 5525 case AtomicExpr::AO__hip_atomic_store: 5526 case AtomicExpr::AO__atomic_store: 5527 case AtomicExpr::AO__atomic_store_n: 5528 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 5529 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 5530 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5531 5532 default: 5533 return true; 5534 } 5535 } 5536 5537 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 5538 AtomicExpr::AtomicOp Op) { 5539 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 5540 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5541 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 5542 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 5543 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 5544 Op); 5545 } 5546 5547 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 5548 SourceLocation RParenLoc, MultiExprArg Args, 5549 AtomicExpr::AtomicOp Op, 5550 AtomicArgumentOrder ArgOrder) { 5551 // All the non-OpenCL operations take one of the following forms. 5552 // The OpenCL operations take the __c11 forms with one extra argument for 5553 // synchronization scope. 
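  // For example (a hedged sketch of three of the forms listed below):
  //
  //   _Atomic(int) AI; int V, Ret;
  //   Ret = __c11_atomic_load(&AI, __ATOMIC_SEQ_CST); // Load:     C (A *, int)
  //   Ret = __atomic_load_n(&V, __ATOMIC_SEQ_CST);    // Load:     C (A *, int)
  //   __atomic_load(&V, &Ret, __ATOMIC_SEQ_CST);      // LoadCopy: void (A *, CP, int)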
5554 enum { 5555 // C __c11_atomic_init(A *, C) 5556 Init, 5557 5558 // C __c11_atomic_load(A *, int) 5559 Load, 5560 5561 // void __atomic_load(A *, CP, int) 5562 LoadCopy, 5563 5564 // void __atomic_store(A *, CP, int) 5565 Copy, 5566 5567 // C __c11_atomic_add(A *, M, int) 5568 Arithmetic, 5569 5570 // C __atomic_exchange_n(A *, CP, int) 5571 Xchg, 5572 5573 // void __atomic_exchange(A *, C *, CP, int) 5574 GNUXchg, 5575 5576 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 5577 C11CmpXchg, 5578 5579 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 5580 GNUCmpXchg 5581 } Form = Init; 5582 5583 const unsigned NumForm = GNUCmpXchg + 1; 5584 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 5585 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 5586 // where: 5587 // C is an appropriate type, 5588 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 5589 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 5590 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 5591 // the int parameters are for orderings. 5592 5593 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 5594 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 5595 "need to update code for modified forms"); 5596 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 5597 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 5598 AtomicExpr::AO__atomic_load, 5599 "need to update code for modified C11 atomics"); 5600 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 5601 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 5602 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load && 5603 Op <= AtomicExpr::AO__hip_atomic_fetch_max; 5604 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 5605 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 5606 IsOpenCL; 5607 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 5608 Op == AtomicExpr::AO__atomic_store_n || 5609 Op == AtomicExpr::AO__atomic_exchange_n || 5610 Op == AtomicExpr::AO__atomic_compare_exchange_n; 5611 bool IsAddSub = false; 5612 5613 switch (Op) { 5614 case AtomicExpr::AO__c11_atomic_init: 5615 case AtomicExpr::AO__opencl_atomic_init: 5616 Form = Init; 5617 break; 5618 5619 case AtomicExpr::AO__c11_atomic_load: 5620 case AtomicExpr::AO__opencl_atomic_load: 5621 case AtomicExpr::AO__hip_atomic_load: 5622 case AtomicExpr::AO__atomic_load_n: 5623 Form = Load; 5624 break; 5625 5626 case AtomicExpr::AO__atomic_load: 5627 Form = LoadCopy; 5628 break; 5629 5630 case AtomicExpr::AO__c11_atomic_store: 5631 case AtomicExpr::AO__opencl_atomic_store: 5632 case AtomicExpr::AO__hip_atomic_store: 5633 case AtomicExpr::AO__atomic_store: 5634 case AtomicExpr::AO__atomic_store_n: 5635 Form = Copy; 5636 break; 5637 case AtomicExpr::AO__hip_atomic_fetch_add: 5638 case AtomicExpr::AO__hip_atomic_fetch_min: 5639 case AtomicExpr::AO__hip_atomic_fetch_max: 5640 case AtomicExpr::AO__c11_atomic_fetch_add: 5641 case AtomicExpr::AO__c11_atomic_fetch_sub: 5642 case AtomicExpr::AO__opencl_atomic_fetch_add: 5643 case AtomicExpr::AO__opencl_atomic_fetch_sub: 5644 case AtomicExpr::AO__atomic_fetch_add: 5645 case AtomicExpr::AO__atomic_fetch_sub: 5646 case AtomicExpr::AO__atomic_add_fetch: 5647 case AtomicExpr::AO__atomic_sub_fetch: 5648 IsAddSub = true; 5649 Form = Arithmetic; 5650 break; 5651 case AtomicExpr::AO__c11_atomic_fetch_and: 5652 case AtomicExpr::AO__c11_atomic_fetch_or: 5653 case AtomicExpr::AO__c11_atomic_fetch_xor: 5654 case AtomicExpr::AO__hip_atomic_fetch_and: 5655 case 
AtomicExpr::AO__hip_atomic_fetch_or: 5656 case AtomicExpr::AO__hip_atomic_fetch_xor: 5657 case AtomicExpr::AO__c11_atomic_fetch_nand: 5658 case AtomicExpr::AO__opencl_atomic_fetch_and: 5659 case AtomicExpr::AO__opencl_atomic_fetch_or: 5660 case AtomicExpr::AO__opencl_atomic_fetch_xor: 5661 case AtomicExpr::AO__atomic_fetch_and: 5662 case AtomicExpr::AO__atomic_fetch_or: 5663 case AtomicExpr::AO__atomic_fetch_xor: 5664 case AtomicExpr::AO__atomic_fetch_nand: 5665 case AtomicExpr::AO__atomic_and_fetch: 5666 case AtomicExpr::AO__atomic_or_fetch: 5667 case AtomicExpr::AO__atomic_xor_fetch: 5668 case AtomicExpr::AO__atomic_nand_fetch: 5669 Form = Arithmetic; 5670 break; 5671 case AtomicExpr::AO__c11_atomic_fetch_min: 5672 case AtomicExpr::AO__c11_atomic_fetch_max: 5673 case AtomicExpr::AO__opencl_atomic_fetch_min: 5674 case AtomicExpr::AO__opencl_atomic_fetch_max: 5675 case AtomicExpr::AO__atomic_min_fetch: 5676 case AtomicExpr::AO__atomic_max_fetch: 5677 case AtomicExpr::AO__atomic_fetch_min: 5678 case AtomicExpr::AO__atomic_fetch_max: 5679 Form = Arithmetic; 5680 break; 5681 5682 case AtomicExpr::AO__c11_atomic_exchange: 5683 case AtomicExpr::AO__hip_atomic_exchange: 5684 case AtomicExpr::AO__opencl_atomic_exchange: 5685 case AtomicExpr::AO__atomic_exchange_n: 5686 Form = Xchg; 5687 break; 5688 5689 case AtomicExpr::AO__atomic_exchange: 5690 Form = GNUXchg; 5691 break; 5692 5693 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 5694 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 5695 case AtomicExpr::AO__hip_atomic_compare_exchange_strong: 5696 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 5697 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 5698 case AtomicExpr::AO__hip_atomic_compare_exchange_weak: 5699 Form = C11CmpXchg; 5700 break; 5701 5702 case AtomicExpr::AO__atomic_compare_exchange: 5703 case AtomicExpr::AO__atomic_compare_exchange_n: 5704 Form = GNUCmpXchg; 5705 break; 5706 } 5707 5708 unsigned AdjustedNumArgs = NumArgs[Form]; 5709 if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init) 5710 ++AdjustedNumArgs; 5711 // Check we have the right number of arguments. 5712 if (Args.size() < AdjustedNumArgs) { 5713 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 5714 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5715 << ExprRange; 5716 return ExprError(); 5717 } else if (Args.size() > AdjustedNumArgs) { 5718 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 5719 diag::err_typecheck_call_too_many_args) 5720 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5721 << ExprRange; 5722 return ExprError(); 5723 } 5724 5725 // Inspect the first argument of the atomic operation. 5726 Expr *Ptr = Args[0]; 5727 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 5728 if (ConvertedPtr.isInvalid()) 5729 return ExprError(); 5730 5731 Ptr = ConvertedPtr.get(); 5732 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 5733 if (!pointerType) { 5734 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 5735 << Ptr->getType() << Ptr->getSourceRange(); 5736 return ExprError(); 5737 } 5738 5739 // For a __c11 builtin, this should be a pointer to an _Atomic type. 
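  // For example (sketch):
  //
  //   _Atomic(int) AI; int Plain;
  //   __c11_atomic_fetch_add(&AI, 1, __ATOMIC_RELAXED);    // OK
  //   __c11_atomic_fetch_add(&Plain, 1, __ATOMIC_RELAXED); // err_atomic_op_needs_atomic
  //   __atomic_fetch_add(&Plain, 1, __ATOMIC_RELAXED);     // OK: the GNU builtins
  //                                                        // take a plain C type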
5740 QualType AtomTy = pointerType->getPointeeType(); // 'A' 5741 QualType ValType = AtomTy; // 'C' 5742 if (IsC11) { 5743 if (!AtomTy->isAtomicType()) { 5744 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 5745 << Ptr->getType() << Ptr->getSourceRange(); 5746 return ExprError(); 5747 } 5748 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 5749 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 5750 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 5751 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 5752 << Ptr->getSourceRange(); 5753 return ExprError(); 5754 } 5755 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 5756 } else if (Form != Load && Form != LoadCopy) { 5757 if (ValType.isConstQualified()) { 5758 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 5759 << Ptr->getType() << Ptr->getSourceRange(); 5760 return ExprError(); 5761 } 5762 } 5763 5764 // For an arithmetic operation, the implied arithmetic must be well-formed. 5765 if (Form == Arithmetic) { 5766 // GCC does not enforce these rules for GNU atomics, but we do to help catch 5767 // trivial type errors. 5768 auto IsAllowedValueType = [&](QualType ValType) { 5769 if (ValType->isIntegerType()) 5770 return true; 5771 if (ValType->isPointerType()) 5772 return true; 5773 if (!ValType->isFloatingType()) 5774 return false; 5775 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 5776 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 5777 &Context.getTargetInfo().getLongDoubleFormat() == 5778 &llvm::APFloat::x87DoubleExtended()) 5779 return false; 5780 return true; 5781 }; 5782 if (IsAddSub && !IsAllowedValueType(ValType)) { 5783 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp) 5784 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5785 return ExprError(); 5786 } 5787 if (!IsAddSub && !ValType->isIntegerType()) { 5788 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 5789 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5790 return ExprError(); 5791 } 5792 if (IsC11 && ValType->isPointerType() && 5793 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 5794 diag::err_incomplete_type)) { 5795 return ExprError(); 5796 } 5797 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 5798 // For __atomic_*_n operations, the value type must be a scalar integral or 5799 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 5800 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 5801 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5802 return ExprError(); 5803 } 5804 5805 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 5806 !AtomTy->isScalarType()) { 5807 // For GNU atomics, require a trivially-copyable type. This is not part of 5808 // the GNU atomics specification but we enforce it for consistency with 5809 // other atomics which generally all require a trivially-copyable type. This 5810 // is because atomics just copy bits. 5811 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 5812 << Ptr->getType() << Ptr->getSourceRange(); 5813 return ExprError(); 5814 } 5815 5816 switch (ValType.getObjCLifetime()) { 5817 case Qualifiers::OCL_None: 5818 case Qualifiers::OCL_ExplicitNone: 5819 // okay 5820 break; 5821 5822 case Qualifiers::OCL_Weak: 5823 case Qualifiers::OCL_Strong: 5824 case Qualifiers::OCL_Autoreleasing: 5825 // FIXME: Can this happen? 
By this point, ValType should be known 5826 // to be trivially copyable. 5827 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 5828 << ValType << Ptr->getSourceRange(); 5829 return ExprError(); 5830 } 5831 5832 // All atomic operations have an overload which takes a pointer to a volatile 5833 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 5834 // into the result or the other operands. Similarly atomic_load takes a 5835 // pointer to a const 'A'. 5836 ValType.removeLocalVolatile(); 5837 ValType.removeLocalConst(); 5838 QualType ResultType = ValType; 5839 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 5840 Form == Init) 5841 ResultType = Context.VoidTy; 5842 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 5843 ResultType = Context.BoolTy; 5844 5845 // The type of a parameter passed 'by value'. In the GNU atomics, such 5846 // arguments are actually passed as pointers. 5847 QualType ByValType = ValType; // 'CP' 5848 bool IsPassedByAddress = false; 5849 if (!IsC11 && !IsHIP && !IsN) { 5850 ByValType = Ptr->getType(); 5851 IsPassedByAddress = true; 5852 } 5853 5854 SmallVector<Expr *, 5> APIOrderedArgs; 5855 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 5856 APIOrderedArgs.push_back(Args[0]); 5857 switch (Form) { 5858 case Init: 5859 case Load: 5860 APIOrderedArgs.push_back(Args[1]); // Val1/Order 5861 break; 5862 case LoadCopy: 5863 case Copy: 5864 case Arithmetic: 5865 case Xchg: 5866 APIOrderedArgs.push_back(Args[2]); // Val1 5867 APIOrderedArgs.push_back(Args[1]); // Order 5868 break; 5869 case GNUXchg: 5870 APIOrderedArgs.push_back(Args[2]); // Val1 5871 APIOrderedArgs.push_back(Args[3]); // Val2 5872 APIOrderedArgs.push_back(Args[1]); // Order 5873 break; 5874 case C11CmpXchg: 5875 APIOrderedArgs.push_back(Args[2]); // Val1 5876 APIOrderedArgs.push_back(Args[4]); // Val2 5877 APIOrderedArgs.push_back(Args[1]); // Order 5878 APIOrderedArgs.push_back(Args[3]); // OrderFail 5879 break; 5880 case GNUCmpXchg: 5881 APIOrderedArgs.push_back(Args[2]); // Val1 5882 APIOrderedArgs.push_back(Args[4]); // Val2 5883 APIOrderedArgs.push_back(Args[5]); // Weak 5884 APIOrderedArgs.push_back(Args[1]); // Order 5885 APIOrderedArgs.push_back(Args[3]); // OrderFail 5886 break; 5887 } 5888 } else 5889 APIOrderedArgs.append(Args.begin(), Args.end()); 5890 5891 // The first argument's non-CV pointer type is used to deduce the type of 5892 // subsequent arguments, except for: 5893 // - weak flag (always converted to bool) 5894 // - memory order (always converted to int) 5895 // - scope (always converted to int) 5896 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 5897 QualType Ty; 5898 if (i < NumVals[Form] + 1) { 5899 switch (i) { 5900 case 0: 5901 // The first argument is always a pointer. It has a fixed type. 5902 // It is always dereferenced, a nullptr is undefined. 5903 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5904 // Nothing else to do: we already know all we want about this pointer. 5905 continue; 5906 case 1: 5907 // The second argument is the non-atomic operand. For arithmetic, this 5908 // is always passed by value, and for a compare_exchange it is always 5909 // passed by address. For the rest, GNU uses by-address and C11 uses 5910 // by-value. 
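      // Illustrative (hypothetical) user code, not part of this file:
      //   __c11_atomic_store(p, 42, order);    // C11 form: value operand by value
      //   __atomic_store(p, &desired, order);  // GNU form: value operand by address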
5911 assert(Form != Load); 5912 if (Form == Arithmetic && ValType->isPointerType()) 5913 Ty = Context.getPointerDiffType(); 5914 else if (Form == Init || Form == Arithmetic) 5915 Ty = ValType; 5916 else if (Form == Copy || Form == Xchg) { 5917 if (IsPassedByAddress) { 5918 // The value pointer is always dereferenced, a nullptr is undefined. 5919 CheckNonNullArgument(*this, APIOrderedArgs[i], 5920 ExprRange.getBegin()); 5921 } 5922 Ty = ByValType; 5923 } else { 5924 Expr *ValArg = APIOrderedArgs[i]; 5925 // The value pointer is always dereferenced, a nullptr is undefined. 5926 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 5927 LangAS AS = LangAS::Default; 5928 // Keep address space of non-atomic pointer type. 5929 if (const PointerType *PtrTy = 5930 ValArg->getType()->getAs<PointerType>()) { 5931 AS = PtrTy->getPointeeType().getAddressSpace(); 5932 } 5933 Ty = Context.getPointerType( 5934 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 5935 } 5936 break; 5937 case 2: 5938 // The third argument to compare_exchange / GNU exchange is the desired 5939 // value, either by-value (for the C11 and *_n variant) or as a pointer. 5940 if (IsPassedByAddress) 5941 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5942 Ty = ByValType; 5943 break; 5944 case 3: 5945 // The fourth argument to GNU compare_exchange is a 'weak' flag. 5946 Ty = Context.BoolTy; 5947 break; 5948 } 5949 } else { 5950 // The order(s) and scope are always converted to int. 5951 Ty = Context.IntTy; 5952 } 5953 5954 InitializedEntity Entity = 5955 InitializedEntity::InitializeParameter(Context, Ty, false); 5956 ExprResult Arg = APIOrderedArgs[i]; 5957 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5958 if (Arg.isInvalid()) 5959 return true; 5960 APIOrderedArgs[i] = Arg.get(); 5961 } 5962 5963 // Permute the arguments into a 'consistent' order. 5964 SmallVector<Expr*, 5> SubExprs; 5965 SubExprs.push_back(Ptr); 5966 switch (Form) { 5967 case Init: 5968 // Note, AtomicExpr::getVal1() has a special case for this atomic. 5969 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5970 break; 5971 case Load: 5972 SubExprs.push_back(APIOrderedArgs[1]); // Order 5973 break; 5974 case LoadCopy: 5975 case Copy: 5976 case Arithmetic: 5977 case Xchg: 5978 SubExprs.push_back(APIOrderedArgs[2]); // Order 5979 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5980 break; 5981 case GNUXchg: 5982 // Note, AtomicExpr::getVal2() has a special case for this atomic. 
5983 SubExprs.push_back(APIOrderedArgs[3]); // Order 5984 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5985 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5986 break; 5987 case C11CmpXchg: 5988 SubExprs.push_back(APIOrderedArgs[3]); // Order 5989 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5990 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 5991 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5992 break; 5993 case GNUCmpXchg: 5994 SubExprs.push_back(APIOrderedArgs[4]); // Order 5995 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5996 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 5997 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5998 SubExprs.push_back(APIOrderedArgs[3]); // Weak 5999 break; 6000 } 6001 6002 if (SubExprs.size() >= 2 && Form != Init) { 6003 if (Optional<llvm::APSInt> Result = 6004 SubExprs[1]->getIntegerConstantExpr(Context)) 6005 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 6006 Diag(SubExprs[1]->getBeginLoc(), 6007 diag::warn_atomic_op_has_invalid_memory_order) 6008 << SubExprs[1]->getSourceRange(); 6009 } 6010 6011 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 6012 auto *Scope = Args[Args.size() - 1]; 6013 if (Optional<llvm::APSInt> Result = 6014 Scope->getIntegerConstantExpr(Context)) { 6015 if (!ScopeModel->isValid(Result->getZExtValue())) 6016 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 6017 << Scope->getSourceRange(); 6018 } 6019 SubExprs.push_back(Scope); 6020 } 6021 6022 AtomicExpr *AE = new (Context) 6023 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 6024 6025 if ((Op == AtomicExpr::AO__c11_atomic_load || 6026 Op == AtomicExpr::AO__c11_atomic_store || 6027 Op == AtomicExpr::AO__opencl_atomic_load || 6028 Op == AtomicExpr::AO__hip_atomic_load || 6029 Op == AtomicExpr::AO__opencl_atomic_store || 6030 Op == AtomicExpr::AO__hip_atomic_store) && 6031 Context.AtomicUsesUnsupportedLibcall(AE)) 6032 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 6033 << ((Op == AtomicExpr::AO__c11_atomic_load || 6034 Op == AtomicExpr::AO__opencl_atomic_load || 6035 Op == AtomicExpr::AO__hip_atomic_load) 6036 ? 0 6037 : 1); 6038 6039 if (ValType->isBitIntType()) { 6040 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit); 6041 return ExprError(); 6042 } 6043 6044 return AE; 6045 } 6046 6047 /// checkBuiltinArgument - Given a call to a builtin function, perform 6048 /// normal type-checking on the given argument, updating the call in 6049 /// place. This is useful when a builtin function requires custom 6050 /// type-checking for some of its arguments but not necessarily all of 6051 /// them. 6052 /// 6053 /// Returns true on error. 6054 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 6055 FunctionDecl *Fn = E->getDirectCallee(); 6056 assert(Fn && "builtin call without direct callee!"); 6057 6058 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 6059 InitializedEntity Entity = 6060 InitializedEntity::InitializeParameter(S.Context, Param); 6061 6062 ExprResult Arg = E->getArg(0); 6063 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 6064 if (Arg.isInvalid()) 6065 return true; 6066 6067 E->setArg(ArgIndex, Arg.get()); 6068 return false; 6069 } 6070 6071 /// We have a call to a function like __sync_fetch_and_add, which is an 6072 /// overloaded function based on the pointer type of its first argument. 
6073 /// The main BuildCallExpr routines have already promoted the types of 6074 /// arguments because all of these calls are prototyped as void(...). 6075 /// 6076 /// This function goes through and does final semantic checking for these 6077 /// builtins, as well as generating any warnings. 6078 ExprResult 6079 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 6080 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 6081 Expr *Callee = TheCall->getCallee(); 6082 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 6083 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6084 6085 // Ensure that we have at least one argument to do type inference from. 6086 if (TheCall->getNumArgs() < 1) { 6087 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6088 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 6089 return ExprError(); 6090 } 6091 6092 // Inspect the first argument of the atomic builtin. This should always be 6093 // a pointer type, whose element is an integral scalar or pointer type. 6094 // Because it is a pointer type, we don't have to worry about any implicit 6095 // casts here. 6096 // FIXME: We don't allow floating point scalars as input. 6097 Expr *FirstArg = TheCall->getArg(0); 6098 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 6099 if (FirstArgResult.isInvalid()) 6100 return ExprError(); 6101 FirstArg = FirstArgResult.get(); 6102 TheCall->setArg(0, FirstArg); 6103 6104 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 6105 if (!pointerType) { 6106 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 6107 << FirstArg->getType() << FirstArg->getSourceRange(); 6108 return ExprError(); 6109 } 6110 6111 QualType ValType = pointerType->getPointeeType(); 6112 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6113 !ValType->isBlockPointerType()) { 6114 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 6115 << FirstArg->getType() << FirstArg->getSourceRange(); 6116 return ExprError(); 6117 } 6118 6119 if (ValType.isConstQualified()) { 6120 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 6121 << FirstArg->getType() << FirstArg->getSourceRange(); 6122 return ExprError(); 6123 } 6124 6125 switch (ValType.getObjCLifetime()) { 6126 case Qualifiers::OCL_None: 6127 case Qualifiers::OCL_ExplicitNone: 6128 // okay 6129 break; 6130 6131 case Qualifiers::OCL_Weak: 6132 case Qualifiers::OCL_Strong: 6133 case Qualifiers::OCL_Autoreleasing: 6134 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 6135 << ValType << FirstArg->getSourceRange(); 6136 return ExprError(); 6137 } 6138 6139 // Strip any qualifiers off ValType. 6140 ValType = ValType.getUnqualifiedType(); 6141 6142 // The majority of builtins return a value, but a few have special return 6143 // types, so allow them to override appropriately below. 6144 QualType ResultType = ValType; 6145 6146 // We need to figure out which concrete builtin this maps onto. For example, 6147 // __sync_fetch_and_add with a 2 byte object turns into 6148 // __sync_fetch_and_add_2. 
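  // Illustrative (hypothetical) user code, not part of this file: on a target
  // where short is 2 bytes,
  //   short *p;
  //   __sync_fetch_and_add(p, 1);   // resolves to __sync_fetch_and_add_2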
6149 #define BUILTIN_ROW(x) \ 6150 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 6151 Builtin::BI##x##_8, Builtin::BI##x##_16 } 6152 6153 static const unsigned BuiltinIndices[][5] = { 6154 BUILTIN_ROW(__sync_fetch_and_add), 6155 BUILTIN_ROW(__sync_fetch_and_sub), 6156 BUILTIN_ROW(__sync_fetch_and_or), 6157 BUILTIN_ROW(__sync_fetch_and_and), 6158 BUILTIN_ROW(__sync_fetch_and_xor), 6159 BUILTIN_ROW(__sync_fetch_and_nand), 6160 6161 BUILTIN_ROW(__sync_add_and_fetch), 6162 BUILTIN_ROW(__sync_sub_and_fetch), 6163 BUILTIN_ROW(__sync_and_and_fetch), 6164 BUILTIN_ROW(__sync_or_and_fetch), 6165 BUILTIN_ROW(__sync_xor_and_fetch), 6166 BUILTIN_ROW(__sync_nand_and_fetch), 6167 6168 BUILTIN_ROW(__sync_val_compare_and_swap), 6169 BUILTIN_ROW(__sync_bool_compare_and_swap), 6170 BUILTIN_ROW(__sync_lock_test_and_set), 6171 BUILTIN_ROW(__sync_lock_release), 6172 BUILTIN_ROW(__sync_swap) 6173 }; 6174 #undef BUILTIN_ROW 6175 6176 // Determine the index of the size. 6177 unsigned SizeIndex; 6178 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 6179 case 1: SizeIndex = 0; break; 6180 case 2: SizeIndex = 1; break; 6181 case 4: SizeIndex = 2; break; 6182 case 8: SizeIndex = 3; break; 6183 case 16: SizeIndex = 4; break; 6184 default: 6185 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 6186 << FirstArg->getType() << FirstArg->getSourceRange(); 6187 return ExprError(); 6188 } 6189 6190 // Each of these builtins has one pointer argument, followed by some number of 6191 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 6192 // that we ignore. Find out which row of BuiltinIndices to read from as well 6193 // as the number of fixed args. 6194 unsigned BuiltinID = FDecl->getBuiltinID(); 6195 unsigned BuiltinIndex, NumFixed = 1; 6196 bool WarnAboutSemanticsChange = false; 6197 switch (BuiltinID) { 6198 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 6199 case Builtin::BI__sync_fetch_and_add: 6200 case Builtin::BI__sync_fetch_and_add_1: 6201 case Builtin::BI__sync_fetch_and_add_2: 6202 case Builtin::BI__sync_fetch_and_add_4: 6203 case Builtin::BI__sync_fetch_and_add_8: 6204 case Builtin::BI__sync_fetch_and_add_16: 6205 BuiltinIndex = 0; 6206 break; 6207 6208 case Builtin::BI__sync_fetch_and_sub: 6209 case Builtin::BI__sync_fetch_and_sub_1: 6210 case Builtin::BI__sync_fetch_and_sub_2: 6211 case Builtin::BI__sync_fetch_and_sub_4: 6212 case Builtin::BI__sync_fetch_and_sub_8: 6213 case Builtin::BI__sync_fetch_and_sub_16: 6214 BuiltinIndex = 1; 6215 break; 6216 6217 case Builtin::BI__sync_fetch_and_or: 6218 case Builtin::BI__sync_fetch_and_or_1: 6219 case Builtin::BI__sync_fetch_and_or_2: 6220 case Builtin::BI__sync_fetch_and_or_4: 6221 case Builtin::BI__sync_fetch_and_or_8: 6222 case Builtin::BI__sync_fetch_and_or_16: 6223 BuiltinIndex = 2; 6224 break; 6225 6226 case Builtin::BI__sync_fetch_and_and: 6227 case Builtin::BI__sync_fetch_and_and_1: 6228 case Builtin::BI__sync_fetch_and_and_2: 6229 case Builtin::BI__sync_fetch_and_and_4: 6230 case Builtin::BI__sync_fetch_and_and_8: 6231 case Builtin::BI__sync_fetch_and_and_16: 6232 BuiltinIndex = 3; 6233 break; 6234 6235 case Builtin::BI__sync_fetch_and_xor: 6236 case Builtin::BI__sync_fetch_and_xor_1: 6237 case Builtin::BI__sync_fetch_and_xor_2: 6238 case Builtin::BI__sync_fetch_and_xor_4: 6239 case Builtin::BI__sync_fetch_and_xor_8: 6240 case Builtin::BI__sync_fetch_and_xor_16: 6241 BuiltinIndex = 4; 6242 break; 6243 6244 case Builtin::BI__sync_fetch_and_nand: 6245 case 
Builtin::BI__sync_fetch_and_nand_1: 6246 case Builtin::BI__sync_fetch_and_nand_2: 6247 case Builtin::BI__sync_fetch_and_nand_4: 6248 case Builtin::BI__sync_fetch_and_nand_8: 6249 case Builtin::BI__sync_fetch_and_nand_16: 6250 BuiltinIndex = 5; 6251 WarnAboutSemanticsChange = true; 6252 break; 6253 6254 case Builtin::BI__sync_add_and_fetch: 6255 case Builtin::BI__sync_add_and_fetch_1: 6256 case Builtin::BI__sync_add_and_fetch_2: 6257 case Builtin::BI__sync_add_and_fetch_4: 6258 case Builtin::BI__sync_add_and_fetch_8: 6259 case Builtin::BI__sync_add_and_fetch_16: 6260 BuiltinIndex = 6; 6261 break; 6262 6263 case Builtin::BI__sync_sub_and_fetch: 6264 case Builtin::BI__sync_sub_and_fetch_1: 6265 case Builtin::BI__sync_sub_and_fetch_2: 6266 case Builtin::BI__sync_sub_and_fetch_4: 6267 case Builtin::BI__sync_sub_and_fetch_8: 6268 case Builtin::BI__sync_sub_and_fetch_16: 6269 BuiltinIndex = 7; 6270 break; 6271 6272 case Builtin::BI__sync_and_and_fetch: 6273 case Builtin::BI__sync_and_and_fetch_1: 6274 case Builtin::BI__sync_and_and_fetch_2: 6275 case Builtin::BI__sync_and_and_fetch_4: 6276 case Builtin::BI__sync_and_and_fetch_8: 6277 case Builtin::BI__sync_and_and_fetch_16: 6278 BuiltinIndex = 8; 6279 break; 6280 6281 case Builtin::BI__sync_or_and_fetch: 6282 case Builtin::BI__sync_or_and_fetch_1: 6283 case Builtin::BI__sync_or_and_fetch_2: 6284 case Builtin::BI__sync_or_and_fetch_4: 6285 case Builtin::BI__sync_or_and_fetch_8: 6286 case Builtin::BI__sync_or_and_fetch_16: 6287 BuiltinIndex = 9; 6288 break; 6289 6290 case Builtin::BI__sync_xor_and_fetch: 6291 case Builtin::BI__sync_xor_and_fetch_1: 6292 case Builtin::BI__sync_xor_and_fetch_2: 6293 case Builtin::BI__sync_xor_and_fetch_4: 6294 case Builtin::BI__sync_xor_and_fetch_8: 6295 case Builtin::BI__sync_xor_and_fetch_16: 6296 BuiltinIndex = 10; 6297 break; 6298 6299 case Builtin::BI__sync_nand_and_fetch: 6300 case Builtin::BI__sync_nand_and_fetch_1: 6301 case Builtin::BI__sync_nand_and_fetch_2: 6302 case Builtin::BI__sync_nand_and_fetch_4: 6303 case Builtin::BI__sync_nand_and_fetch_8: 6304 case Builtin::BI__sync_nand_and_fetch_16: 6305 BuiltinIndex = 11; 6306 WarnAboutSemanticsChange = true; 6307 break; 6308 6309 case Builtin::BI__sync_val_compare_and_swap: 6310 case Builtin::BI__sync_val_compare_and_swap_1: 6311 case Builtin::BI__sync_val_compare_and_swap_2: 6312 case Builtin::BI__sync_val_compare_and_swap_4: 6313 case Builtin::BI__sync_val_compare_and_swap_8: 6314 case Builtin::BI__sync_val_compare_and_swap_16: 6315 BuiltinIndex = 12; 6316 NumFixed = 2; 6317 break; 6318 6319 case Builtin::BI__sync_bool_compare_and_swap: 6320 case Builtin::BI__sync_bool_compare_and_swap_1: 6321 case Builtin::BI__sync_bool_compare_and_swap_2: 6322 case Builtin::BI__sync_bool_compare_and_swap_4: 6323 case Builtin::BI__sync_bool_compare_and_swap_8: 6324 case Builtin::BI__sync_bool_compare_and_swap_16: 6325 BuiltinIndex = 13; 6326 NumFixed = 2; 6327 ResultType = Context.BoolTy; 6328 break; 6329 6330 case Builtin::BI__sync_lock_test_and_set: 6331 case Builtin::BI__sync_lock_test_and_set_1: 6332 case Builtin::BI__sync_lock_test_and_set_2: 6333 case Builtin::BI__sync_lock_test_and_set_4: 6334 case Builtin::BI__sync_lock_test_and_set_8: 6335 case Builtin::BI__sync_lock_test_and_set_16: 6336 BuiltinIndex = 14; 6337 break; 6338 6339 case Builtin::BI__sync_lock_release: 6340 case Builtin::BI__sync_lock_release_1: 6341 case Builtin::BI__sync_lock_release_2: 6342 case Builtin::BI__sync_lock_release_4: 6343 case Builtin::BI__sync_lock_release_8: 6344 case 
Builtin::BI__sync_lock_release_16: 6345 BuiltinIndex = 15; 6346 NumFixed = 0; 6347 ResultType = Context.VoidTy; 6348 break; 6349 6350 case Builtin::BI__sync_swap: 6351 case Builtin::BI__sync_swap_1: 6352 case Builtin::BI__sync_swap_2: 6353 case Builtin::BI__sync_swap_4: 6354 case Builtin::BI__sync_swap_8: 6355 case Builtin::BI__sync_swap_16: 6356 BuiltinIndex = 16; 6357 break; 6358 } 6359 6360 // Now that we know how many fixed arguments we expect, first check that we 6361 // have at least that many. 6362 if (TheCall->getNumArgs() < 1+NumFixed) { 6363 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6364 << 0 << 1 + NumFixed << TheCall->getNumArgs() 6365 << Callee->getSourceRange(); 6366 return ExprError(); 6367 } 6368 6369 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 6370 << Callee->getSourceRange(); 6371 6372 if (WarnAboutSemanticsChange) { 6373 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 6374 << Callee->getSourceRange(); 6375 } 6376 6377 // Get the decl for the concrete builtin from this, we can tell what the 6378 // concrete integer type we should convert to is. 6379 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 6380 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 6381 FunctionDecl *NewBuiltinDecl; 6382 if (NewBuiltinID == BuiltinID) 6383 NewBuiltinDecl = FDecl; 6384 else { 6385 // Perform builtin lookup to avoid redeclaring it. 6386 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 6387 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 6388 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 6389 assert(Res.getFoundDecl()); 6390 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 6391 if (!NewBuiltinDecl) 6392 return ExprError(); 6393 } 6394 6395 // The first argument --- the pointer --- has a fixed type; we 6396 // deduce the types of the rest of the arguments accordingly. Walk 6397 // the remaining arguments, converting them to the deduced value type. 6398 for (unsigned i = 0; i != NumFixed; ++i) { 6399 ExprResult Arg = TheCall->getArg(i+1); 6400 6401 // GCC does an implicit conversion to the pointer or integer ValType. This 6402 // can fail in some cases (1i -> int**), check for this error case now. 6403 // Initialize the argument. 6404 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6405 ValType, /*consume*/ false); 6406 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6407 if (Arg.isInvalid()) 6408 return ExprError(); 6409 6410 // Okay, we have something that *can* be converted to the right type. Check 6411 // to see if there is a potentially weird extension going on here. This can 6412 // happen when you do an atomic operation on something like an char* and 6413 // pass in 42. The 42 gets converted to char. This is even more strange 6414 // for things like 45.123 -> char, etc. 6415 // FIXME: Do this check. 6416 TheCall->setArg(i+1, Arg.get()); 6417 } 6418 6419 // Create a new DeclRefExpr to refer to the new decl. 6420 DeclRefExpr *NewDRE = DeclRefExpr::Create( 6421 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 6422 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 6423 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 6424 6425 // Set the callee in the CallExpr. 6426 // FIXME: This loses syntactic information. 
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  // Prohibit problematic uses of bit-precise integer types with atomic
  // builtins. The arguments would have already been converted to the first
  // argument's type, so we only need to check the first argument.
  const auto *BitIntValType = ValType->getAs<BitIntType>();
  if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
    Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
    return ExprError();
  }

  return TheCallResult;
}

/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins.
ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
  unsigned BuiltinID = FDecl->getBuiltinID();
  assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
          BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
         "Unexpected nontemporal load/store builtin!");
  bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
  unsigned numArgs = isStore ? 2 : 1;

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, numArgs))
    return ExprError();

  // Inspect the last argument of the nontemporal builtin. This should always
  // be a pointer type, from which we infer the type of the memory access.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(numArgs - 1);
  ExprResult PointerArgResult =
      DefaultFunctionArrayLvalueConversion(PointerArg);

  if (PointerArgResult.isInvalid())
    return ExprError();
  PointerArg = PointerArgResult.get();
  TheCall->setArg(numArgs - 1, PointerArg);

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  QualType ValType = pointerType->getPointeeType();

  // Strip any qualifiers off ValType.
6494 ValType = ValType.getUnqualifiedType(); 6495 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6496 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 6497 !ValType->isVectorType()) { 6498 Diag(DRE->getBeginLoc(), 6499 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 6500 << PointerArg->getType() << PointerArg->getSourceRange(); 6501 return ExprError(); 6502 } 6503 6504 if (!isStore) { 6505 TheCall->setType(ValType); 6506 return TheCallResult; 6507 } 6508 6509 ExprResult ValArg = TheCall->getArg(0); 6510 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6511 Context, ValType, /*consume*/ false); 6512 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 6513 if (ValArg.isInvalid()) 6514 return ExprError(); 6515 6516 TheCall->setArg(0, ValArg.get()); 6517 TheCall->setType(Context.VoidTy); 6518 return TheCallResult; 6519 } 6520 6521 /// CheckObjCString - Checks that the argument to the builtin 6522 /// CFString constructor is correct 6523 /// Note: It might also make sense to do the UTF-16 conversion here (would 6524 /// simplify the backend). 6525 bool Sema::CheckObjCString(Expr *Arg) { 6526 Arg = Arg->IgnoreParenCasts(); 6527 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 6528 6529 if (!Literal || !Literal->isAscii()) { 6530 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 6531 << Arg->getSourceRange(); 6532 return true; 6533 } 6534 6535 if (Literal->containsNonAsciiOrNull()) { 6536 StringRef String = Literal->getString(); 6537 unsigned NumBytes = String.size(); 6538 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 6539 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 6540 llvm::UTF16 *ToPtr = &ToBuf[0]; 6541 6542 llvm::ConversionResult Result = 6543 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 6544 ToPtr + NumBytes, llvm::strictConversion); 6545 // Check for conversion failure. 6546 if (Result != llvm::conversionOK) 6547 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 6548 << Arg->getSourceRange(); 6549 } 6550 return false; 6551 } 6552 6553 /// CheckObjCString - Checks that the format string argument to the os_log() 6554 /// and os_trace() functions is correct, and converts it to const char *. 6555 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 6556 Arg = Arg->IgnoreParenCasts(); 6557 auto *Literal = dyn_cast<StringLiteral>(Arg); 6558 if (!Literal) { 6559 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 6560 Literal = ObjcLiteral->getString(); 6561 } 6562 } 6563 6564 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 6565 return ExprError( 6566 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 6567 << Arg->getSourceRange()); 6568 } 6569 6570 ExprResult Result(Literal); 6571 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 6572 InitializedEntity Entity = 6573 InitializedEntity::InitializeParameter(Context, ResultTy, false); 6574 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 6575 return Result; 6576 } 6577 6578 /// Check that the user is calling the appropriate va_start builtin for the 6579 /// target and calling convention. 
6580 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 6581 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 6582 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 6583 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 6584 TT.getArch() == llvm::Triple::aarch64_32); 6585 bool IsWindows = TT.isOSWindows(); 6586 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 6587 if (IsX64 || IsAArch64) { 6588 CallingConv CC = CC_C; 6589 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 6590 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 6591 if (IsMSVAStart) { 6592 // Don't allow this in System V ABI functions. 6593 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 6594 return S.Diag(Fn->getBeginLoc(), 6595 diag::err_ms_va_start_used_in_sysv_function); 6596 } else { 6597 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 6598 // On x64 Windows, don't allow this in System V ABI functions. 6599 // (Yes, that means there's no corresponding way to support variadic 6600 // System V ABI functions on Windows.) 6601 if ((IsWindows && CC == CC_X86_64SysV) || 6602 (!IsWindows && CC == CC_Win64)) 6603 return S.Diag(Fn->getBeginLoc(), 6604 diag::err_va_start_used_in_wrong_abi_function) 6605 << !IsWindows; 6606 } 6607 return false; 6608 } 6609 6610 if (IsMSVAStart) 6611 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 6612 return false; 6613 } 6614 6615 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 6616 ParmVarDecl **LastParam = nullptr) { 6617 // Determine whether the current function, block, or obj-c method is variadic 6618 // and get its parameter list. 6619 bool IsVariadic = false; 6620 ArrayRef<ParmVarDecl *> Params; 6621 DeclContext *Caller = S.CurContext; 6622 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 6623 IsVariadic = Block->isVariadic(); 6624 Params = Block->parameters(); 6625 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 6626 IsVariadic = FD->isVariadic(); 6627 Params = FD->parameters(); 6628 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 6629 IsVariadic = MD->isVariadic(); 6630 // FIXME: This isn't correct for methods (results in bogus warning). 6631 Params = MD->parameters(); 6632 } else if (isa<CapturedDecl>(Caller)) { 6633 // We don't support va_start in a CapturedDecl. 6634 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 6635 return true; 6636 } else { 6637 // This must be some other declcontext that parses exprs. 6638 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 6639 return true; 6640 } 6641 6642 if (!IsVariadic) { 6643 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 6644 return true; 6645 } 6646 6647 if (LastParam) 6648 *LastParam = Params.empty() ? nullptr : Params.back(); 6649 6650 return false; 6651 } 6652 6653 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 6654 /// for validity. Emit an error and return true on failure; return false 6655 /// on success. 6656 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 6657 Expr *Fn = TheCall->getCallee(); 6658 6659 if (checkVAStartABI(*this, BuiltinID, Fn)) 6660 return true; 6661 6662 if (checkArgCount(*this, TheCall, 2)) 6663 return true; 6664 6665 // Type-check the first argument normally. 6666 if (checkBuiltinArgument(*this, TheCall, 0)) 6667 return true; 6668 6669 // Check that the current function is variadic, and get its last parameter. 
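  // Illustrative (hypothetical) user code, not part of this file:
  //   void f(int a, int last, ...) {
  //     va_list ap;
  //     va_start(ap, last);   // OK: 'last' is the last named parameter
  //     va_start(ap, a);      // warned about below: 'a' is not the last one
  //   }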
6670 ParmVarDecl *LastParam; 6671 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 6672 return true; 6673 6674 // Verify that the second argument to the builtin is the last argument of the 6675 // current function or method. 6676 bool SecondArgIsLastNamedArgument = false; 6677 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 6678 6679 // These are valid if SecondArgIsLastNamedArgument is false after the next 6680 // block. 6681 QualType Type; 6682 SourceLocation ParamLoc; 6683 bool IsCRegister = false; 6684 6685 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 6686 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 6687 SecondArgIsLastNamedArgument = PV == LastParam; 6688 6689 Type = PV->getType(); 6690 ParamLoc = PV->getLocation(); 6691 IsCRegister = 6692 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 6693 } 6694 } 6695 6696 if (!SecondArgIsLastNamedArgument) 6697 Diag(TheCall->getArg(1)->getBeginLoc(), 6698 diag::warn_second_arg_of_va_start_not_last_named_param); 6699 else if (IsCRegister || Type->isReferenceType() || 6700 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 6701 // Promotable integers are UB, but enumerations need a bit of 6702 // extra checking to see what their promotable type actually is. 6703 if (!Type->isPromotableIntegerType()) 6704 return false; 6705 if (!Type->isEnumeralType()) 6706 return true; 6707 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 6708 return !(ED && 6709 Context.typesAreCompatible(ED->getPromotionType(), Type)); 6710 }()) { 6711 unsigned Reason = 0; 6712 if (Type->isReferenceType()) Reason = 1; 6713 else if (IsCRegister) Reason = 2; 6714 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 6715 Diag(ParamLoc, diag::note_parameter_type) << Type; 6716 } 6717 6718 TheCall->setType(Context.VoidTy); 6719 return false; 6720 } 6721 6722 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 6723 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool { 6724 const LangOptions &LO = getLangOpts(); 6725 6726 if (LO.CPlusPlus) 6727 return Arg->getType() 6728 .getCanonicalType() 6729 .getTypePtr() 6730 ->getPointeeType() 6731 .withoutLocalFastQualifiers() == Context.CharTy; 6732 6733 // In C, allow aliasing through `char *`, this is required for AArch64 at 6734 // least. 6735 return true; 6736 }; 6737 6738 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 6739 // const char *named_addr); 6740 6741 Expr *Func = Call->getCallee(); 6742 6743 if (Call->getNumArgs() < 3) 6744 return Diag(Call->getEndLoc(), 6745 diag::err_typecheck_call_too_few_args_at_least) 6746 << 0 /*function call*/ << 3 << Call->getNumArgs(); 6747 6748 // Type-check the first argument normally. 6749 if (checkBuiltinArgument(*this, Call, 0)) 6750 return true; 6751 6752 // Check that the current function is variadic. 
6753 if (checkVAStartIsInVariadicFunction(*this, Func)) 6754 return true; 6755 6756 // __va_start on Windows does not validate the parameter qualifiers 6757 6758 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 6759 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 6760 6761 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 6762 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 6763 6764 const QualType &ConstCharPtrTy = 6765 Context.getPointerType(Context.CharTy.withConst()); 6766 if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1)) 6767 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 6768 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 6769 << 0 /* qualifier difference */ 6770 << 3 /* parameter mismatch */ 6771 << 2 << Arg1->getType() << ConstCharPtrTy; 6772 6773 const QualType SizeTy = Context.getSizeType(); 6774 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 6775 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 6776 << Arg2->getType() << SizeTy << 1 /* different class */ 6777 << 0 /* qualifier difference */ 6778 << 3 /* parameter mismatch */ 6779 << 3 << Arg2->getType() << SizeTy; 6780 6781 return false; 6782 } 6783 6784 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 6785 /// friends. This is declared to take (...), so we have to check everything. 6786 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 6787 if (checkArgCount(*this, TheCall, 2)) 6788 return true; 6789 6790 ExprResult OrigArg0 = TheCall->getArg(0); 6791 ExprResult OrigArg1 = TheCall->getArg(1); 6792 6793 // Do standard promotions between the two arguments, returning their common 6794 // type. 6795 QualType Res = UsualArithmeticConversions( 6796 OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison); 6797 if (OrigArg0.isInvalid() || OrigArg1.isInvalid()) 6798 return true; 6799 6800 // Make sure any conversions are pushed back into the call; this is 6801 // type safe since unordered compare builtins are declared as "_Bool 6802 // foo(...)". 6803 TheCall->setArg(0, OrigArg0.get()); 6804 TheCall->setArg(1, OrigArg1.get()); 6805 6806 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent()) 6807 return false; 6808 6809 // If the common type isn't a real floating type, then the arguments were 6810 // invalid for this operation. 6811 if (Res.isNull() || !Res->isRealFloatingType()) 6812 return Diag(OrigArg0.get()->getBeginLoc(), 6813 diag::err_typecheck_call_invalid_ordered_compare) 6814 << OrigArg0.get()->getType() << OrigArg1.get()->getType() 6815 << SourceRange(OrigArg0.get()->getBeginLoc(), 6816 OrigArg1.get()->getEndLoc()); 6817 6818 return false; 6819 } 6820 6821 /// SemaBuiltinSemaBuiltinFPClassification - Handle functions like 6822 /// __builtin_isnan and friends. This is declared to take (...), so we have 6823 /// to check everything. We expect the last argument to be a floating point 6824 /// value. 6825 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) { 6826 if (checkArgCount(*this, TheCall, NumArgs)) 6827 return true; 6828 6829 // __builtin_fpclassify is the only case where NumArgs != 1, so we can count 6830 // on all preceding parameters just being int. Try all of those. 
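  // Illustrative (hypothetical) user code, not part of this file:
  //   int k = __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
  //                                FP_SUBNORMAL, FP_ZERO, x);
  // i.e. five integer classification results followed by a single
  // floating-point operand, which gets the special handling below.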
6831 for (unsigned i = 0; i < NumArgs - 1; ++i) { 6832 Expr *Arg = TheCall->getArg(i); 6833 6834 if (Arg->isTypeDependent()) 6835 return false; 6836 6837 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing); 6838 6839 if (Res.isInvalid()) 6840 return true; 6841 TheCall->setArg(i, Res.get()); 6842 } 6843 6844 Expr *OrigArg = TheCall->getArg(NumArgs-1); 6845 6846 if (OrigArg->isTypeDependent()) 6847 return false; 6848 6849 // Usual Unary Conversions will convert half to float, which we want for 6850 // machines that use fp16 conversion intrinsics. Else, we wnat to leave the 6851 // type how it is, but do normal L->Rvalue conversions. 6852 if (Context.getTargetInfo().useFP16ConversionIntrinsics()) 6853 OrigArg = UsualUnaryConversions(OrigArg).get(); 6854 else 6855 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get(); 6856 TheCall->setArg(NumArgs - 1, OrigArg); 6857 6858 // This operation requires a non-_Complex floating-point number. 6859 if (!OrigArg->getType()->isRealFloatingType()) 6860 return Diag(OrigArg->getBeginLoc(), 6861 diag::err_typecheck_call_invalid_unary_fp) 6862 << OrigArg->getType() << OrigArg->getSourceRange(); 6863 6864 return false; 6865 } 6866 6867 /// Perform semantic analysis for a call to __builtin_complex. 6868 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 6869 if (checkArgCount(*this, TheCall, 2)) 6870 return true; 6871 6872 bool Dependent = false; 6873 for (unsigned I = 0; I != 2; ++I) { 6874 Expr *Arg = TheCall->getArg(I); 6875 QualType T = Arg->getType(); 6876 if (T->isDependentType()) { 6877 Dependent = true; 6878 continue; 6879 } 6880 6881 // Despite supporting _Complex int, GCC requires a real floating point type 6882 // for the operands of __builtin_complex. 6883 if (!T->isRealFloatingType()) { 6884 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 6885 << Arg->getType() << Arg->getSourceRange(); 6886 } 6887 6888 ExprResult Converted = DefaultLvalueConversion(Arg); 6889 if (Converted.isInvalid()) 6890 return true; 6891 TheCall->setArg(I, Converted.get()); 6892 } 6893 6894 if (Dependent) { 6895 TheCall->setType(Context.DependentTy); 6896 return false; 6897 } 6898 6899 Expr *Real = TheCall->getArg(0); 6900 Expr *Imag = TheCall->getArg(1); 6901 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 6902 return Diag(Real->getBeginLoc(), 6903 diag::err_typecheck_call_different_arg_types) 6904 << Real->getType() << Imag->getType() 6905 << Real->getSourceRange() << Imag->getSourceRange(); 6906 } 6907 6908 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 6909 // don't allow this builtin to form those types either. 6910 // FIXME: Should we allow these types? 6911 if (Real->getType()->isFloat16Type()) 6912 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6913 << "_Float16"; 6914 if (Real->getType()->isHalfType()) 6915 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6916 << "half"; 6917 6918 TheCall->setType(Context.getComplexType(Real->getType())); 6919 return false; 6920 } 6921 6922 // Customized Sema Checking for VSX builtins that have the following signature: 6923 // vector [...] builtinName(vector [...], vector [...], const int); 6924 // Which takes the same type of vectors (any legal vector type) for the first 6925 // two arguments and takes compile time constant for the third argument. 
6926 // Example builtins are : 6927 // vector double vec_xxpermdi(vector double, vector double, int); 6928 // vector short vec_xxsldwi(vector short, vector short, int); 6929 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 6930 unsigned ExpectedNumArgs = 3; 6931 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 6932 return true; 6933 6934 // Check the third argument is a compile time constant 6935 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 6936 return Diag(TheCall->getBeginLoc(), 6937 diag::err_vsx_builtin_nonconstant_argument) 6938 << 3 /* argument index */ << TheCall->getDirectCallee() 6939 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 6940 TheCall->getArg(2)->getEndLoc()); 6941 6942 QualType Arg1Ty = TheCall->getArg(0)->getType(); 6943 QualType Arg2Ty = TheCall->getArg(1)->getType(); 6944 6945 // Check the type of argument 1 and argument 2 are vectors. 6946 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 6947 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 6948 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 6949 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 6950 << TheCall->getDirectCallee() 6951 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6952 TheCall->getArg(1)->getEndLoc()); 6953 } 6954 6955 // Check the first two arguments are the same type. 6956 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 6957 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 6958 << TheCall->getDirectCallee() 6959 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6960 TheCall->getArg(1)->getEndLoc()); 6961 } 6962 6963 // When default clang type checking is turned off and the customized type 6964 // checking is used, the returning type of the function must be explicitly 6965 // set. Otherwise it is _Bool by default. 6966 TheCall->setType(Arg1Ty); 6967 6968 return false; 6969 } 6970 6971 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 6972 // This is declared to take (...), so we have to check everything. 6973 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 6974 if (TheCall->getNumArgs() < 2) 6975 return ExprError(Diag(TheCall->getEndLoc(), 6976 diag::err_typecheck_call_too_few_args_at_least) 6977 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 6978 << TheCall->getSourceRange()); 6979 6980 // Determine which of the following types of shufflevector we're checking: 6981 // 1) unary, vector mask: (lhs, mask) 6982 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 6983 QualType resType = TheCall->getArg(0)->getType(); 6984 unsigned numElements = 0; 6985 6986 if (!TheCall->getArg(0)->isTypeDependent() && 6987 !TheCall->getArg(1)->isTypeDependent()) { 6988 QualType LHSType = TheCall->getArg(0)->getType(); 6989 QualType RHSType = TheCall->getArg(1)->getType(); 6990 6991 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 6992 return ExprError( 6993 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 6994 << TheCall->getDirectCallee() 6995 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6996 TheCall->getArg(1)->getEndLoc())); 6997 6998 numElements = LHSType->castAs<VectorType>()->getNumElements(); 6999 unsigned numResElements = TheCall->getNumArgs() - 2; 7000 7001 // Check to see if we have a call with 2 vector arguments, the unary shuffle 7002 // with mask. If so, verify that RHS is an integer vector type with the 7003 // same number of elts as lhs. 
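    // Illustrative (hypothetical) user code, not part of this file, where
    // 'v4si' is some 4-element integer vector type:
    //   v4si a, b, mask;
    //   __builtin_shufflevector(a, mask);           // unary form, vector mask
    //   __builtin_shufflevector(a, b, 0, 1, 4, 5);  // binary form, constant indices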
7004 if (TheCall->getNumArgs() == 2) { 7005 if (!RHSType->hasIntegerRepresentation() || 7006 RHSType->castAs<VectorType>()->getNumElements() != numElements) 7007 return ExprError(Diag(TheCall->getBeginLoc(), 7008 diag::err_vec_builtin_incompatible_vector) 7009 << TheCall->getDirectCallee() 7010 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 7011 TheCall->getArg(1)->getEndLoc())); 7012 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 7013 return ExprError(Diag(TheCall->getBeginLoc(), 7014 diag::err_vec_builtin_incompatible_vector) 7015 << TheCall->getDirectCallee() 7016 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7017 TheCall->getArg(1)->getEndLoc())); 7018 } else if (numElements != numResElements) { 7019 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 7020 resType = Context.getVectorType(eltType, numResElements, 7021 VectorType::GenericVector); 7022 } 7023 } 7024 7025 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 7026 if (TheCall->getArg(i)->isTypeDependent() || 7027 TheCall->getArg(i)->isValueDependent()) 7028 continue; 7029 7030 Optional<llvm::APSInt> Result; 7031 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 7032 return ExprError(Diag(TheCall->getBeginLoc(), 7033 diag::err_shufflevector_nonconstant_argument) 7034 << TheCall->getArg(i)->getSourceRange()); 7035 7036 // Allow -1 which will be translated to undef in the IR. 7037 if (Result->isSigned() && Result->isAllOnes()) 7038 continue; 7039 7040 if (Result->getActiveBits() > 64 || 7041 Result->getZExtValue() >= numElements * 2) 7042 return ExprError(Diag(TheCall->getBeginLoc(), 7043 diag::err_shufflevector_argument_too_large) 7044 << TheCall->getArg(i)->getSourceRange()); 7045 } 7046 7047 SmallVector<Expr*, 32> exprs; 7048 7049 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 7050 exprs.push_back(TheCall->getArg(i)); 7051 TheCall->setArg(i, nullptr); 7052 } 7053 7054 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 7055 TheCall->getCallee()->getBeginLoc(), 7056 TheCall->getRParenLoc()); 7057 } 7058 7059 /// SemaConvertVectorExpr - Handle __builtin_convertvector 7060 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 7061 SourceLocation BuiltinLoc, 7062 SourceLocation RParenLoc) { 7063 ExprValueKind VK = VK_PRValue; 7064 ExprObjectKind OK = OK_Ordinary; 7065 QualType DstTy = TInfo->getType(); 7066 QualType SrcTy = E->getType(); 7067 7068 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 7069 return ExprError(Diag(BuiltinLoc, 7070 diag::err_convertvector_non_vector) 7071 << E->getSourceRange()); 7072 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 7073 return ExprError(Diag(BuiltinLoc, 7074 diag::err_convertvector_non_vector_type)); 7075 7076 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 7077 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 7078 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 7079 if (SrcElts != DstElts) 7080 return ExprError(Diag(BuiltinLoc, 7081 diag::err_convertvector_incompatible_vector) 7082 << E->getSourceRange()); 7083 } 7084 7085 return new (Context) 7086 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 7087 } 7088 7089 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 7090 // This is declared to take (const void*, ...) and can take two 7091 // optional constant int args. 
7092 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 7093 unsigned NumArgs = TheCall->getNumArgs(); 7094 7095 if (NumArgs > 3) 7096 return Diag(TheCall->getEndLoc(), 7097 diag::err_typecheck_call_too_many_args_at_most) 7098 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7099 7100 // Argument 0 is checked for us and the remaining arguments must be 7101 // constant integers. 7102 for (unsigned i = 1; i != NumArgs; ++i) 7103 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 7104 return true; 7105 7106 return false; 7107 } 7108 7109 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 7110 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 7111 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 7112 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 7113 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7114 if (checkArgCount(*this, TheCall, 1)) 7115 return true; 7116 Expr *Arg = TheCall->getArg(0); 7117 if (Arg->isInstantiationDependent()) 7118 return false; 7119 7120 QualType ArgTy = Arg->getType(); 7121 if (!ArgTy->hasFloatingRepresentation()) 7122 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 7123 << ArgTy; 7124 if (Arg->isLValue()) { 7125 ExprResult FirstArg = DefaultLvalueConversion(Arg); 7126 TheCall->setArg(0, FirstArg.get()); 7127 } 7128 TheCall->setType(TheCall->getArg(0)->getType()); 7129 return false; 7130 } 7131 7132 /// SemaBuiltinAssume - Handle __assume (MS Extension). 7133 // __assume does not evaluate its arguments, and should warn if its argument 7134 // has side effects. 7135 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 7136 Expr *Arg = TheCall->getArg(0); 7137 if (Arg->isInstantiationDependent()) return false; 7138 7139 if (Arg->HasSideEffects(Context)) 7140 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 7141 << Arg->getSourceRange() 7142 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 7143 7144 return false; 7145 } 7146 7147 /// Handle __builtin_alloca_with_align. This is declared 7148 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 7149 /// than 8. 7150 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 7151 // The alignment must be a constant integer. 7152 Expr *Arg = TheCall->getArg(1); 7153 7154 // We can't check the value of a dependent argument. 7155 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7156 if (const auto *UE = 7157 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 7158 if (UE->getKind() == UETT_AlignOf || 7159 UE->getKind() == UETT_PreferredAlignOf) 7160 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 7161 << Arg->getSourceRange(); 7162 7163 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 7164 7165 if (!Result.isPowerOf2()) 7166 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7167 << Arg->getSourceRange(); 7168 7169 if (Result < Context.getCharWidth()) 7170 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 7171 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 7172 7173 if (Result > std::numeric_limits<int32_t>::max()) 7174 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 7175 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 7176 } 7177 7178 return false; 7179 } 7180 7181 /// Handle __builtin_assume_aligned. This is declared 7182 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
7183 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 7184 unsigned NumArgs = TheCall->getNumArgs(); 7185 7186 if (NumArgs > 3) 7187 return Diag(TheCall->getEndLoc(), 7188 diag::err_typecheck_call_too_many_args_at_most) 7189 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7190 7191 // The alignment must be a constant integer. 7192 Expr *Arg = TheCall->getArg(1); 7193 7194 // We can't check the value of a dependent argument. 7195 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7196 llvm::APSInt Result; 7197 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7198 return true; 7199 7200 if (!Result.isPowerOf2()) 7201 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7202 << Arg->getSourceRange(); 7203 7204 if (Result > Sema::MaximumAlignment) 7205 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 7206 << Arg->getSourceRange() << Sema::MaximumAlignment; 7207 } 7208 7209 if (NumArgs > 2) { 7210 ExprResult Arg(TheCall->getArg(2)); 7211 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 7212 Context.getSizeType(), false); 7213 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7214 if (Arg.isInvalid()) return true; 7215 TheCall->setArg(2, Arg.get()); 7216 } 7217 7218 return false; 7219 } 7220 7221 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 7222 unsigned BuiltinID = 7223 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 7224 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 7225 7226 unsigned NumArgs = TheCall->getNumArgs(); 7227 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 7228 if (NumArgs < NumRequiredArgs) { 7229 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 7230 << 0 /* function call */ << NumRequiredArgs << NumArgs 7231 << TheCall->getSourceRange(); 7232 } 7233 if (NumArgs >= NumRequiredArgs + 0x100) { 7234 return Diag(TheCall->getEndLoc(), 7235 diag::err_typecheck_call_too_many_args_at_most) 7236 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 7237 << TheCall->getSourceRange(); 7238 } 7239 unsigned i = 0; 7240 7241 // For formatting call, check buffer arg. 7242 if (!IsSizeCall) { 7243 ExprResult Arg(TheCall->getArg(i)); 7244 InitializedEntity Entity = InitializedEntity::InitializeParameter( 7245 Context, Context.VoidPtrTy, false); 7246 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7247 if (Arg.isInvalid()) 7248 return true; 7249 TheCall->setArg(i, Arg.get()); 7250 i++; 7251 } 7252 7253 // Check string literal arg. 7254 unsigned FormatIdx = i; 7255 { 7256 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 7257 if (Arg.isInvalid()) 7258 return true; 7259 TheCall->setArg(i, Arg.get()); 7260 i++; 7261 } 7262 7263 // Make sure variadic args are scalar. 7264 unsigned FirstDataArg = i; 7265 while (i < NumArgs) { 7266 ExprResult Arg = DefaultVariadicArgumentPromotion( 7267 TheCall->getArg(i), VariadicFunction, nullptr); 7268 if (Arg.isInvalid()) 7269 return true; 7270 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 7271 if (ArgSize.getQuantity() >= 0x100) { 7272 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 7273 << i << (int)ArgSize.getQuantity() << 0xff 7274 << TheCall->getSourceRange(); 7275 } 7276 TheCall->setArg(i, Arg.get()); 7277 i++; 7278 } 7279 7280 // Check formatting specifiers. NOTE: We're only doing this for the non-size 7281 // call to avoid duplicate diagnostics. 
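  // Illustrative (hypothetical) user code, not part of this file:
  //   __builtin_os_log_format(buf, "%d", val);         // buffer, format, data
  //   __builtin_os_log_format_buffer_size("%d", val);  // format, data only
  // The two forms take the same format string, which is why the specifier
  // check below is only performed for the formatting call.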
7282 if (!IsSizeCall) { 7283 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 7284 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 7285 bool Success = CheckFormatArguments( 7286 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 7287 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 7288 CheckedVarArgs); 7289 if (!Success) 7290 return true; 7291 } 7292 7293 if (IsSizeCall) { 7294 TheCall->setType(Context.getSizeType()); 7295 } else { 7296 TheCall->setType(Context.VoidPtrTy); 7297 } 7298 return false; 7299 } 7300 7301 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 7302 /// TheCall is a constant expression. 7303 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 7304 llvm::APSInt &Result) { 7305 Expr *Arg = TheCall->getArg(ArgNum); 7306 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 7307 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 7308 7309 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 7310 7311 Optional<llvm::APSInt> R; 7312 if (!(R = Arg->getIntegerConstantExpr(Context))) 7313 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 7314 << FDecl->getDeclName() << Arg->getSourceRange(); 7315 Result = *R; 7316 return false; 7317 } 7318 7319 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 7320 /// TheCall is a constant expression in the range [Low, High]. 7321 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 7322 int Low, int High, bool RangeIsError) { 7323 if (isConstantEvaluated()) 7324 return false; 7325 llvm::APSInt Result; 7326 7327 // We can't check the value of a dependent argument. 7328 Expr *Arg = TheCall->getArg(ArgNum); 7329 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7330 return false; 7331 7332 // Check constant-ness first. 7333 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7334 return true; 7335 7336 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 7337 if (RangeIsError) 7338 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 7339 << toString(Result, 10) << Low << High << Arg->getSourceRange(); 7340 else 7341 // Defer the warning until we know if the code will be emitted so that 7342 // dead code can ignore this. 7343 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 7344 PDiag(diag::warn_argument_invalid_range) 7345 << toString(Result, 10) << Low << High 7346 << Arg->getSourceRange()); 7347 } 7348 7349 return false; 7350 } 7351 7352 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr 7353 /// TheCall is a constant expression is a multiple of Num.. 7354 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, 7355 unsigned Num) { 7356 llvm::APSInt Result; 7357 7358 // We can't check the value of a dependent argument. 7359 Expr *Arg = TheCall->getArg(ArgNum); 7360 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7361 return false; 7362 7363 // Check constant-ness first. 7364 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7365 return true; 7366 7367 if (Result.getSExtValue() % Num != 0) 7368 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple) 7369 << Num << Arg->getSourceRange(); 7370 7371 return false; 7372 } 7373 7374 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a 7375 /// constant expression representing a power of 2. 
7376 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 7377 llvm::APSInt Result; 7378 7379 // We can't check the value of a dependent argument. 7380 Expr *Arg = TheCall->getArg(ArgNum); 7381 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7382 return false; 7383 7384 // Check constant-ness first. 7385 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7386 return true; 7387 7388 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 7389 // and only if x is a power of 2. 7390 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 7391 return false; 7392 7393 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 7394 << Arg->getSourceRange(); 7395 } 7396 7397 static bool IsShiftedByte(llvm::APSInt Value) { 7398 if (Value.isNegative()) 7399 return false; 7400 7401 // Check if it's a shifted byte, by shifting it down 7402 while (true) { 7403 // If the value fits in the bottom byte, the check passes. 7404 if (Value < 0x100) 7405 return true; 7406 7407 // Otherwise, if the value has _any_ bits in the bottom byte, the check 7408 // fails. 7409 if ((Value & 0xFF) != 0) 7410 return false; 7411 7412 // If the bottom 8 bits are all 0, but something above that is nonzero, 7413 // then shifting the value right by 8 bits won't affect whether it's a 7414 // shifted byte or not. So do that, and go round again. 7415 Value >>= 8; 7416 } 7417 } 7418 7419 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 7420 /// a constant expression representing an arbitrary byte value shifted left by 7421 /// a multiple of 8 bits. 7422 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 7423 unsigned ArgBits) { 7424 llvm::APSInt Result; 7425 7426 // We can't check the value of a dependent argument. 7427 Expr *Arg = TheCall->getArg(ArgNum); 7428 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7429 return false; 7430 7431 // Check constant-ness first. 7432 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7433 return true; 7434 7435 // Truncate to the given size. 7436 Result = Result.getLoBits(ArgBits); 7437 Result.setIsUnsigned(true); 7438 7439 if (IsShiftedByte(Result)) 7440 return false; 7441 7442 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 7443 << Arg->getSourceRange(); 7444 } 7445 7446 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 7447 /// TheCall is a constant expression representing either a shifted byte value, 7448 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 7449 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 7450 /// Arm MVE intrinsics. 7451 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 7452 int ArgNum, 7453 unsigned ArgBits) { 7454 llvm::APSInt Result; 7455 7456 // We can't check the value of a dependent argument. 7457 Expr *Arg = TheCall->getArg(ArgNum); 7458 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7459 return false; 7460 7461 // Check constant-ness first. 7462 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7463 return true; 7464 7465 // Truncate to the given size. 7466 Result = Result.getLoBits(ArgBits); 7467 Result.setIsUnsigned(true); 7468 7469 // Check to see if it's in either of the required forms. 
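  // Illustrative values: 0x3400 is a shifted byte (0x34 << 8) and 0x12FF lies
  // in the 0x??FF progression, so both are accepted; 0x1234 matches neither
  // form and is rejected.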
7470 if (IsShiftedByte(Result) || 7471 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 7472 return false; 7473 7474 return Diag(TheCall->getBeginLoc(), 7475 diag::err_argument_not_shifted_byte_or_xxff) 7476 << Arg->getSourceRange(); 7477 } 7478 7479 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 7480 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 7481 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 7482 if (checkArgCount(*this, TheCall, 2)) 7483 return true; 7484 Expr *Arg0 = TheCall->getArg(0); 7485 Expr *Arg1 = TheCall->getArg(1); 7486 7487 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7488 if (FirstArg.isInvalid()) 7489 return true; 7490 QualType FirstArgType = FirstArg.get()->getType(); 7491 if (!FirstArgType->isAnyPointerType()) 7492 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7493 << "first" << FirstArgType << Arg0->getSourceRange(); 7494 TheCall->setArg(0, FirstArg.get()); 7495 7496 ExprResult SecArg = DefaultLvalueConversion(Arg1); 7497 if (SecArg.isInvalid()) 7498 return true; 7499 QualType SecArgType = SecArg.get()->getType(); 7500 if (!SecArgType->isIntegerType()) 7501 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7502 << "second" << SecArgType << Arg1->getSourceRange(); 7503 7504 // Derive the return type from the pointer argument. 7505 TheCall->setType(FirstArgType); 7506 return false; 7507 } 7508 7509 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 7510 if (checkArgCount(*this, TheCall, 2)) 7511 return true; 7512 7513 Expr *Arg0 = TheCall->getArg(0); 7514 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7515 if (FirstArg.isInvalid()) 7516 return true; 7517 QualType FirstArgType = FirstArg.get()->getType(); 7518 if (!FirstArgType->isAnyPointerType()) 7519 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7520 << "first" << FirstArgType << Arg0->getSourceRange(); 7521 TheCall->setArg(0, FirstArg.get()); 7522 7523 // Derive the return type from the pointer argument. 
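    // (Illustratively, __builtin_arm_addg(p, 4) with 'p' of type 'int *' gives
    // the call the type 'int *' as well.)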
7524 TheCall->setType(FirstArgType); 7525 7526 // Second arg must be an constant in range [0,15] 7527 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7528 } 7529 7530 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 7531 if (checkArgCount(*this, TheCall, 2)) 7532 return true; 7533 Expr *Arg0 = TheCall->getArg(0); 7534 Expr *Arg1 = TheCall->getArg(1); 7535 7536 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7537 if (FirstArg.isInvalid()) 7538 return true; 7539 QualType FirstArgType = FirstArg.get()->getType(); 7540 if (!FirstArgType->isAnyPointerType()) 7541 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7542 << "first" << FirstArgType << Arg0->getSourceRange(); 7543 7544 QualType SecArgType = Arg1->getType(); 7545 if (!SecArgType->isIntegerType()) 7546 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7547 << "second" << SecArgType << Arg1->getSourceRange(); 7548 TheCall->setType(Context.IntTy); 7549 return false; 7550 } 7551 7552 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 7553 BuiltinID == AArch64::BI__builtin_arm_stg) { 7554 if (checkArgCount(*this, TheCall, 1)) 7555 return true; 7556 Expr *Arg0 = TheCall->getArg(0); 7557 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7558 if (FirstArg.isInvalid()) 7559 return true; 7560 7561 QualType FirstArgType = FirstArg.get()->getType(); 7562 if (!FirstArgType->isAnyPointerType()) 7563 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7564 << "first" << FirstArgType << Arg0->getSourceRange(); 7565 TheCall->setArg(0, FirstArg.get()); 7566 7567 // Derive the return type from the pointer argument. 7568 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 7569 TheCall->setType(FirstArgType); 7570 return false; 7571 } 7572 7573 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 7574 Expr *ArgA = TheCall->getArg(0); 7575 Expr *ArgB = TheCall->getArg(1); 7576 7577 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 7578 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 7579 7580 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 7581 return true; 7582 7583 QualType ArgTypeA = ArgExprA.get()->getType(); 7584 QualType ArgTypeB = ArgExprB.get()->getType(); 7585 7586 auto isNull = [&] (Expr *E) -> bool { 7587 return E->isNullPointerConstant( 7588 Context, Expr::NPC_ValueDependentIsNotNull); }; 7589 7590 // argument should be either a pointer or null 7591 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 7592 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 7593 << "first" << ArgTypeA << ArgA->getSourceRange(); 7594 7595 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 7596 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 7597 << "second" << ArgTypeB << ArgB->getSourceRange(); 7598 7599 // Ensure Pointee types are compatible 7600 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 7601 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 7602 QualType pointeeA = ArgTypeA->getPointeeType(); 7603 QualType pointeeB = ArgTypeB->getPointeeType(); 7604 if (!Context.typesAreCompatible( 7605 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 7606 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 7607 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 7608 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 7609 << ArgB->getSourceRange(); 7610 } 7611 } 7612 7613 // at least one argument should be pointer type 7614 if 
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 7615 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 7616 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 7617 7618 if (isNull(ArgA)) // adopt type of the other pointer 7619 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 7620 7621 if (isNull(ArgB)) 7622 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 7623 7624 TheCall->setArg(0, ArgExprA.get()); 7625 TheCall->setArg(1, ArgExprB.get()); 7626 TheCall->setType(Context.LongLongTy); 7627 return false; 7628 } 7629 assert(false && "Unhandled ARM MTE intrinsic"); 7630 return true; 7631 } 7632 7633 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 7634 /// TheCall is an ARM/AArch64 special register string literal. 7635 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 7636 int ArgNum, unsigned ExpectedFieldNum, 7637 bool AllowName) { 7638 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 7639 BuiltinID == ARM::BI__builtin_arm_wsr64 || 7640 BuiltinID == ARM::BI__builtin_arm_rsr || 7641 BuiltinID == ARM::BI__builtin_arm_rsrp || 7642 BuiltinID == ARM::BI__builtin_arm_wsr || 7643 BuiltinID == ARM::BI__builtin_arm_wsrp; 7644 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 7645 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 7646 BuiltinID == AArch64::BI__builtin_arm_rsr || 7647 BuiltinID == AArch64::BI__builtin_arm_rsrp || 7648 BuiltinID == AArch64::BI__builtin_arm_wsr || 7649 BuiltinID == AArch64::BI__builtin_arm_wsrp; 7650 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 7651 7652 // We can't check the value of a dependent argument. 7653 Expr *Arg = TheCall->getArg(ArgNum); 7654 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7655 return false; 7656 7657 // Check if the argument is a string literal. 7658 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 7659 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 7660 << Arg->getSourceRange(); 7661 7662 // Check the type of special register given. 7663 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 7664 SmallVector<StringRef, 6> Fields; 7665 Reg.split(Fields, ":"); 7666 7667 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 7668 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 7669 << Arg->getSourceRange(); 7670 7671 // If the string is the name of a register then we cannot check that it is 7672 // valid here but if the string is of one the forms described in ACLE then we 7673 // can check that the supplied fields are integers and within the valid 7674 // ranges. 7675 if (Fields.size() > 1) { 7676 bool FiveFields = Fields.size() == 5; 7677 7678 bool ValidString = true; 7679 if (IsARMBuiltin) { 7680 ValidString &= Fields[0].startswith_insensitive("cp") || 7681 Fields[0].startswith_insensitive("p"); 7682 if (ValidString) 7683 Fields[0] = Fields[0].drop_front( 7684 Fields[0].startswith_insensitive("cp") ? 2 : 1); 7685 7686 ValidString &= Fields[2].startswith_insensitive("c"); 7687 if (ValidString) 7688 Fields[2] = Fields[2].drop_front(1); 7689 7690 if (FiveFields) { 7691 ValidString &= Fields[3].startswith_insensitive("c"); 7692 if (ValidString) 7693 Fields[3] = Fields[3].drop_front(1); 7694 } 7695 } 7696 7697 SmallVector<int, 5> Ranges; 7698 if (FiveFields) 7699 Ranges.append({IsAArch64Builtin ? 
1 : 15, 7, 15, 15, 7}); 7700 else 7701 Ranges.append({15, 7, 15}); 7702 7703 for (unsigned i=0; i<Fields.size(); ++i) { 7704 int IntField; 7705 ValidString &= !Fields[i].getAsInteger(10, IntField); 7706 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 7707 } 7708 7709 if (!ValidString) 7710 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 7711 << Arg->getSourceRange(); 7712 } else if (IsAArch64Builtin && Fields.size() == 1) { 7713 // If the register name is one of those that appear in the condition below 7714 // and the special register builtin being used is one of the write builtins, 7715 // then we require that the argument provided for writing to the register 7716 // is an integer constant expression. This is because it will be lowered to 7717 // an MSR (immediate) instruction, so we need to know the immediate at 7718 // compile time. 7719 if (TheCall->getNumArgs() != 2) 7720 return false; 7721 7722 std::string RegLower = Reg.lower(); 7723 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 7724 RegLower != "pan" && RegLower != "uao") 7725 return false; 7726 7727 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7728 } 7729 7730 return false; 7731 } 7732 7733 /// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity. 7734 /// Emit an error and return true on failure; return false on success. 7735 /// TypeStr is a string containing the type descriptor of the value returned by 7736 /// the builtin and the descriptors of the expected type of the arguments. 7737 bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, 7738 const char *TypeStr) { 7739 7740 assert((TypeStr[0] != '\0') && 7741 "Invalid types in PPC MMA builtin declaration"); 7742 7743 switch (BuiltinID) { 7744 default: 7745 // This function is called in CheckPPCBuiltinFunctionCall where the 7746 // BuiltinID is guaranteed to be an MMA or pair vector memop builtin, here 7747 // we are isolating the pair vector memop builtins that can be used with mma 7748 // off so the default case is every builtin that requires mma and paired 7749 // vector memops. 7750 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 7751 diag::err_ppc_builtin_only_on_arch, "10") || 7752 SemaFeatureCheck(*this, TheCall, "mma", 7753 diag::err_ppc_builtin_only_on_arch, "10")) 7754 return true; 7755 break; 7756 case PPC::BI__builtin_vsx_lxvp: 7757 case PPC::BI__builtin_vsx_stxvp: 7758 case PPC::BI__builtin_vsx_assemble_pair: 7759 case PPC::BI__builtin_vsx_disassemble_pair: 7760 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 7761 diag::err_ppc_builtin_only_on_arch, "10")) 7762 return true; 7763 break; 7764 } 7765 7766 unsigned Mask = 0; 7767 unsigned ArgNum = 0; 7768 7769 // The first type in TypeStr is the type of the value returned by the 7770 // builtin. So we first read that type and change the type of TheCall. 7771 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7772 TheCall->setType(type); 7773 7774 while (*TypeStr != '\0') { 7775 Mask = 0; 7776 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7777 if (ArgNum >= TheCall->getNumArgs()) { 7778 ArgNum++; 7779 break; 7780 } 7781 7782 Expr *Arg = TheCall->getArg(ArgNum); 7783 QualType PassedType = Arg->getType(); 7784 QualType StrippedRVType = PassedType.getCanonicalType(); 7785 7786 // Strip Restrict/Volatile qualifiers. 
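    // (So that, illustratively, an argument declared '__vector_quad *volatile'
    // can still match an expected '__vector_quad *' below.)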
7787 if (StrippedRVType.isRestrictQualified() || 7788 StrippedRVType.isVolatileQualified()) 7789 StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType(); 7790 7791 // The only case where the argument type and expected type are allowed to 7792 // mismatch is if the argument type is a non-void pointer (or array) and 7793 // expected type is a void pointer. 7794 if (StrippedRVType != ExpectedType) 7795 if (!(ExpectedType->isVoidPointerType() && 7796 (StrippedRVType->isPointerType() || StrippedRVType->isArrayType()))) 7797 return Diag(Arg->getBeginLoc(), 7798 diag::err_typecheck_convert_incompatible) 7799 << PassedType << ExpectedType << 1 << 0 << 0; 7800 7801 // If the value of the Mask is not 0, we have a constraint in the size of 7802 // the integer argument so here we ensure the argument is a constant that 7803 // is in the valid range. 7804 if (Mask != 0 && 7805 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true)) 7806 return true; 7807 7808 ArgNum++; 7809 } 7810 7811 // In case we exited early from the previous loop, there are other types to 7812 // read from TypeStr. So we need to read them all to ensure we have the right 7813 // number of arguments in TheCall and if it is not the case, to display a 7814 // better error message. 7815 while (*TypeStr != '\0') { 7816 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7817 ArgNum++; 7818 } 7819 if (checkArgCount(*this, TheCall, ArgNum)) 7820 return true; 7821 7822 return false; 7823 } 7824 7825 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 7826 /// This checks that the target supports __builtin_longjmp and 7827 /// that val is a constant 1. 7828 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 7829 if (!Context.getTargetInfo().hasSjLjLowering()) 7830 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 7831 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7832 7833 Expr *Arg = TheCall->getArg(1); 7834 llvm::APSInt Result; 7835 7836 // TODO: This is less than ideal. Overload this to take a value. 7837 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7838 return true; 7839 7840 if (Result != 1) 7841 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 7842 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 7843 7844 return false; 7845 } 7846 7847 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 7848 /// This checks that the target supports __builtin_setjmp. 7849 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 7850 if (!Context.getTargetInfo().hasSjLjLowering()) 7851 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 7852 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7853 return false; 7854 } 7855 7856 namespace { 7857 7858 class UncoveredArgHandler { 7859 enum { Unknown = -1, AllCovered = -2 }; 7860 7861 signed FirstUncoveredArg = Unknown; 7862 SmallVector<const Expr *, 4> DiagnosticExprs; 7863 7864 public: 7865 UncoveredArgHandler() = default; 7866 7867 bool hasUncoveredArg() const { 7868 return (FirstUncoveredArg >= 0); 7869 } 7870 7871 unsigned getUncoveredArg() const { 7872 assert(hasUncoveredArg() && "no uncovered argument"); 7873 return FirstUncoveredArg; 7874 } 7875 7876 void setAllCovered() { 7877 // A string has been found with all arguments covered, so clear out 7878 // the diagnostics. 
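    // (Illustratively, for printf(cond ? "%d %d" : "%d", a, b) the first
    // branch covers both arguments, so no "data argument not used" warning is
    // kept for the second branch.)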
7879 DiagnosticExprs.clear(); 7880 FirstUncoveredArg = AllCovered; 7881 } 7882 7883 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 7884 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 7885 7886 // Don't update if a previous string covers all arguments. 7887 if (FirstUncoveredArg == AllCovered) 7888 return; 7889 7890 // UncoveredArgHandler tracks the highest uncovered argument index 7891 // and with it all the strings that match this index. 7892 if (NewFirstUncoveredArg == FirstUncoveredArg) 7893 DiagnosticExprs.push_back(StrExpr); 7894 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 7895 DiagnosticExprs.clear(); 7896 DiagnosticExprs.push_back(StrExpr); 7897 FirstUncoveredArg = NewFirstUncoveredArg; 7898 } 7899 } 7900 7901 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 7902 }; 7903 7904 enum StringLiteralCheckType { 7905 SLCT_NotALiteral, 7906 SLCT_UncheckedLiteral, 7907 SLCT_CheckedLiteral 7908 }; 7909 7910 } // namespace 7911 7912 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 7913 BinaryOperatorKind BinOpKind, 7914 bool AddendIsRight) { 7915 unsigned BitWidth = Offset.getBitWidth(); 7916 unsigned AddendBitWidth = Addend.getBitWidth(); 7917 // There might be negative interim results. 7918 if (Addend.isUnsigned()) { 7919 Addend = Addend.zext(++AddendBitWidth); 7920 Addend.setIsSigned(true); 7921 } 7922 // Adjust the bit width of the APSInts. 7923 if (AddendBitWidth > BitWidth) { 7924 Offset = Offset.sext(AddendBitWidth); 7925 BitWidth = AddendBitWidth; 7926 } else if (BitWidth > AddendBitWidth) { 7927 Addend = Addend.sext(BitWidth); 7928 } 7929 7930 bool Ov = false; 7931 llvm::APSInt ResOffset = Offset; 7932 if (BinOpKind == BO_Add) 7933 ResOffset = Offset.sadd_ov(Addend, Ov); 7934 else { 7935 assert(AddendIsRight && BinOpKind == BO_Sub && 7936 "operator must be add or sub with addend on the right"); 7937 ResOffset = Offset.ssub_ov(Addend, Ov); 7938 } 7939 7940 // We add an offset to a pointer here so we should support an offset as big as 7941 // possible. 7942 if (Ov) { 7943 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 7944 "index (intermediate) result too big"); 7945 Offset = Offset.sext(2 * BitWidth); 7946 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 7947 return; 7948 } 7949 7950 Offset = ResOffset; 7951 } 7952 7953 namespace { 7954 7955 // This is a wrapper class around StringLiteral to support offsetted string 7956 // literals as format strings. It takes the offset into account when returning 7957 // the string and its length or the source locations to display notes correctly. 
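// For example (illustrative), with an offset of 3 into the literal "abc%d",
// the wrapper exposes just "%d" while still mapping byte positions back to the
// original literal for diagnostics.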
7958 class FormatStringLiteral { 7959 const StringLiteral *FExpr; 7960 int64_t Offset; 7961 7962 public: 7963 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 7964 : FExpr(fexpr), Offset(Offset) {} 7965 7966 StringRef getString() const { 7967 return FExpr->getString().drop_front(Offset); 7968 } 7969 7970 unsigned getByteLength() const { 7971 return FExpr->getByteLength() - getCharByteWidth() * Offset; 7972 } 7973 7974 unsigned getLength() const { return FExpr->getLength() - Offset; } 7975 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 7976 7977 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 7978 7979 QualType getType() const { return FExpr->getType(); } 7980 7981 bool isAscii() const { return FExpr->isAscii(); } 7982 bool isWide() const { return FExpr->isWide(); } 7983 bool isUTF8() const { return FExpr->isUTF8(); } 7984 bool isUTF16() const { return FExpr->isUTF16(); } 7985 bool isUTF32() const { return FExpr->isUTF32(); } 7986 bool isPascal() const { return FExpr->isPascal(); } 7987 7988 SourceLocation getLocationOfByte( 7989 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 7990 const TargetInfo &Target, unsigned *StartToken = nullptr, 7991 unsigned *StartTokenByteOffset = nullptr) const { 7992 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 7993 StartToken, StartTokenByteOffset); 7994 } 7995 7996 SourceLocation getBeginLoc() const LLVM_READONLY { 7997 return FExpr->getBeginLoc().getLocWithOffset(Offset); 7998 } 7999 8000 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 8001 }; 8002 8003 } // namespace 8004 8005 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 8006 const Expr *OrigFormatExpr, 8007 ArrayRef<const Expr *> Args, 8008 bool HasVAListArg, unsigned format_idx, 8009 unsigned firstDataArg, 8010 Sema::FormatStringType Type, 8011 bool inFunctionCall, 8012 Sema::VariadicCallType CallType, 8013 llvm::SmallBitVector &CheckedVarArgs, 8014 UncoveredArgHandler &UncoveredArg, 8015 bool IgnoreStringsWithoutSpecifiers); 8016 8017 // Determine if an expression is a string literal or constant string. 8018 // If this function returns false on the arguments to a function expecting a 8019 // format string, we will usually need to emit a warning. 8020 // True string literals are then checked by CheckFormatString. 8021 static StringLiteralCheckType 8022 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 8023 bool HasVAListArg, unsigned format_idx, 8024 unsigned firstDataArg, Sema::FormatStringType Type, 8025 Sema::VariadicCallType CallType, bool InFunctionCall, 8026 llvm::SmallBitVector &CheckedVarArgs, 8027 UncoveredArgHandler &UncoveredArg, 8028 llvm::APSInt Offset, 8029 bool IgnoreStringsWithoutSpecifiers = false) { 8030 if (S.isConstantEvaluated()) 8031 return SLCT_NotALiteral; 8032 tryAgain: 8033 assert(Offset.isSigned() && "invalid offset"); 8034 8035 if (E->isTypeDependent() || E->isValueDependent()) 8036 return SLCT_NotALiteral; 8037 8038 E = E->IgnoreParenCasts(); 8039 8040 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 8041 // Technically -Wformat-nonliteral does not warn about this case. 8042 // The behavior of printf and friends in this case is implementation 8043 // dependent. Ideally if the format string cannot be null then 8044 // it should have a 'nonnull' attribute in the function prototype. 
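    // (Illustratively, a call such as printf(nullptr) therefore produces no
    // -Wformat-nonliteral diagnostic from this path.)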
8045 return SLCT_UncheckedLiteral; 8046 8047 switch (E->getStmtClass()) { 8048 case Stmt::BinaryConditionalOperatorClass: 8049 case Stmt::ConditionalOperatorClass: { 8050 // The expression is a literal if both sub-expressions were, and it was 8051 // completely checked only if both sub-expressions were checked. 8052 const AbstractConditionalOperator *C = 8053 cast<AbstractConditionalOperator>(E); 8054 8055 // Determine whether it is necessary to check both sub-expressions, for 8056 // example, because the condition expression is a constant that can be 8057 // evaluated at compile time. 8058 bool CheckLeft = true, CheckRight = true; 8059 8060 bool Cond; 8061 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 8062 S.isConstantEvaluated())) { 8063 if (Cond) 8064 CheckRight = false; 8065 else 8066 CheckLeft = false; 8067 } 8068 8069 // We need to maintain the offsets for the right and the left hand side 8070 // separately to check if every possible indexed expression is a valid 8071 // string literal. They might have different offsets for different string 8072 // literals in the end. 8073 StringLiteralCheckType Left; 8074 if (!CheckLeft) 8075 Left = SLCT_UncheckedLiteral; 8076 else { 8077 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 8078 HasVAListArg, format_idx, firstDataArg, 8079 Type, CallType, InFunctionCall, 8080 CheckedVarArgs, UncoveredArg, Offset, 8081 IgnoreStringsWithoutSpecifiers); 8082 if (Left == SLCT_NotALiteral || !CheckRight) { 8083 return Left; 8084 } 8085 } 8086 8087 StringLiteralCheckType Right = checkFormatStringExpr( 8088 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 8089 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8090 IgnoreStringsWithoutSpecifiers); 8091 8092 return (CheckLeft && Left < Right) ? Left : Right; 8093 } 8094 8095 case Stmt::ImplicitCastExprClass: 8096 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 8097 goto tryAgain; 8098 8099 case Stmt::OpaqueValueExprClass: 8100 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 8101 E = src; 8102 goto tryAgain; 8103 } 8104 return SLCT_NotALiteral; 8105 8106 case Stmt::PredefinedExprClass: 8107 // While __func__, etc., are technically not string literals, they 8108 // cannot contain format specifiers and thus are not a security 8109 // liability. 8110 return SLCT_UncheckedLiteral; 8111 8112 case Stmt::DeclRefExprClass: { 8113 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 8114 8115 // As an exception, do not flag errors for variables binding to 8116 // const string literals. 8117 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 8118 bool isConstant = false; 8119 QualType T = DR->getType(); 8120 8121 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 8122 isConstant = AT->getElementType().isConstant(S.Context); 8123 } else if (const PointerType *PT = T->getAs<PointerType>()) { 8124 isConstant = T.isConstant(S.Context) && 8125 PT->getPointeeType().isConstant(S.Context); 8126 } else if (T->isObjCObjectPointerType()) { 8127 // In ObjC, there is usually no "const ObjectPointer" type, 8128 // so don't check if the pointee type is constant. 
8129 isConstant = T.isConstant(S.Context); 8130 } 8131 8132 if (isConstant) { 8133 if (const Expr *Init = VD->getAnyInitializer()) { 8134 // Look through initializers like const char c[] = { "foo" } 8135 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 8136 if (InitList->isStringLiteralInit()) 8137 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 8138 } 8139 return checkFormatStringExpr(S, Init, Args, 8140 HasVAListArg, format_idx, 8141 firstDataArg, Type, CallType, 8142 /*InFunctionCall*/ false, CheckedVarArgs, 8143 UncoveredArg, Offset); 8144 } 8145 } 8146 8147 // For vprintf* functions (i.e., HasVAListArg==true), we add a 8148 // special check to see if the format string is a function parameter 8149 // of the function calling the printf function. If the function 8150 // has an attribute indicating it is a printf-like function, then we 8151 // should suppress warnings concerning non-literals being used in a call 8152 // to a vprintf function. For example: 8153 // 8154 // void 8155 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 8156 // va_list ap; 8157 // va_start(ap, fmt); 8158 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 8159 // ... 8160 // } 8161 if (HasVAListArg) { 8162 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 8163 if (const Decl *D = dyn_cast<Decl>(PV->getDeclContext())) { 8164 int PVIndex = PV->getFunctionScopeIndex() + 1; 8165 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) { 8166 // adjust for implicit parameter 8167 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) 8168 if (MD->isInstance()) 8169 ++PVIndex; 8170 // We also check if the formats are compatible. 8171 // We can't pass a 'scanf' string to a 'printf' function. 8172 if (PVIndex == PVFormat->getFormatIdx() && 8173 Type == S.GetFormatStringType(PVFormat)) 8174 return SLCT_UncheckedLiteral; 8175 } 8176 } 8177 } 8178 } 8179 } 8180 8181 return SLCT_NotALiteral; 8182 } 8183 8184 case Stmt::CallExprClass: 8185 case Stmt::CXXMemberCallExprClass: { 8186 const CallExpr *CE = cast<CallExpr>(E); 8187 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 8188 bool IsFirst = true; 8189 StringLiteralCheckType CommonResult; 8190 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 8191 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 8192 StringLiteralCheckType Result = checkFormatStringExpr( 8193 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 8194 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8195 IgnoreStringsWithoutSpecifiers); 8196 if (IsFirst) { 8197 CommonResult = Result; 8198 IsFirst = false; 8199 } 8200 } 8201 if (!IsFirst) 8202 return CommonResult; 8203 8204 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 8205 unsigned BuiltinID = FD->getBuiltinID(); 8206 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 8207 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 8208 const Expr *Arg = CE->getArg(0); 8209 return checkFormatStringExpr(S, Arg, Args, 8210 HasVAListArg, format_idx, 8211 firstDataArg, Type, CallType, 8212 InFunctionCall, CheckedVarArgs, 8213 UncoveredArg, Offset, 8214 IgnoreStringsWithoutSpecifiers); 8215 } 8216 } 8217 } 8218 8219 return SLCT_NotALiteral; 8220 } 8221 case Stmt::ObjCMessageExprClass: { 8222 const auto *ME = cast<ObjCMessageExpr>(E); 8223 if (const auto *MD = ME->getMethodDecl()) { 8224 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 8225 // As a special case heuristic, if we're using 
the method -[NSBundle 8226 // localizedStringForKey:value:table:], ignore any key strings that lack 8227 // format specifiers. The idea is that if the key doesn't have any 8228 // format specifiers then it's probably just a key to map to the 8229 // localized strings. If it does have format specifiers though, then it's 8230 // likely that the text of the key is the format string in the 8231 // programmer's language, and should be checked. 8232 const ObjCInterfaceDecl *IFace; 8233 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) && 8234 IFace->getIdentifier()->isStr("NSBundle") && 8235 MD->getSelector().isKeywordSelector( 8236 {"localizedStringForKey", "value", "table"})) { 8237 IgnoreStringsWithoutSpecifiers = true; 8238 } 8239 8240 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex()); 8241 return checkFormatStringExpr( 8242 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 8243 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8244 IgnoreStringsWithoutSpecifiers); 8245 } 8246 } 8247 8248 return SLCT_NotALiteral; 8249 } 8250 case Stmt::ObjCStringLiteralClass: 8251 case Stmt::StringLiteralClass: { 8252 const StringLiteral *StrE = nullptr; 8253 8254 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 8255 StrE = ObjCFExpr->getString(); 8256 else 8257 StrE = cast<StringLiteral>(E); 8258 8259 if (StrE) { 8260 if (Offset.isNegative() || Offset > StrE->getLength()) { 8261 // TODO: It would be better to have an explicit warning for out of 8262 // bounds literals. 8263 return SLCT_NotALiteral; 8264 } 8265 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 8266 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 8267 firstDataArg, Type, InFunctionCall, CallType, 8268 CheckedVarArgs, UncoveredArg, 8269 IgnoreStringsWithoutSpecifiers); 8270 return SLCT_CheckedLiteral; 8271 } 8272 8273 return SLCT_NotALiteral; 8274 } 8275 case Stmt::BinaryOperatorClass: { 8276 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 8277 8278 // A string literal + an int offset is still a string literal.
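    // e.g. printf("abc%d" + 3, x): the constant offset is folded into the
    // literal via sumOffsets and the remaining "%d" is checked as usual.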
8279 if (BinOp->isAdditiveOp()) { 8280 Expr::EvalResult LResult, RResult; 8281 8282 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 8283 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8284 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 8285 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8286 8287 if (LIsInt != RIsInt) { 8288 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 8289 8290 if (LIsInt) { 8291 if (BinOpKind == BO_Add) { 8292 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 8293 E = BinOp->getRHS(); 8294 goto tryAgain; 8295 } 8296 } else { 8297 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 8298 E = BinOp->getLHS(); 8299 goto tryAgain; 8300 } 8301 } 8302 } 8303 8304 return SLCT_NotALiteral; 8305 } 8306 case Stmt::UnaryOperatorClass: { 8307 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 8308 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 8309 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 8310 Expr::EvalResult IndexResult; 8311 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 8312 Expr::SE_NoSideEffects, 8313 S.isConstantEvaluated())) { 8314 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 8315 /*RHS is int*/ true); 8316 E = ASE->getBase(); 8317 goto tryAgain; 8318 } 8319 } 8320 8321 return SLCT_NotALiteral; 8322 } 8323 8324 default: 8325 return SLCT_NotALiteral; 8326 } 8327 } 8328 8329 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 8330 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 8331 .Case("scanf", FST_Scanf) 8332 .Cases("printf", "printf0", FST_Printf) 8333 .Cases("NSString", "CFString", FST_NSString) 8334 .Case("strftime", FST_Strftime) 8335 .Case("strfmon", FST_Strfmon) 8336 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 8337 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 8338 .Case("os_trace", FST_OSLog) 8339 .Case("os_log", FST_OSLog) 8340 .Default(FST_Unknown); 8341 } 8342 8343 /// CheckFormatArguments - Check calls to printf and scanf (and similar 8344 /// functions) for correct use of format strings. 8345 /// Returns true if a format string has been fully checked. 8346 bool Sema::CheckFormatArguments(const FormatAttr *Format, 8347 ArrayRef<const Expr *> Args, 8348 bool IsCXXMember, 8349 VariadicCallType CallType, 8350 SourceLocation Loc, SourceRange Range, 8351 llvm::SmallBitVector &CheckedVarArgs) { 8352 FormatStringInfo FSI; 8353 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 8354 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 8355 FSI.FirstDataArg, GetFormatStringType(Format), 8356 CallType, Loc, Range, CheckedVarArgs); 8357 return false; 8358 } 8359 8360 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 8361 bool HasVAListArg, unsigned format_idx, 8362 unsigned firstDataArg, FormatStringType Type, 8363 VariadicCallType CallType, 8364 SourceLocation Loc, SourceRange Range, 8365 llvm::SmallBitVector &CheckedVarArgs) { 8366 // CHECK: printf/scanf-like function is called with no format string. 8367 if (format_idx >= Args.size()) { 8368 Diag(Loc, diag::warn_missing_format_string) << Range; 8369 return false; 8370 } 8371 8372 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 8373 8374 // CHECK: format string is not a string literal. 8375 // 8376 // Dynamically generated format strings are difficult to 8377 // automatically vet at compile time. 
Requiring that format strings 8378 // are string literals: (1) permits the checking of format strings by 8379 // the compiler and thereby (2) can practically remove the source of 8380 // many format string exploits. 8381 8382 // Format string can be either ObjC string (e.g. @"%d") or 8383 // C string (e.g. "%d") 8384 // ObjC string uses the same format specifiers as C string, so we can use 8385 // the same format string checking logic for both ObjC and C strings. 8386 UncoveredArgHandler UncoveredArg; 8387 StringLiteralCheckType CT = 8388 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 8389 format_idx, firstDataArg, Type, CallType, 8390 /*IsFunctionCall*/ true, CheckedVarArgs, 8391 UncoveredArg, 8392 /*no string offset*/ llvm::APSInt(64, false) = 0); 8393 8394 // Generate a diagnostic where an uncovered argument is detected. 8395 if (UncoveredArg.hasUncoveredArg()) { 8396 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 8397 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 8398 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 8399 } 8400 8401 if (CT != SLCT_NotALiteral) 8402 // Literal format string found, check done! 8403 return CT == SLCT_CheckedLiteral; 8404 8405 // Strftime is particular as it always uses a single 'time' argument, 8406 // so it is safe to pass a non-literal string. 8407 if (Type == FST_Strftime) 8408 return false; 8409 8410 // Do not emit diag when the string param is a macro expansion and the 8411 // format is either NSString or CFString. This is a hack to prevent 8412 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 8413 // which are usually used in place of NS and CF string literals. 8414 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 8415 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 8416 return false; 8417 8418 // If there are no arguments specified, warn with -Wformat-security, otherwise 8419 // warn only with -Wformat-nonliteral. 8420 if (Args.size() == firstDataArg) { 8421 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 8422 << OrigFormatExpr->getSourceRange(); 8423 switch (Type) { 8424 default: 8425 break; 8426 case FST_Kprintf: 8427 case FST_FreeBSDKPrintf: 8428 case FST_Printf: 8429 Diag(FormatLoc, diag::note_format_security_fixit) 8430 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 8431 break; 8432 case FST_NSString: 8433 Diag(FormatLoc, diag::note_format_security_fixit) 8434 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 8435 break; 8436 } 8437 } else { 8438 Diag(FormatLoc, diag::warn_format_nonliteral) 8439 << OrigFormatExpr->getSourceRange(); 8440 } 8441 return false; 8442 } 8443 8444 namespace { 8445 8446 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 8447 protected: 8448 Sema &S; 8449 const FormatStringLiteral *FExpr; 8450 const Expr *OrigFormatExpr; 8451 const Sema::FormatStringType FSType; 8452 const unsigned FirstDataArg; 8453 const unsigned NumDataArgs; 8454 const char *Beg; // Start of format string. 
8455 const bool HasVAListArg; 8456 ArrayRef<const Expr *> Args; 8457 unsigned FormatIdx; 8458 llvm::SmallBitVector CoveredArgs; 8459 bool usesPositionalArgs = false; 8460 bool atFirstArg = true; 8461 bool inFunctionCall; 8462 Sema::VariadicCallType CallType; 8463 llvm::SmallBitVector &CheckedVarArgs; 8464 UncoveredArgHandler &UncoveredArg; 8465 8466 public: 8467 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 8468 const Expr *origFormatExpr, 8469 const Sema::FormatStringType type, unsigned firstDataArg, 8470 unsigned numDataArgs, const char *beg, bool hasVAListArg, 8471 ArrayRef<const Expr *> Args, unsigned formatIdx, 8472 bool inFunctionCall, Sema::VariadicCallType callType, 8473 llvm::SmallBitVector &CheckedVarArgs, 8474 UncoveredArgHandler &UncoveredArg) 8475 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 8476 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 8477 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 8478 inFunctionCall(inFunctionCall), CallType(callType), 8479 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 8480 CoveredArgs.resize(numDataArgs); 8481 CoveredArgs.reset(); 8482 } 8483 8484 void DoneProcessing(); 8485 8486 void HandleIncompleteSpecifier(const char *startSpecifier, 8487 unsigned specifierLen) override; 8488 8489 void HandleInvalidLengthModifier( 8490 const analyze_format_string::FormatSpecifier &FS, 8491 const analyze_format_string::ConversionSpecifier &CS, 8492 const char *startSpecifier, unsigned specifierLen, 8493 unsigned DiagID); 8494 8495 void HandleNonStandardLengthModifier( 8496 const analyze_format_string::FormatSpecifier &FS, 8497 const char *startSpecifier, unsigned specifierLen); 8498 8499 void HandleNonStandardConversionSpecifier( 8500 const analyze_format_string::ConversionSpecifier &CS, 8501 const char *startSpecifier, unsigned specifierLen); 8502 8503 void HandlePosition(const char *startPos, unsigned posLen) override; 8504 8505 void HandleInvalidPosition(const char *startSpecifier, 8506 unsigned specifierLen, 8507 analyze_format_string::PositionContext p) override; 8508 8509 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 8510 8511 void HandleNullChar(const char *nullCharacter) override; 8512 8513 template <typename Range> 8514 static void 8515 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 8516 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 8517 bool IsStringLocation, Range StringRange, 8518 ArrayRef<FixItHint> Fixit = None); 8519 8520 protected: 8521 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 8522 const char *startSpec, 8523 unsigned specifierLen, 8524 const char *csStart, unsigned csLen); 8525 8526 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 8527 const char *startSpec, 8528 unsigned specifierLen); 8529 8530 SourceRange getFormatStringRange(); 8531 CharSourceRange getSpecifierRange(const char *startSpecifier, 8532 unsigned specifierLen); 8533 SourceLocation getLocationOfByte(const char *x); 8534 8535 const Expr *getDataArg(unsigned i) const; 8536 8537 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 8538 const analyze_format_string::ConversionSpecifier &CS, 8539 const char *startSpecifier, unsigned specifierLen, 8540 unsigned argIndex); 8541 8542 template <typename Range> 8543 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 8544 bool IsStringLocation, Range StringRange, 8545 ArrayRef<FixItHint> Fixit = None); 
8546 }; 8547 8548 } // namespace 8549 8550 SourceRange CheckFormatHandler::getFormatStringRange() { 8551 return OrigFormatExpr->getSourceRange(); 8552 } 8553 8554 CharSourceRange CheckFormatHandler:: 8555 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 8556 SourceLocation Start = getLocationOfByte(startSpecifier); 8557 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 8558 8559 // Advance the end SourceLocation by one due to half-open ranges. 8560 End = End.getLocWithOffset(1); 8561 8562 return CharSourceRange::getCharRange(Start, End); 8563 } 8564 8565 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 8566 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 8567 S.getLangOpts(), S.Context.getTargetInfo()); 8568 } 8569 8570 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 8571 unsigned specifierLen){ 8572 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 8573 getLocationOfByte(startSpecifier), 8574 /*IsStringLocation*/true, 8575 getSpecifierRange(startSpecifier, specifierLen)); 8576 } 8577 8578 void CheckFormatHandler::HandleInvalidLengthModifier( 8579 const analyze_format_string::FormatSpecifier &FS, 8580 const analyze_format_string::ConversionSpecifier &CS, 8581 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 8582 using namespace analyze_format_string; 8583 8584 const LengthModifier &LM = FS.getLengthModifier(); 8585 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8586 8587 // See if we know how to fix this length modifier. 8588 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 8589 if (FixedLM) { 8590 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 8591 getLocationOfByte(LM.getStart()), 8592 /*IsStringLocation*/true, 8593 getSpecifierRange(startSpecifier, specifierLen)); 8594 8595 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 8596 << FixedLM->toString() 8597 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 8598 8599 } else { 8600 FixItHint Hint; 8601 if (DiagID == diag::warn_format_nonsensical_length) 8602 Hint = FixItHint::CreateRemoval(LMRange); 8603 8604 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 8605 getLocationOfByte(LM.getStart()), 8606 /*IsStringLocation*/true, 8607 getSpecifierRange(startSpecifier, specifierLen), 8608 Hint); 8609 } 8610 } 8611 8612 void CheckFormatHandler::HandleNonStandardLengthModifier( 8613 const analyze_format_string::FormatSpecifier &FS, 8614 const char *startSpecifier, unsigned specifierLen) { 8615 using namespace analyze_format_string; 8616 8617 const LengthModifier &LM = FS.getLengthModifier(); 8618 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8619 8620 // See if we know how to fix this length modifier. 
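  // (Illustratively, the non-standard BSD modifier in "%qd" is flagged here
  // and, when a correction is known, a fix-it suggesting "%lld" is attached.)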
8621 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 8622 if (FixedLM) { 8623 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8624 << LM.toString() << 0, 8625 getLocationOfByte(LM.getStart()), 8626 /*IsStringLocation*/true, 8627 getSpecifierRange(startSpecifier, specifierLen)); 8628 8629 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 8630 << FixedLM->toString() 8631 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 8632 8633 } else { 8634 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8635 << LM.toString() << 0, 8636 getLocationOfByte(LM.getStart()), 8637 /*IsStringLocation*/true, 8638 getSpecifierRange(startSpecifier, specifierLen)); 8639 } 8640 } 8641 8642 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 8643 const analyze_format_string::ConversionSpecifier &CS, 8644 const char *startSpecifier, unsigned specifierLen) { 8645 using namespace analyze_format_string; 8646 8647 // See if we know how to fix this conversion specifier. 8648 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 8649 if (FixedCS) { 8650 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8651 << CS.toString() << /*conversion specifier*/1, 8652 getLocationOfByte(CS.getStart()), 8653 /*IsStringLocation*/true, 8654 getSpecifierRange(startSpecifier, specifierLen)); 8655 8656 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 8657 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 8658 << FixedCS->toString() 8659 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 8660 } else { 8661 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8662 << CS.toString() << /*conversion specifier*/1, 8663 getLocationOfByte(CS.getStart()), 8664 /*IsStringLocation*/true, 8665 getSpecifierRange(startSpecifier, specifierLen)); 8666 } 8667 } 8668 8669 void CheckFormatHandler::HandlePosition(const char *startPos, 8670 unsigned posLen) { 8671 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 8672 getLocationOfByte(startPos), 8673 /*IsStringLocation*/true, 8674 getSpecifierRange(startPos, posLen)); 8675 } 8676 8677 void 8678 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 8679 analyze_format_string::PositionContext p) { 8680 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 8681 << (unsigned) p, 8682 getLocationOfByte(startPos), /*IsStringLocation*/true, 8683 getSpecifierRange(startPos, posLen)); 8684 } 8685 8686 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 8687 unsigned posLen) { 8688 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 8689 getLocationOfByte(startPos), 8690 /*IsStringLocation*/true, 8691 getSpecifierRange(startPos, posLen)); 8692 } 8693 8694 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 8695 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 8696 // The presence of a null character is likely an error. 8697 EmitFormatDiagnostic( 8698 S.PDiag(diag::warn_printf_format_string_contains_null_char), 8699 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 8700 getFormatStringRange()); 8701 } 8702 } 8703 8704 // Note that this may return NULL if there was an error parsing or building 8705 // one of the argument expressions. 
8706 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 8707 return Args[FirstDataArg + i]; 8708 } 8709 8710 void CheckFormatHandler::DoneProcessing() { 8711 // Does the number of data arguments exceed the number of 8712 // format conversions in the format string? 8713 if (!HasVAListArg) { 8714 // Find any arguments that weren't covered. 8715 CoveredArgs.flip(); 8716 signed notCoveredArg = CoveredArgs.find_first(); 8717 if (notCoveredArg >= 0) { 8718 assert((unsigned)notCoveredArg < NumDataArgs); 8719 UncoveredArg.Update(notCoveredArg, OrigFormatExpr); 8720 } else { 8721 UncoveredArg.setAllCovered(); 8722 } 8723 } 8724 } 8725 8726 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall, 8727 const Expr *ArgExpr) { 8728 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 && 8729 "Invalid state"); 8730 8731 if (!ArgExpr) 8732 return; 8733 8734 SourceLocation Loc = ArgExpr->getBeginLoc(); 8735 8736 if (S.getSourceManager().isInSystemMacro(Loc)) 8737 return; 8738 8739 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used); 8740 for (auto E : DiagnosticExprs) 8741 PDiag << E->getSourceRange(); 8742 8743 CheckFormatHandler::EmitFormatDiagnostic( 8744 S, IsFunctionCall, DiagnosticExprs[0], 8745 PDiag, Loc, /*IsStringLocation*/false, 8746 DiagnosticExprs[0]->getSourceRange()); 8747 } 8748 8749 bool 8750 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex, 8751 SourceLocation Loc, 8752 const char *startSpec, 8753 unsigned specifierLen, 8754 const char *csStart, 8755 unsigned csLen) { 8756 bool keepGoing = true; 8757 if (argIndex < NumDataArgs) { 8758 // Consider the argument covered, even though the specifier doesn't 8759 // make sense. 8760 CoveredArgs.set(argIndex); 8761 } 8762 else { 8763 // If argIndex exceeds the number of data arguments we 8764 // don't issue a warning because that is just a cascade of warnings (and 8765 // they may have intended '%%' anyway). We don't want to continue processing 8766 // the format string after this point, however, as we will likely just get 8767 // gibberish when trying to match arguments. 8768 keepGoing = false; 8769 } 8770 8771 StringRef Specifier(csStart, csLen); 8772 8773 // If the specifier is non-printable, it could be the first byte of a UTF-8 8774 // sequence. In that case, print the UTF-8 code point. If not, print the byte 8775 // hex value.
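  // (Illustratively, a stray multi-byte character such as U+20AC in a
  // specifier is reported as \u20ac, while a lone invalid byte like 0x80 falls
  // back to \x80.)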
8776 std::string CodePointStr; 8777 if (!llvm::sys::locale::isPrint(*csStart)) { 8778 llvm::UTF32 CodePoint; 8779 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 8780 const llvm::UTF8 *E = 8781 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 8782 llvm::ConversionResult Result = 8783 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 8784 8785 if (Result != llvm::conversionOK) { 8786 unsigned char FirstChar = *csStart; 8787 CodePoint = (llvm::UTF32)FirstChar; 8788 } 8789 8790 llvm::raw_string_ostream OS(CodePointStr); 8791 if (CodePoint < 256) 8792 OS << "\\x" << llvm::format("%02x", CodePoint); 8793 else if (CodePoint <= 0xFFFF) 8794 OS << "\\u" << llvm::format("%04x", CodePoint); 8795 else 8796 OS << "\\U" << llvm::format("%08x", CodePoint); 8797 OS.flush(); 8798 Specifier = CodePointStr; 8799 } 8800 8801 EmitFormatDiagnostic( 8802 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 8803 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 8804 8805 return keepGoing; 8806 } 8807 8808 void 8809 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 8810 const char *startSpec, 8811 unsigned specifierLen) { 8812 EmitFormatDiagnostic( 8813 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 8814 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 8815 } 8816 8817 bool 8818 CheckFormatHandler::CheckNumArgs( 8819 const analyze_format_string::FormatSpecifier &FS, 8820 const analyze_format_string::ConversionSpecifier &CS, 8821 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 8822 8823 if (argIndex >= NumDataArgs) { 8824 PartialDiagnostic PDiag = FS.usesPositionalArg() 8825 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 8826 << (argIndex+1) << NumDataArgs) 8827 : S.PDiag(diag::warn_printf_insufficient_data_args); 8828 EmitFormatDiagnostic( 8829 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 8830 getSpecifierRange(startSpecifier, specifierLen)); 8831 8832 // Since more arguments than conversion tokens are given, by extension 8833 // all arguments are covered, so mark this as so. 8834 UncoveredArg.setAllCovered(); 8835 return false; 8836 } 8837 return true; 8838 } 8839 8840 template<typename Range> 8841 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 8842 SourceLocation Loc, 8843 bool IsStringLocation, 8844 Range StringRange, 8845 ArrayRef<FixItHint> FixIt) { 8846 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 8847 Loc, IsStringLocation, StringRange, FixIt); 8848 } 8849 8850 /// If the format string is not within the function call, emit a note 8851 /// so that the function call and string are in diagnostic messages. 8852 /// 8853 /// \param InFunctionCall if true, the format string is within the function 8854 /// call and only one diagnostic message will be produced. Otherwise, an 8855 /// extra note will be emitted pointing to location of the format string. 8856 /// 8857 /// \param ArgumentExpr the expression that is passed as the format string 8858 /// argument in the function call. Used for getting locations when two 8859 /// diagnostics are emitted. 8860 /// 8861 /// \param PDiag the callee should already have provided any strings for the 8862 /// diagnostic message. This function only adds locations and fixits 8863 /// to diagnostics. 8864 /// 8865 /// \param Loc primary location for diagnostic. 
If two diagnostics are 8866 /// required, one will be at Loc and a new SourceLocation will be created for 8867 /// the other one. 8868 /// 8869 /// \param IsStringLocation if true, Loc points to the format string should be 8870 /// used for the note. Otherwise, Loc points to the argument list and will 8871 /// be used with PDiag. 8872 /// 8873 /// \param StringRange some or all of the string to highlight. This is 8874 /// templated so it can accept either a CharSourceRange or a SourceRange. 8875 /// 8876 /// \param FixIt optional fix it hint for the format string. 8877 template <typename Range> 8878 void CheckFormatHandler::EmitFormatDiagnostic( 8879 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 8880 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 8881 Range StringRange, ArrayRef<FixItHint> FixIt) { 8882 if (InFunctionCall) { 8883 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 8884 D << StringRange; 8885 D << FixIt; 8886 } else { 8887 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 8888 << ArgumentExpr->getSourceRange(); 8889 8890 const Sema::SemaDiagnosticBuilder &Note = 8891 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), 8892 diag::note_format_string_defined); 8893 8894 Note << StringRange; 8895 Note << FixIt; 8896 } 8897 } 8898 8899 //===--- CHECK: Printf format string checking ------------------------------===// 8900 8901 namespace { 8902 8903 class CheckPrintfHandler : public CheckFormatHandler { 8904 public: 8905 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 8906 const Expr *origFormatExpr, 8907 const Sema::FormatStringType type, unsigned firstDataArg, 8908 unsigned numDataArgs, bool isObjC, const char *beg, 8909 bool hasVAListArg, ArrayRef<const Expr *> Args, 8910 unsigned formatIdx, bool inFunctionCall, 8911 Sema::VariadicCallType CallType, 8912 llvm::SmallBitVector &CheckedVarArgs, 8913 UncoveredArgHandler &UncoveredArg) 8914 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 8915 numDataArgs, beg, hasVAListArg, Args, formatIdx, 8916 inFunctionCall, CallType, CheckedVarArgs, 8917 UncoveredArg) {} 8918 8919 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 8920 8921 /// Returns true if '%@' specifiers are allowed in the format string. 
8922 bool allowsObjCArg() const { 8923 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 8924 FSType == Sema::FST_OSTrace; 8925 } 8926 8927 bool HandleInvalidPrintfConversionSpecifier( 8928 const analyze_printf::PrintfSpecifier &FS, 8929 const char *startSpecifier, 8930 unsigned specifierLen) override; 8931 8932 void handleInvalidMaskType(StringRef MaskType) override; 8933 8934 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 8935 const char *startSpecifier, unsigned specifierLen, 8936 const TargetInfo &Target) override; 8937 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8938 const char *StartSpecifier, 8939 unsigned SpecifierLen, 8940 const Expr *E); 8941 8942 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 8943 const char *startSpecifier, unsigned specifierLen); 8944 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 8945 const analyze_printf::OptionalAmount &Amt, 8946 unsigned type, 8947 const char *startSpecifier, unsigned specifierLen); 8948 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 8949 const analyze_printf::OptionalFlag &flag, 8950 const char *startSpecifier, unsigned specifierLen); 8951 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 8952 const analyze_printf::OptionalFlag &ignoredFlag, 8953 const analyze_printf::OptionalFlag &flag, 8954 const char *startSpecifier, unsigned specifierLen); 8955 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 8956 const Expr *E); 8957 8958 void HandleEmptyObjCModifierFlag(const char *startFlag, 8959 unsigned flagLen) override; 8960 8961 void HandleInvalidObjCModifierFlag(const char *startFlag, 8962 unsigned flagLen) override; 8963 8964 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 8965 const char *flagsEnd, 8966 const char *conversionPosition) 8967 override; 8968 }; 8969 8970 } // namespace 8971 8972 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 8973 const analyze_printf::PrintfSpecifier &FS, 8974 const char *startSpecifier, 8975 unsigned specifierLen) { 8976 const analyze_printf::PrintfConversionSpecifier &CS = 8977 FS.getConversionSpecifier(); 8978 8979 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8980 getLocationOfByte(CS.getStart()), 8981 startSpecifier, specifierLen, 8982 CS.getStart(), CS.getLength()); 8983 } 8984 8985 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 8986 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 8987 } 8988 8989 bool CheckPrintfHandler::HandleAmount( 8990 const analyze_format_string::OptionalAmount &Amt, 8991 unsigned k, const char *startSpecifier, 8992 unsigned specifierLen) { 8993 if (Amt.hasDataArgument()) { 8994 if (!HasVAListArg) { 8995 unsigned argIndex = Amt.getArgIndex(); 8996 if (argIndex >= NumDataArgs) { 8997 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 8998 << k, 8999 getLocationOfByte(Amt.getStart()), 9000 /*IsStringLocation*/true, 9001 getSpecifierRange(startSpecifier, specifierLen)); 9002 // Don't do any more checking. We will just emit 9003 // spurious errors. 9004 return false; 9005 } 9006 9007 // Type check the data argument. It should be an 'int'. 9008 // Although not in conformance with C99, we also allow the argument to be 9009 // an 'unsigned int' as that is a reasonably safe case. GCC also 9010 // doesn't emit a warning for that case. 
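// Illustrative example (hypothetical call, not from this file): in
//   printf("%*d", 2.5, value);
// the '*' field width consumes its own data argument, so the 'double' 2.5 is
// diagnosed here because that argument must be an 'int' (or 'unsigned int').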
9011 CoveredArgs.set(argIndex); 9012 const Expr *Arg = getDataArg(argIndex); 9013 if (!Arg) 9014 return false; 9015 9016 QualType T = Arg->getType(); 9017 9018 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 9019 assert(AT.isValid()); 9020 9021 if (!AT.matchesType(S.Context, T)) { 9022 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 9023 << k << AT.getRepresentativeTypeName(S.Context) 9024 << T << Arg->getSourceRange(), 9025 getLocationOfByte(Amt.getStart()), 9026 /*IsStringLocation*/true, 9027 getSpecifierRange(startSpecifier, specifierLen)); 9028 // Don't do any more checking. We will just emit 9029 // spurious errors. 9030 return false; 9031 } 9032 } 9033 } 9034 return true; 9035 } 9036 9037 void CheckPrintfHandler::HandleInvalidAmount( 9038 const analyze_printf::PrintfSpecifier &FS, 9039 const analyze_printf::OptionalAmount &Amt, 9040 unsigned type, 9041 const char *startSpecifier, 9042 unsigned specifierLen) { 9043 const analyze_printf::PrintfConversionSpecifier &CS = 9044 FS.getConversionSpecifier(); 9045 9046 FixItHint fixit = 9047 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 9048 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 9049 Amt.getConstantLength())) 9050 : FixItHint(); 9051 9052 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 9053 << type << CS.toString(), 9054 getLocationOfByte(Amt.getStart()), 9055 /*IsStringLocation*/true, 9056 getSpecifierRange(startSpecifier, specifierLen), 9057 fixit); 9058 } 9059 9060 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 9061 const analyze_printf::OptionalFlag &flag, 9062 const char *startSpecifier, 9063 unsigned specifierLen) { 9064 // Warn about pointless flag with a fixit removal. 9065 const analyze_printf::PrintfConversionSpecifier &CS = 9066 FS.getConversionSpecifier(); 9067 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 9068 << flag.toString() << CS.toString(), 9069 getLocationOfByte(flag.getPosition()), 9070 /*IsStringLocation*/true, 9071 getSpecifierRange(startSpecifier, specifierLen), 9072 FixItHint::CreateRemoval( 9073 getSpecifierRange(flag.getPosition(), 1))); 9074 } 9075 9076 void CheckPrintfHandler::HandleIgnoredFlag( 9077 const analyze_printf::PrintfSpecifier &FS, 9078 const analyze_printf::OptionalFlag &ignoredFlag, 9079 const analyze_printf::OptionalFlag &flag, 9080 const char *startSpecifier, 9081 unsigned specifierLen) { 9082 // Warn about ignored flag with a fixit removal. 9083 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 9084 << ignoredFlag.toString() << flag.toString(), 9085 getLocationOfByte(ignoredFlag.getPosition()), 9086 /*IsStringLocation*/true, 9087 getSpecifierRange(startSpecifier, specifierLen), 9088 FixItHint::CreateRemoval( 9089 getSpecifierRange(ignoredFlag.getPosition(), 1))); 9090 } 9091 9092 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 9093 unsigned flagLen) { 9094 // Warn about an empty flag. 9095 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 9096 getLocationOfByte(startFlag), 9097 /*IsStringLocation*/true, 9098 getSpecifierRange(startFlag, flagLen)); 9099 } 9100 9101 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 9102 unsigned flagLen) { 9103 // Warn about an invalid flag. 
9104 auto Range = getSpecifierRange(startFlag, flagLen); 9105 StringRef flag(startFlag, flagLen); 9106 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 9107 getLocationOfByte(startFlag), 9108 /*IsStringLocation*/true, 9109 Range, FixItHint::CreateRemoval(Range)); 9110 } 9111 9112 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 9113 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 9114 // Warn about using '[...]' without a '@' conversion. 9115 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 9116 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 9117 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 9118 getLocationOfByte(conversionPosition), 9119 /*IsStringLocation*/true, 9120 Range, FixItHint::CreateRemoval(Range)); 9121 } 9122 9123 // Determines if the specified is a C++ class or struct containing 9124 // a member with the specified name and kind (e.g. a CXXMethodDecl named 9125 // "c_str()"). 9126 template<typename MemberKind> 9127 static llvm::SmallPtrSet<MemberKind*, 1> 9128 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 9129 const RecordType *RT = Ty->getAs<RecordType>(); 9130 llvm::SmallPtrSet<MemberKind*, 1> Results; 9131 9132 if (!RT) 9133 return Results; 9134 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 9135 if (!RD || !RD->getDefinition()) 9136 return Results; 9137 9138 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 9139 Sema::LookupMemberName); 9140 R.suppressDiagnostics(); 9141 9142 // We just need to include all members of the right kind turned up by the 9143 // filter, at this point. 9144 if (S.LookupQualifiedName(R, RT->getDecl())) 9145 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 9146 NamedDecl *decl = (*I)->getUnderlyingDecl(); 9147 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 9148 Results.insert(FK); 9149 } 9150 return Results; 9151 } 9152 9153 /// Check if we could call '.c_str()' on an object. 9154 /// 9155 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 9156 /// allow the call, or if it would be ambiguous). 9157 bool Sema::hasCStrMethod(const Expr *E) { 9158 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 9159 9160 MethodSet Results = 9161 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 9162 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 9163 MI != ME; ++MI) 9164 if ((*MI)->getMinRequiredArguments() == 0) 9165 return true; 9166 return false; 9167 } 9168 9169 // Check if a (w)string was passed when a (w)char* was needed, and offer a 9170 // better diagnostic if so. AT is assumed to be valid. 9171 // Returns true when a c_str() conversion method is found. 9172 bool CheckPrintfHandler::checkForCStrMembers( 9173 const analyze_printf::ArgType &AT, const Expr *E) { 9174 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 9175 9176 MethodSet Results = 9177 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 9178 9179 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 9180 MI != ME; ++MI) { 9181 const CXXMethodDecl *Method = *MI; 9182 if (Method->getMinRequiredArguments() == 0 && 9183 AT.matchesType(S.Context, Method->getReturnType())) { 9184 // FIXME: Suggest parens if the expression needs them. 
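// Illustrative example (hypothetical): for a std::string 'Name' passed as
//   printf("%s", Name);
// the note below suggests appending ".c_str()", i.e. printf("%s", Name.c_str()).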
9185 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 9186 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 9187 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 9188 return true; 9189 } 9190 } 9191 9192 return false; 9193 } 9194 9195 bool CheckPrintfHandler::HandlePrintfSpecifier( 9196 const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, 9197 unsigned specifierLen, const TargetInfo &Target) { 9198 using namespace analyze_format_string; 9199 using namespace analyze_printf; 9200 9201 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 9202 9203 if (FS.consumesDataArgument()) { 9204 if (atFirstArg) { 9205 atFirstArg = false; 9206 usesPositionalArgs = FS.usesPositionalArg(); 9207 } 9208 else if (usesPositionalArgs != FS.usesPositionalArg()) { 9209 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 9210 startSpecifier, specifierLen); 9211 return false; 9212 } 9213 } 9214 9215 // First check if the field width, precision, and conversion specifier 9216 // have matching data arguments. 9217 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 9218 startSpecifier, specifierLen)) { 9219 return false; 9220 } 9221 9222 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 9223 startSpecifier, specifierLen)) { 9224 return false; 9225 } 9226 9227 if (!CS.consumesDataArgument()) { 9228 // FIXME: Technically specifying a precision or field width here 9229 // makes no sense. Worth issuing a warning at some point. 9230 return true; 9231 } 9232 9233 // Consume the argument. 9234 unsigned argIndex = FS.getArgIndex(); 9235 if (argIndex < NumDataArgs) { 9236 // The check to see if the argIndex is valid will come later. 9237 // We set the bit here because we may exit early from this 9238 // function if we encounter some other error. 9239 CoveredArgs.set(argIndex); 9240 } 9241 9242 // FreeBSD kernel extensions. 9243 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 9244 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 9245 // We need at least two arguments. 9246 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 9247 return false; 9248 9249 // Claim the second argument. 9250 CoveredArgs.set(argIndex + 1); 9251 9252 // Type check the first argument (int for %b, pointer for %D) 9253 const Expr *Ex = getDataArg(argIndex); 9254 const analyze_printf::ArgType &AT = 9255 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 9256 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 9257 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 9258 EmitFormatDiagnostic( 9259 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9260 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 9261 << false << Ex->getSourceRange(), 9262 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9263 getSpecifierRange(startSpecifier, specifierLen)); 9264 9265 // Type check the second argument (char * for both %b and %D) 9266 Ex = getDataArg(argIndex + 1); 9267 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 9268 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 9269 EmitFormatDiagnostic( 9270 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9271 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 9272 << false << Ex->getSourceRange(), 9273 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9274 getSpecifierRange(startSpecifier, specifierLen)); 9275 9276 return true; 9277 } 9278 9279 // Check for using an Objective-C specific conversion specifier 9280 // in a non-ObjC literal. 
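// Illustrative example: '%@' only makes sense where Objective-C objects can be
// formatted (NSString formats, os_log, os_trace); in a plain
//   printf("%@", obj);
// the specifier is reported as invalid by the check below.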
9281 if (!allowsObjCArg() && CS.isObjCArg()) { 9282 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9283 specifierLen); 9284 } 9285 9286 // %P can only be used with os_log. 9287 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 9288 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9289 specifierLen); 9290 } 9291 9292 // %n is not allowed with os_log. 9293 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 9294 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 9295 getLocationOfByte(CS.getStart()), 9296 /*IsStringLocation*/ false, 9297 getSpecifierRange(startSpecifier, specifierLen)); 9298 9299 return true; 9300 } 9301 9302 // Only scalars are allowed for os_trace. 9303 if (FSType == Sema::FST_OSTrace && 9304 (CS.getKind() == ConversionSpecifier::PArg || 9305 CS.getKind() == ConversionSpecifier::sArg || 9306 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 9307 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9308 specifierLen); 9309 } 9310 9311 // Check for use of public/private annotation outside of os_log(). 9312 if (FSType != Sema::FST_OSLog) { 9313 if (FS.isPublic().isSet()) { 9314 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9315 << "public", 9316 getLocationOfByte(FS.isPublic().getPosition()), 9317 /*IsStringLocation*/ false, 9318 getSpecifierRange(startSpecifier, specifierLen)); 9319 } 9320 if (FS.isPrivate().isSet()) { 9321 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9322 << "private", 9323 getLocationOfByte(FS.isPrivate().getPosition()), 9324 /*IsStringLocation*/ false, 9325 getSpecifierRange(startSpecifier, specifierLen)); 9326 } 9327 } 9328 9329 const llvm::Triple &Triple = Target.getTriple(); 9330 if (CS.getKind() == ConversionSpecifier::nArg && 9331 (Triple.isAndroid() || Triple.isOSFuchsia())) { 9332 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported), 9333 getLocationOfByte(CS.getStart()), 9334 /*IsStringLocation*/ false, 9335 getSpecifierRange(startSpecifier, specifierLen)); 9336 } 9337 9338 // Check for invalid use of field width 9339 if (!FS.hasValidFieldWidth()) { 9340 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 9341 startSpecifier, specifierLen); 9342 } 9343 9344 // Check for invalid use of precision 9345 if (!FS.hasValidPrecision()) { 9346 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 9347 startSpecifier, specifierLen); 9348 } 9349 9350 // Precision is mandatory for %P specifier. 9351 if (CS.getKind() == ConversionSpecifier::PArg && 9352 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 9353 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 9354 getLocationOfByte(startSpecifier), 9355 /*IsStringLocation*/ false, 9356 getSpecifierRange(startSpecifier, specifierLen)); 9357 } 9358 9359 // Check each flag does not conflict with any other component. 
9360 if (!FS.hasValidThousandsGroupingPrefix()) 9361 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 9362 if (!FS.hasValidLeadingZeros()) 9363 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 9364 if (!FS.hasValidPlusPrefix()) 9365 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 9366 if (!FS.hasValidSpacePrefix()) 9367 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 9368 if (!FS.hasValidAlternativeForm()) 9369 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 9370 if (!FS.hasValidLeftJustified()) 9371 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 9372 9373 // Check that flags are not ignored by another flag 9374 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 9375 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 9376 startSpecifier, specifierLen); 9377 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 9378 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 9379 startSpecifier, specifierLen); 9380 9381 // Check the length modifier is valid with the given conversion specifier. 9382 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9383 S.getLangOpts())) 9384 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9385 diag::warn_format_nonsensical_length); 9386 else if (!FS.hasStandardLengthModifier()) 9387 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9388 else if (!FS.hasStandardLengthConversionCombination()) 9389 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9390 diag::warn_format_non_standard_conversion_spec); 9391 9392 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9393 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9394 9395 // The remaining checks depend on the data arguments. 9396 if (HasVAListArg) 9397 return true; 9398 9399 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9400 return false; 9401 9402 const Expr *Arg = getDataArg(argIndex); 9403 if (!Arg) 9404 return true; 9405 9406 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 9407 } 9408 9409 static bool requiresParensToAddCast(const Expr *E) { 9410 // FIXME: We should have a general way to reason about operator 9411 // precedence and whether parens are actually needed here. 9412 // Take care of a few common cases where they aren't. 
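// Illustrative examples: 'x', 'f()', and 'a[i]' can be cast directly, as in
// "(NSInteger)x", whereas 'x + y' would have to become "(NSInteger)(x + y)",
// so binary operators fall through to the 'return true' default below.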
9413 const Expr *Inside = E->IgnoreImpCasts(); 9414 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 9415 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 9416 9417 switch (Inside->getStmtClass()) { 9418 case Stmt::ArraySubscriptExprClass: 9419 case Stmt::CallExprClass: 9420 case Stmt::CharacterLiteralClass: 9421 case Stmt::CXXBoolLiteralExprClass: 9422 case Stmt::DeclRefExprClass: 9423 case Stmt::FloatingLiteralClass: 9424 case Stmt::IntegerLiteralClass: 9425 case Stmt::MemberExprClass: 9426 case Stmt::ObjCArrayLiteralClass: 9427 case Stmt::ObjCBoolLiteralExprClass: 9428 case Stmt::ObjCBoxedExprClass: 9429 case Stmt::ObjCDictionaryLiteralClass: 9430 case Stmt::ObjCEncodeExprClass: 9431 case Stmt::ObjCIvarRefExprClass: 9432 case Stmt::ObjCMessageExprClass: 9433 case Stmt::ObjCPropertyRefExprClass: 9434 case Stmt::ObjCStringLiteralClass: 9435 case Stmt::ObjCSubscriptRefExprClass: 9436 case Stmt::ParenExprClass: 9437 case Stmt::StringLiteralClass: 9438 case Stmt::UnaryOperatorClass: 9439 return false; 9440 default: 9441 return true; 9442 } 9443 } 9444 9445 static std::pair<QualType, StringRef> 9446 shouldNotPrintDirectly(const ASTContext &Context, 9447 QualType IntendedTy, 9448 const Expr *E) { 9449 // Use a 'while' to peel off layers of typedefs. 9450 QualType TyTy = IntendedTy; 9451 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 9452 StringRef Name = UserTy->getDecl()->getName(); 9453 QualType CastTy = llvm::StringSwitch<QualType>(Name) 9454 .Case("CFIndex", Context.getNSIntegerType()) 9455 .Case("NSInteger", Context.getNSIntegerType()) 9456 .Case("NSUInteger", Context.getNSUIntegerType()) 9457 .Case("SInt32", Context.IntTy) 9458 .Case("UInt32", Context.UnsignedIntTy) 9459 .Default(QualType()); 9460 9461 if (!CastTy.isNull()) 9462 return std::make_pair(CastTy, Name); 9463 9464 TyTy = UserTy->desugar(); 9465 } 9466 9467 // Strip parens if necessary. 9468 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 9469 return shouldNotPrintDirectly(Context, 9470 PE->getSubExpr()->getType(), 9471 PE->getSubExpr()); 9472 9473 // If this is a conditional expression, then its result type is constructed 9474 // via usual arithmetic conversions and thus there might be no necessary 9475 // typedef sugar there. Recurse to operands to check for NSInteger & 9476 // Co. usage condition. 9477 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 9478 QualType TrueTy, FalseTy; 9479 StringRef TrueName, FalseName; 9480 9481 std::tie(TrueTy, TrueName) = 9482 shouldNotPrintDirectly(Context, 9483 CO->getTrueExpr()->getType(), 9484 CO->getTrueExpr()); 9485 std::tie(FalseTy, FalseName) = 9486 shouldNotPrintDirectly(Context, 9487 CO->getFalseExpr()->getType(), 9488 CO->getFalseExpr()); 9489 9490 if (TrueTy == FalseTy) 9491 return std::make_pair(TrueTy, TrueName); 9492 else if (TrueTy.isNull()) 9493 return std::make_pair(FalseTy, FalseName); 9494 else if (FalseTy.isNull()) 9495 return std::make_pair(TrueTy, TrueName); 9496 } 9497 9498 return std::make_pair(QualType(), StringRef()); 9499 } 9500 9501 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 9502 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 9503 /// type do not count. 9504 static bool 9505 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 9506 QualType From = ICE->getSubExpr()->getType(); 9507 QualType To = ICE->getType(); 9508 // It's an integer promotion if the destination type is the promoted 9509 // source type. 
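// Illustrative examples: a 'short' passed through printf's varargs carries an
// implicit CK_IntegralCast to 'int' (its promoted type), and a 'float'
// argument is promoted to 'double' via CK_FloatingCast; both count as
// argument promotions here, while array-to-pointer decay does not.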
9510 if (ICE->getCastKind() == CK_IntegralCast && 9511 From->isPromotableIntegerType() && 9512 S.Context.getPromotedIntegerType(From) == To) 9513 return true; 9514 // Look through vector types, since we do default argument promotion for 9515 // those in OpenCL. 9516 if (const auto *VecTy = From->getAs<ExtVectorType>()) 9517 From = VecTy->getElementType(); 9518 if (const auto *VecTy = To->getAs<ExtVectorType>()) 9519 To = VecTy->getElementType(); 9520 // It's a floating promotion if the source type is a lower rank. 9521 return ICE->getCastKind() == CK_FloatingCast && 9522 S.Context.getFloatingTypeOrder(From, To) < 0; 9523 } 9524 9525 bool 9526 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9527 const char *StartSpecifier, 9528 unsigned SpecifierLen, 9529 const Expr *E) { 9530 using namespace analyze_format_string; 9531 using namespace analyze_printf; 9532 9533 // Now type check the data expression that matches the 9534 // format specifier. 9535 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 9536 if (!AT.isValid()) 9537 return true; 9538 9539 QualType ExprTy = E->getType(); 9540 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 9541 ExprTy = TET->getUnderlyingExpr()->getType(); 9542 } 9543 9544 // Diagnose attempts to print a boolean value as a character. Unlike other 9545 // -Wformat diagnostics, this is fine from a type perspective, but it still 9546 // doesn't make sense. 9547 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 9548 E->isKnownToHaveBooleanValue()) { 9549 const CharSourceRange &CSR = 9550 getSpecifierRange(StartSpecifier, SpecifierLen); 9551 SmallString<4> FSString; 9552 llvm::raw_svector_ostream os(FSString); 9553 FS.toString(os); 9554 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 9555 << FSString, 9556 E->getExprLoc(), false, CSR); 9557 return true; 9558 } 9559 9560 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 9561 if (Match == analyze_printf::ArgType::Match) 9562 return true; 9563 9564 // Look through argument promotions for our error message's reported type. 9565 // This includes the integral and floating promotions, but excludes array 9566 // and function pointer decay (seeing that an argument intended to be a 9567 // string has type 'char [6]' is probably more confusing than 'char *') and 9568 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 9569 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 9570 if (isArithmeticArgumentPromotion(S, ICE)) { 9571 E = ICE->getSubExpr(); 9572 ExprTy = E->getType(); 9573 9574 // Check if we didn't match because of an implicit cast from a 'char' 9575 // or 'short' to an 'int'. This is done because printf is a varargs 9576 // function. 9577 if (ICE->getType() == S.Context.IntTy || 9578 ICE->getType() == S.Context.UnsignedIntTy) { 9579 // All further checking is done on the subexpression 9580 const analyze_printf::ArgType::MatchKind ImplicitMatch = 9581 AT.matchesType(S.Context, ExprTy); 9582 if (ImplicitMatch == analyze_printf::ArgType::Match) 9583 return true; 9584 if (ImplicitMatch == ArgType::NoMatchPedantic || 9585 ImplicitMatch == ArgType::NoMatchTypeConfusion) 9586 Match = ImplicitMatch; 9587 } 9588 } 9589 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 9590 // Special case for 'a', which has type 'int' in C. 
9591 // Note, however, that we do /not/ want to treat multibyte constants like 9592 // 'MooV' as characters! This form is deprecated but still exists. In 9593 // addition, don't treat expressions as of type 'char' if one byte length 9594 // modifier is provided. 9595 if (ExprTy == S.Context.IntTy && 9596 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 9597 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 9598 ExprTy = S.Context.CharTy; 9599 } 9600 9601 // Look through enums to their underlying type. 9602 bool IsEnum = false; 9603 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 9604 ExprTy = EnumTy->getDecl()->getIntegerType(); 9605 IsEnum = true; 9606 } 9607 9608 // %C in an Objective-C context prints a unichar, not a wchar_t. 9609 // If the argument is an integer of some kind, believe the %C and suggest 9610 // a cast instead of changing the conversion specifier. 9611 QualType IntendedTy = ExprTy; 9612 if (isObjCContext() && 9613 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 9614 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 9615 !ExprTy->isCharType()) { 9616 // 'unichar' is defined as a typedef of unsigned short, but we should 9617 // prefer using the typedef if it is visible. 9618 IntendedTy = S.Context.UnsignedShortTy; 9619 9620 // While we are here, check if the value is an IntegerLiteral that happens 9621 // to be within the valid range. 9622 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 9623 const llvm::APInt &V = IL->getValue(); 9624 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 9625 return true; 9626 } 9627 9628 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 9629 Sema::LookupOrdinaryName); 9630 if (S.LookupName(Result, S.getCurScope())) { 9631 NamedDecl *ND = Result.getFoundDecl(); 9632 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 9633 if (TD->getUnderlyingType() == IntendedTy) 9634 IntendedTy = S.Context.getTypedefType(TD); 9635 } 9636 } 9637 } 9638 9639 // Special-case some of Darwin's platform-independence types by suggesting 9640 // casts to primitive types that are known to be large enough. 9641 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 9642 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 9643 QualType CastTy; 9644 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 9645 if (!CastTy.isNull()) { 9646 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 9647 // (long in ASTContext). Only complain to pedants. 9648 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 9649 (AT.isSizeT() || AT.isPtrdiffT()) && 9650 AT.matchesType(S.Context, CastTy)) 9651 Match = ArgType::NoMatchPedantic; 9652 IntendedTy = CastTy; 9653 ShouldNotPrintDirectly = true; 9654 } 9655 } 9656 9657 // We may be able to offer a FixItHint if it is a supported type. 
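// Illustrative example (hypothetical argument names): for
//   printf("%d", someLong);
// the fixed specifier "%ld" is offered as a replacement; for Darwin's
// NSInteger (handled above) the suggestion is instead a cast of the argument
// to the portable type, e.g. "(long)value" together with "%ld" on LP64.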
9658 PrintfSpecifier fixedFS = FS; 9659 bool Success = 9660 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 9661 9662 if (Success) { 9663 // Get the fix string from the fixed format specifier 9664 SmallString<16> buf; 9665 llvm::raw_svector_ostream os(buf); 9666 fixedFS.toString(os); 9667 9668 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 9669 9670 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 9671 unsigned Diag; 9672 switch (Match) { 9673 case ArgType::Match: llvm_unreachable("expected non-matching"); 9674 case ArgType::NoMatchPedantic: 9675 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 9676 break; 9677 case ArgType::NoMatchTypeConfusion: 9678 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 9679 break; 9680 case ArgType::NoMatch: 9681 Diag = diag::warn_format_conversion_argument_type_mismatch; 9682 break; 9683 } 9684 9685 // In this case, the specifier is wrong and should be changed to match 9686 // the argument. 9687 EmitFormatDiagnostic(S.PDiag(Diag) 9688 << AT.getRepresentativeTypeName(S.Context) 9689 << IntendedTy << IsEnum << E->getSourceRange(), 9690 E->getBeginLoc(), 9691 /*IsStringLocation*/ false, SpecRange, 9692 FixItHint::CreateReplacement(SpecRange, os.str())); 9693 } else { 9694 // The canonical type for formatting this value is different from the 9695 // actual type of the expression. (This occurs, for example, with Darwin's 9696 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 9697 // should be printed as 'long' for 64-bit compatibility.) 9698 // Rather than emitting a normal format/argument mismatch, we want to 9699 // add a cast to the recommended type (and correct the format string 9700 // if necessary). 9701 SmallString<16> CastBuf; 9702 llvm::raw_svector_ostream CastFix(CastBuf); 9703 CastFix << "("; 9704 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 9705 CastFix << ")"; 9706 9707 SmallVector<FixItHint,4> Hints; 9708 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 9709 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 9710 9711 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 9712 // If there's already a cast present, just replace it. 9713 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 9714 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 9715 9716 } else if (!requiresParensToAddCast(E)) { 9717 // If the expression has high enough precedence, 9718 // just write the C-style cast. 9719 Hints.push_back( 9720 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 9721 } else { 9722 // Otherwise, add parens around the expression as well as the cast. 9723 CastFix << "("; 9724 Hints.push_back( 9725 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 9726 9727 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 9728 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 9729 } 9730 9731 if (ShouldNotPrintDirectly) { 9732 // The expression has a type that should not be printed directly. 9733 // We extract the name from the typedef because we don't want to show 9734 // the underlying type in the diagnostic. 9735 StringRef Name; 9736 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 9737 Name = TypedefTy->getDecl()->getName(); 9738 else 9739 Name = CastTyName; 9740 unsigned Diag = Match == ArgType::NoMatchPedantic 9741 ? 
diag::warn_format_argument_needs_cast_pedantic 9742 : diag::warn_format_argument_needs_cast; 9743 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 9744 << E->getSourceRange(), 9745 E->getBeginLoc(), /*IsStringLocation=*/false, 9746 SpecRange, Hints); 9747 } else { 9748 // In this case, the expression could be printed using a different 9749 // specifier, but we've decided that the specifier is probably correct 9750 // and we should cast instead. Just use the normal warning message. 9751 EmitFormatDiagnostic( 9752 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9753 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 9754 << E->getSourceRange(), 9755 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 9756 } 9757 } 9758 } else { 9759 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 9760 SpecifierLen); 9761 // Since the warning for passing non-POD types to variadic functions 9762 // was deferred until now, we emit a warning for non-POD 9763 // arguments here. 9764 switch (S.isValidVarArgType(ExprTy)) { 9765 case Sema::VAK_Valid: 9766 case Sema::VAK_ValidInCXX11: { 9767 unsigned Diag; 9768 switch (Match) { 9769 case ArgType::Match: llvm_unreachable("expected non-matching"); 9770 case ArgType::NoMatchPedantic: 9771 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 9772 break; 9773 case ArgType::NoMatchTypeConfusion: 9774 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 9775 break; 9776 case ArgType::NoMatch: 9777 Diag = diag::warn_format_conversion_argument_type_mismatch; 9778 break; 9779 } 9780 9781 EmitFormatDiagnostic( 9782 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 9783 << IsEnum << CSR << E->getSourceRange(), 9784 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9785 break; 9786 } 9787 case Sema::VAK_Undefined: 9788 case Sema::VAK_MSVCUndefined: 9789 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 9790 << S.getLangOpts().CPlusPlus11 << ExprTy 9791 << CallType 9792 << AT.getRepresentativeTypeName(S.Context) << CSR 9793 << E->getSourceRange(), 9794 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9795 checkForCStrMembers(AT, E); 9796 break; 9797 9798 case Sema::VAK_Invalid: 9799 if (ExprTy->isObjCObjectType()) 9800 EmitFormatDiagnostic( 9801 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 9802 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 9803 << AT.getRepresentativeTypeName(S.Context) << CSR 9804 << E->getSourceRange(), 9805 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9806 else 9807 // FIXME: If this is an initializer list, suggest removing the braces 9808 // or inserting a cast to the target type. 
9809 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
9810 << isa<InitListExpr>(E) << ExprTy << CallType
9811 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
9812 break;
9813 }
9814
9815 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
9816 "format string specifier index out of range");
9817 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
9818 }
9819
9820 return true;
9821 }
9822
9823 //===--- CHECK: Scanf format string checking ------------------------------===//
9824
9825 namespace {
9826
9827 class CheckScanfHandler : public CheckFormatHandler {
9828 public:
9829 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
9830 const Expr *origFormatExpr, Sema::FormatStringType type,
9831 unsigned firstDataArg, unsigned numDataArgs,
9832 const char *beg, bool hasVAListArg,
9833 ArrayRef<const Expr *> Args, unsigned formatIdx,
9834 bool inFunctionCall, Sema::VariadicCallType CallType,
9835 llvm::SmallBitVector &CheckedVarArgs,
9836 UncoveredArgHandler &UncoveredArg)
9837 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
9838 numDataArgs, beg, hasVAListArg, Args, formatIdx,
9839 inFunctionCall, CallType, CheckedVarArgs,
9840 UncoveredArg) {}
9841
9842 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
9843 const char *startSpecifier,
9844 unsigned specifierLen) override;
9845
9846 bool HandleInvalidScanfConversionSpecifier(
9847 const analyze_scanf::ScanfSpecifier &FS,
9848 const char *startSpecifier,
9849 unsigned specifierLen) override;
9850
9851 void HandleIncompleteScanList(const char *start, const char *end) override;
9852 };
9853
9854 } // namespace
9855
9856 void CheckScanfHandler::HandleIncompleteScanList(const char *start,
9857 const char *end) {
9858 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
9859 getLocationOfByte(end), /*IsStringLocation*/true,
9860 getSpecifierRange(start, end - start));
9861 }
9862
9863 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
9864 const analyze_scanf::ScanfSpecifier &FS,
9865 const char *startSpecifier,
9866 unsigned specifierLen) {
9867 const analyze_scanf::ScanfConversionSpecifier &CS =
9868 FS.getConversionSpecifier();
9869
9870 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
9871 getLocationOfByte(CS.getStart()),
9872 startSpecifier, specifierLen,
9873 CS.getStart(), CS.getLength());
9874 }
9875
9876 bool CheckScanfHandler::HandleScanfSpecifier(
9877 const analyze_scanf::ScanfSpecifier &FS,
9878 const char *startSpecifier,
9879 unsigned specifierLen) {
9880 using namespace analyze_scanf;
9881 using namespace analyze_format_string;
9882
9883 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
9884
9885 // Handle case where '%' and '*' don't consume an argument. These shouldn't
9886 // be used to decide if we are using positional arguments consistently.
9887 if (FS.consumesDataArgument()) {
9888 if (atFirstArg) {
9889 atFirstArg = false;
9890 usesPositionalArgs = FS.usesPositionalArg();
9891 }
9892 else if (usesPositionalArgs != FS.usesPositionalArg()) {
9893 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
9894 startSpecifier, specifierLen);
9895 return false;
9896 }
9897 }
9898
9899 // Check that the field width is non-zero.
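// Illustrative example: scanf("%0d", &x) is diagnosed below, since a zero
// field width can never match anything; the fix-it removes the '0'.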
9900 const OptionalAmount &Amt = FS.getFieldWidth(); 9901 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 9902 if (Amt.getConstantAmount() == 0) { 9903 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 9904 Amt.getConstantLength()); 9905 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 9906 getLocationOfByte(Amt.getStart()), 9907 /*IsStringLocation*/true, R, 9908 FixItHint::CreateRemoval(R)); 9909 } 9910 } 9911 9912 if (!FS.consumesDataArgument()) { 9913 // FIXME: Technically specifying a precision or field width here 9914 // makes no sense. Worth issuing a warning at some point. 9915 return true; 9916 } 9917 9918 // Consume the argument. 9919 unsigned argIndex = FS.getArgIndex(); 9920 if (argIndex < NumDataArgs) { 9921 // The check to see if the argIndex is valid will come later. 9922 // We set the bit here because we may exit early from this 9923 // function if we encounter some other error. 9924 CoveredArgs.set(argIndex); 9925 } 9926 9927 // Check the length modifier is valid with the given conversion specifier. 9928 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9929 S.getLangOpts())) 9930 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9931 diag::warn_format_nonsensical_length); 9932 else if (!FS.hasStandardLengthModifier()) 9933 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9934 else if (!FS.hasStandardLengthConversionCombination()) 9935 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9936 diag::warn_format_non_standard_conversion_spec); 9937 9938 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9939 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9940 9941 // The remaining checks depend on the data arguments. 9942 if (HasVAListArg) 9943 return true; 9944 9945 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9946 return false; 9947 9948 // Check that the argument type matches the format specifier. 9949 const Expr *Ex = getDataArg(argIndex); 9950 if (!Ex) 9951 return true; 9952 9953 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 9954 9955 if (!AT.isValid()) { 9956 return true; 9957 } 9958 9959 analyze_format_string::ArgType::MatchKind Match = 9960 AT.matchesType(S.Context, Ex->getType()); 9961 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 9962 if (Match == analyze_format_string::ArgType::Match) 9963 return true; 9964 9965 ScanfSpecifier fixedFS = FS; 9966 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 9967 S.getLangOpts(), S.Context); 9968 9969 unsigned Diag = 9970 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 9971 : diag::warn_format_conversion_argument_type_mismatch; 9972 9973 if (Success) { 9974 // Get the fix string from the fixed format specifier. 
9975 SmallString<128> buf; 9976 llvm::raw_svector_ostream os(buf); 9977 fixedFS.toString(os); 9978 9979 EmitFormatDiagnostic( 9980 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 9981 << Ex->getType() << false << Ex->getSourceRange(), 9982 Ex->getBeginLoc(), 9983 /*IsStringLocation*/ false, 9984 getSpecifierRange(startSpecifier, specifierLen), 9985 FixItHint::CreateReplacement( 9986 getSpecifierRange(startSpecifier, specifierLen), os.str())); 9987 } else { 9988 EmitFormatDiagnostic(S.PDiag(Diag) 9989 << AT.getRepresentativeTypeName(S.Context) 9990 << Ex->getType() << false << Ex->getSourceRange(), 9991 Ex->getBeginLoc(), 9992 /*IsStringLocation*/ false, 9993 getSpecifierRange(startSpecifier, specifierLen)); 9994 } 9995 9996 return true; 9997 } 9998 9999 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 10000 const Expr *OrigFormatExpr, 10001 ArrayRef<const Expr *> Args, 10002 bool HasVAListArg, unsigned format_idx, 10003 unsigned firstDataArg, 10004 Sema::FormatStringType Type, 10005 bool inFunctionCall, 10006 Sema::VariadicCallType CallType, 10007 llvm::SmallBitVector &CheckedVarArgs, 10008 UncoveredArgHandler &UncoveredArg, 10009 bool IgnoreStringsWithoutSpecifiers) { 10010 // CHECK: is the format string a wide literal? 10011 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 10012 CheckFormatHandler::EmitFormatDiagnostic( 10013 S, inFunctionCall, Args[format_idx], 10014 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 10015 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 10016 return; 10017 } 10018 10019 // Str - The format string. NOTE: this is NOT null-terminated! 10020 StringRef StrRef = FExpr->getString(); 10021 const char *Str = StrRef.data(); 10022 // Account for cases where the string literal is truncated in a declaration. 10023 const ConstantArrayType *T = 10024 S.Context.getAsConstantArrayType(FExpr->getType()); 10025 assert(T && "String literal not of constant array type!"); 10026 size_t TypeSize = T->getSize().getZExtValue(); 10027 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 10028 const unsigned numDataArgs = Args.size() - firstDataArg; 10029 10030 if (IgnoreStringsWithoutSpecifiers && 10031 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 10032 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 10033 return; 10034 10035 // Emit a warning if the string literal is truncated and does not contain an 10036 // embedded null character. 10037 if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) { 10038 CheckFormatHandler::EmitFormatDiagnostic( 10039 S, inFunctionCall, Args[format_idx], 10040 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 10041 FExpr->getBeginLoc(), 10042 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 10043 return; 10044 } 10045 10046 // CHECK: empty format string? 
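// Illustrative example: printf("", value) is diagnosed just below, because an
// empty format string can never consume its data arguments.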
10047 if (StrLen == 0 && numDataArgs > 0) {
10048 CheckFormatHandler::EmitFormatDiagnostic(
10049 S, inFunctionCall, Args[format_idx],
10050 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
10051 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
10052 return;
10053 }
10054
10055 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
10056 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
10057 Type == Sema::FST_OSTrace) {
10058 CheckPrintfHandler H(
10059 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
10060 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
10061 HasVAListArg, Args, format_idx, inFunctionCall, CallType,
10062 CheckedVarArgs, UncoveredArg);
10063
10064 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
10065 S.getLangOpts(),
10066 S.Context.getTargetInfo(),
10067 Type == Sema::FST_FreeBSDKPrintf))
10068 H.DoneProcessing();
10069 } else if (Type == Sema::FST_Scanf) {
10070 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
10071 numDataArgs, Str, HasVAListArg, Args, format_idx,
10072 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
10073
10074 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
10075 S.getLangOpts(),
10076 S.Context.getTargetInfo()))
10077 H.DoneProcessing();
10078 } // TODO: handle other formats
10079 }
10080
10081 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
10082 // Str - The format string. NOTE: this is NOT null-terminated!
10083 StringRef StrRef = FExpr->getString();
10084 const char *Str = StrRef.data();
10085 // Account for cases where the string literal is truncated in a declaration.
10086 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
10087 assert(T && "String literal not of constant array type!");
10088 size_t TypeSize = T->getSize().getZExtValue();
10089 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
10090 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
10091 getLangOpts(),
10092 Context.getTargetInfo());
10093 }
10094
10095 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
10096
10097 // Returns the related absolute value function that is larger, or 0 if one
10098 // does not exist.
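// Illustrative chains: abs -> labs -> llabs, fabsf -> fabs -> fabsl, and
// cabsf -> cabs -> cabsl (likewise for the corresponding __builtin_* forms).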
10099 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 10100 switch (AbsFunction) { 10101 default: 10102 return 0; 10103 10104 case Builtin::BI__builtin_abs: 10105 return Builtin::BI__builtin_labs; 10106 case Builtin::BI__builtin_labs: 10107 return Builtin::BI__builtin_llabs; 10108 case Builtin::BI__builtin_llabs: 10109 return 0; 10110 10111 case Builtin::BI__builtin_fabsf: 10112 return Builtin::BI__builtin_fabs; 10113 case Builtin::BI__builtin_fabs: 10114 return Builtin::BI__builtin_fabsl; 10115 case Builtin::BI__builtin_fabsl: 10116 return 0; 10117 10118 case Builtin::BI__builtin_cabsf: 10119 return Builtin::BI__builtin_cabs; 10120 case Builtin::BI__builtin_cabs: 10121 return Builtin::BI__builtin_cabsl; 10122 case Builtin::BI__builtin_cabsl: 10123 return 0; 10124 10125 case Builtin::BIabs: 10126 return Builtin::BIlabs; 10127 case Builtin::BIlabs: 10128 return Builtin::BIllabs; 10129 case Builtin::BIllabs: 10130 return 0; 10131 10132 case Builtin::BIfabsf: 10133 return Builtin::BIfabs; 10134 case Builtin::BIfabs: 10135 return Builtin::BIfabsl; 10136 case Builtin::BIfabsl: 10137 return 0; 10138 10139 case Builtin::BIcabsf: 10140 return Builtin::BIcabs; 10141 case Builtin::BIcabs: 10142 return Builtin::BIcabsl; 10143 case Builtin::BIcabsl: 10144 return 0; 10145 } 10146 } 10147 10148 // Returns the argument type of the absolute value function. 10149 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 10150 unsigned AbsType) { 10151 if (AbsType == 0) 10152 return QualType(); 10153 10154 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 10155 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 10156 if (Error != ASTContext::GE_None) 10157 return QualType(); 10158 10159 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 10160 if (!FT) 10161 return QualType(); 10162 10163 if (FT->getNumParams() != 1) 10164 return QualType(); 10165 10166 return FT->getParamType(0); 10167 } 10168 10169 // Returns the best absolute value function, or zero, based on type and 10170 // current absolute value function. 10171 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 10172 unsigned AbsFunctionKind) { 10173 unsigned BestKind = 0; 10174 uint64_t ArgSize = Context.getTypeSize(ArgType); 10175 for (unsigned Kind = AbsFunctionKind; Kind != 0; 10176 Kind = getLargerAbsoluteValueFunction(Kind)) { 10177 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 10178 if (Context.getTypeSize(ParamType) >= ArgSize) { 10179 if (BestKind == 0) 10180 BestKind = Kind; 10181 else if (Context.hasSameType(ParamType, ArgType)) { 10182 BestKind = Kind; 10183 break; 10184 } 10185 } 10186 } 10187 return BestKind; 10188 } 10189 10190 enum AbsoluteValueKind { 10191 AVK_Integer, 10192 AVK_Floating, 10193 AVK_Complex 10194 }; 10195 10196 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 10197 if (T->isIntegralOrEnumerationType()) 10198 return AVK_Integer; 10199 if (T->isRealFloatingType()) 10200 return AVK_Floating; 10201 if (T->isAnyComplexType()) 10202 return AVK_Complex; 10203 10204 llvm_unreachable("Type not integer, floating, or complex"); 10205 } 10206 10207 // Changes the absolute value function to a different type. Preserves whether 10208 // the function is a builtin. 
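// Illustrative example: if fabsf() is applied to an integer argument, the
// integer family is selected, so the replacement starts from abs() (or
// __builtin_abs() for the builtin spelling) and getBestAbsFunction() then
// widens it to labs()/llabs() as the argument's size requires.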
10209 static unsigned changeAbsFunction(unsigned AbsKind, 10210 AbsoluteValueKind ValueKind) { 10211 switch (ValueKind) { 10212 case AVK_Integer: 10213 switch (AbsKind) { 10214 default: 10215 return 0; 10216 case Builtin::BI__builtin_fabsf: 10217 case Builtin::BI__builtin_fabs: 10218 case Builtin::BI__builtin_fabsl: 10219 case Builtin::BI__builtin_cabsf: 10220 case Builtin::BI__builtin_cabs: 10221 case Builtin::BI__builtin_cabsl: 10222 return Builtin::BI__builtin_abs; 10223 case Builtin::BIfabsf: 10224 case Builtin::BIfabs: 10225 case Builtin::BIfabsl: 10226 case Builtin::BIcabsf: 10227 case Builtin::BIcabs: 10228 case Builtin::BIcabsl: 10229 return Builtin::BIabs; 10230 } 10231 case AVK_Floating: 10232 switch (AbsKind) { 10233 default: 10234 return 0; 10235 case Builtin::BI__builtin_abs: 10236 case Builtin::BI__builtin_labs: 10237 case Builtin::BI__builtin_llabs: 10238 case Builtin::BI__builtin_cabsf: 10239 case Builtin::BI__builtin_cabs: 10240 case Builtin::BI__builtin_cabsl: 10241 return Builtin::BI__builtin_fabsf; 10242 case Builtin::BIabs: 10243 case Builtin::BIlabs: 10244 case Builtin::BIllabs: 10245 case Builtin::BIcabsf: 10246 case Builtin::BIcabs: 10247 case Builtin::BIcabsl: 10248 return Builtin::BIfabsf; 10249 } 10250 case AVK_Complex: 10251 switch (AbsKind) { 10252 default: 10253 return 0; 10254 case Builtin::BI__builtin_abs: 10255 case Builtin::BI__builtin_labs: 10256 case Builtin::BI__builtin_llabs: 10257 case Builtin::BI__builtin_fabsf: 10258 case Builtin::BI__builtin_fabs: 10259 case Builtin::BI__builtin_fabsl: 10260 return Builtin::BI__builtin_cabsf; 10261 case Builtin::BIabs: 10262 case Builtin::BIlabs: 10263 case Builtin::BIllabs: 10264 case Builtin::BIfabsf: 10265 case Builtin::BIfabs: 10266 case Builtin::BIfabsl: 10267 return Builtin::BIcabsf; 10268 } 10269 } 10270 llvm_unreachable("Unable to convert function"); 10271 } 10272 10273 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 10274 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 10275 if (!FnInfo) 10276 return 0; 10277 10278 switch (FDecl->getBuiltinID()) { 10279 default: 10280 return 0; 10281 case Builtin::BI__builtin_abs: 10282 case Builtin::BI__builtin_fabs: 10283 case Builtin::BI__builtin_fabsf: 10284 case Builtin::BI__builtin_fabsl: 10285 case Builtin::BI__builtin_labs: 10286 case Builtin::BI__builtin_llabs: 10287 case Builtin::BI__builtin_cabs: 10288 case Builtin::BI__builtin_cabsf: 10289 case Builtin::BI__builtin_cabsl: 10290 case Builtin::BIabs: 10291 case Builtin::BIlabs: 10292 case Builtin::BIllabs: 10293 case Builtin::BIfabs: 10294 case Builtin::BIfabsf: 10295 case Builtin::BIfabsl: 10296 case Builtin::BIcabs: 10297 case Builtin::BIcabsf: 10298 case Builtin::BIcabsl: 10299 return FDecl->getBuiltinID(); 10300 } 10301 llvm_unreachable("Unknown Builtin type"); 10302 } 10303 10304 // If the replacement is valid, emit a note with replacement function. 10305 // Additionally, suggest including the proper header if not already included. 
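// Illustrative example: in C++, fabsf(d) applied to a 'double' gets a note
// suggesting 'std::abs'; if no suitable std::abs overload is visible, a
// second note recommends including <cmath> (or <cstdlib> for integer types).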
10306 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 10307 unsigned AbsKind, QualType ArgType) { 10308 bool EmitHeaderHint = true; 10309 const char *HeaderName = nullptr; 10310 const char *FunctionName = nullptr; 10311 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 10312 FunctionName = "std::abs"; 10313 if (ArgType->isIntegralOrEnumerationType()) { 10314 HeaderName = "cstdlib"; 10315 } else if (ArgType->isRealFloatingType()) { 10316 HeaderName = "cmath"; 10317 } else { 10318 llvm_unreachable("Invalid Type"); 10319 } 10320 10321 // Lookup all std::abs 10322 if (NamespaceDecl *Std = S.getStdNamespace()) { 10323 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 10324 R.suppressDiagnostics(); 10325 S.LookupQualifiedName(R, Std); 10326 10327 for (const auto *I : R) { 10328 const FunctionDecl *FDecl = nullptr; 10329 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 10330 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 10331 } else { 10332 FDecl = dyn_cast<FunctionDecl>(I); 10333 } 10334 if (!FDecl) 10335 continue; 10336 10337 // Found std::abs(), check that they are the right ones. 10338 if (FDecl->getNumParams() != 1) 10339 continue; 10340 10341 // Check that the parameter type can handle the argument. 10342 QualType ParamType = FDecl->getParamDecl(0)->getType(); 10343 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 10344 S.Context.getTypeSize(ArgType) <= 10345 S.Context.getTypeSize(ParamType)) { 10346 // Found a function, don't need the header hint. 10347 EmitHeaderHint = false; 10348 break; 10349 } 10350 } 10351 } 10352 } else { 10353 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 10354 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 10355 10356 if (HeaderName) { 10357 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 10358 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 10359 R.suppressDiagnostics(); 10360 S.LookupName(R, S.getCurScope()); 10361 10362 if (R.isSingleResult()) { 10363 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 10364 if (FD && FD->getBuiltinID() == AbsKind) { 10365 EmitHeaderHint = false; 10366 } else { 10367 return; 10368 } 10369 } else if (!R.empty()) { 10370 return; 10371 } 10372 } 10373 } 10374 10375 S.Diag(Loc, diag::note_replace_abs_function) 10376 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 10377 10378 if (!HeaderName) 10379 return; 10380 10381 if (!EmitHeaderHint) 10382 return; 10383 10384 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 10385 << FunctionName; 10386 } 10387 10388 template <std::size_t StrLen> 10389 static bool IsStdFunction(const FunctionDecl *FDecl, 10390 const char (&Str)[StrLen]) { 10391 if (!FDecl) 10392 return false; 10393 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 10394 return false; 10395 if (!FDecl->isInStdNamespace()) 10396 return false; 10397 10398 return true; 10399 } 10400 10401 // Warn when using the wrong abs() function. 
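// Illustrative examples: abs(-2.5) silently truncates and is diagnosed with a
// suggestion to use a floating-point variant (fabs / std::abs), while
// abs(someUnsignedValue) is flagged as pointless, since unsigned values are
// never negative, with a fix-it removing the call to abs.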
10402 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 10403 const FunctionDecl *FDecl) { 10404 if (Call->getNumArgs() != 1) 10405 return; 10406 10407 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 10408 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 10409 if (AbsKind == 0 && !IsStdAbs) 10410 return; 10411 10412 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 10413 QualType ParamType = Call->getArg(0)->getType(); 10414 10415 // Unsigned types cannot be negative. Suggest removing the absolute value 10416 // function call. 10417 if (ArgType->isUnsignedIntegerType()) { 10418 const char *FunctionName = 10419 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind); 10420 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType; 10421 Diag(Call->getExprLoc(), diag::note_remove_abs) 10422 << FunctionName 10423 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()); 10424 return; 10425 } 10426 10427 // Taking the absolute value of a pointer is very suspicious, they probably 10428 // wanted to index into an array, dereference a pointer, call a function, etc. 10429 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) { 10430 unsigned DiagType = 0; 10431 if (ArgType->isFunctionType()) 10432 DiagType = 1; 10433 else if (ArgType->isArrayType()) 10434 DiagType = 2; 10435 10436 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType; 10437 return; 10438 } 10439 10440 // std::abs has overloads which prevent most of the absolute value problems 10441 // from occurring. 10442 if (IsStdAbs) 10443 return; 10444 10445 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType); 10446 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType); 10447 10448 // The argument and parameter are the same kind. Check if they are the right 10449 // size. 10450 if (ArgValueKind == ParamValueKind) { 10451 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType)) 10452 return; 10453 10454 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind); 10455 Diag(Call->getExprLoc(), diag::warn_abs_too_small) 10456 << FDecl << ArgType << ParamType; 10457 10458 if (NewAbsKind == 0) 10459 return; 10460 10461 emitReplacement(*this, Call->getExprLoc(), 10462 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 10463 return; 10464 } 10465 10466 // ArgValueKind != ParamValueKind 10467 // The wrong type of absolute value function was used. Attempt to find the 10468 // proper one. 10469 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind); 10470 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind); 10471 if (NewAbsKind == 0) 10472 return; 10473 10474 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type) 10475 << FDecl << ParamValueKind << ArgValueKind; 10476 10477 emitReplacement(*this, Call->getExprLoc(), 10478 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 10479 } 10480 10481 //===--- CHECK: Warn on use of std::max and unsigned zero. r---------------===// 10482 void Sema::CheckMaxUnsignedZero(const CallExpr *Call, 10483 const FunctionDecl *FDecl) { 10484 if (!Call || !FDecl) return; 10485 10486 // Ignore template specializations and macros. 
10487 if (inTemplateInstantiation()) return; 10488 if (Call->getExprLoc().isMacroID()) return; 10489 10490 // Only care about the one template argument, two function parameter std::max 10491 if (Call->getNumArgs() != 2) return; 10492 if (!IsStdFunction(FDecl, "max")) return; 10493 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 10494 if (!ArgList) return; 10495 if (ArgList->size() != 1) return; 10496 10497 // Check that template type argument is unsigned integer. 10498 const auto& TA = ArgList->get(0); 10499 if (TA.getKind() != TemplateArgument::Type) return; 10500 QualType ArgType = TA.getAsType(); 10501 if (!ArgType->isUnsignedIntegerType()) return; 10502 10503 // See if either argument is a literal zero. 10504 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 10505 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 10506 if (!MTE) return false; 10507 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 10508 if (!Num) return false; 10509 if (Num->getValue() != 0) return false; 10510 return true; 10511 }; 10512 10513 const Expr *FirstArg = Call->getArg(0); 10514 const Expr *SecondArg = Call->getArg(1); 10515 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 10516 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 10517 10518 // Only warn when exactly one argument is zero. 10519 if (IsFirstArgZero == IsSecondArgZero) return; 10520 10521 SourceRange FirstRange = FirstArg->getSourceRange(); 10522 SourceRange SecondRange = SecondArg->getSourceRange(); 10523 10524 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 10525 10526 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 10527 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 10528 10529 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 10530 SourceRange RemovalRange; 10531 if (IsFirstArgZero) { 10532 RemovalRange = SourceRange(FirstRange.getBegin(), 10533 SecondRange.getBegin().getLocWithOffset(-1)); 10534 } else { 10535 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 10536 SecondRange.getEnd()); 10537 } 10538 10539 Diag(Call->getExprLoc(), diag::note_remove_max_call) 10540 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 10541 << FixItHint::CreateRemoval(RemovalRange); 10542 } 10543 10544 //===--- CHECK: Standard memory functions ---------------------------------===// 10545 10546 /// Takes the expression passed to the size_t parameter of functions 10547 /// such as memcmp, strncat, etc and warns if it's a comparison. 10548 /// 10549 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 
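/// The emitted fix-its move the misplaced parenthesis so the comparison applies
/// to the call result (e.g. `if (memcmp(&a, &b, sizeof(a)) > 0)`) or, to
/// silence the warning, cast the size expression to `size_t`.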
10550 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 10551 IdentifierInfo *FnName, 10552 SourceLocation FnLoc, 10553 SourceLocation RParenLoc) { 10554 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 10555 if (!Size) 10556 return false; 10557 10558 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 10559 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 10560 return false; 10561 10562 SourceRange SizeRange = Size->getSourceRange(); 10563 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 10564 << SizeRange << FnName; 10565 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 10566 << FnName 10567 << FixItHint::CreateInsertion( 10568 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 10569 << FixItHint::CreateRemoval(RParenLoc); 10570 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 10571 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 10572 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 10573 ")"); 10574 10575 return true; 10576 } 10577 10578 /// Determine whether the given type is or contains a dynamic class type 10579 /// (e.g., whether it has a vtable). 10580 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 10581 bool &IsContained) { 10582 // Look through array types while ignoring qualifiers. 10583 const Type *Ty = T->getBaseElementTypeUnsafe(); 10584 IsContained = false; 10585 10586 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 10587 RD = RD ? RD->getDefinition() : nullptr; 10588 if (!RD || RD->isInvalidDecl()) 10589 return nullptr; 10590 10591 if (RD->isDynamicClass()) 10592 return RD; 10593 10594 // Check all the fields. If any bases were dynamic, the class is dynamic. 10595 // It's impossible for a class to transitively contain itself by value, so 10596 // infinite recursion is impossible. 10597 for (auto *FD : RD->fields()) { 10598 bool SubContained; 10599 if (const CXXRecordDecl *ContainedRD = 10600 getContainedDynamicClass(FD->getType(), SubContained)) { 10601 IsContained = true; 10602 return ContainedRD; 10603 } 10604 } 10605 10606 return nullptr; 10607 } 10608 10609 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 10610 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 10611 if (Unary->getKind() == UETT_SizeOf) 10612 return Unary; 10613 return nullptr; 10614 } 10615 10616 /// If E is a sizeof expression, returns its argument expression, 10617 /// otherwise returns NULL. 10618 static const Expr *getSizeOfExprArg(const Expr *E) { 10619 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 10620 if (!SizeOf->isArgumentType()) 10621 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 10622 return nullptr; 10623 } 10624 10625 /// If E is a sizeof expression, returns its argument type. 
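/// For example, both `sizeof(int)` and `sizeof(i)` (where `i` is an int)
/// yield 'int'; if E is not a sizeof expression, a null QualType is returned.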
10626 static QualType getSizeOfArgType(const Expr *E) { 10627 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 10628 return SizeOf->getTypeOfArgument(); 10629 return QualType(); 10630 } 10631 10632 namespace { 10633 10634 struct SearchNonTrivialToInitializeField 10635 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 10636 using Super = 10637 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 10638 10639 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 10640 10641 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 10642 SourceLocation SL) { 10643 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 10644 asDerived().visitArray(PDIK, AT, SL); 10645 return; 10646 } 10647 10648 Super::visitWithKind(PDIK, FT, SL); 10649 } 10650 10651 void visitARCStrong(QualType FT, SourceLocation SL) { 10652 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 10653 } 10654 void visitARCWeak(QualType FT, SourceLocation SL) { 10655 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 10656 } 10657 void visitStruct(QualType FT, SourceLocation SL) { 10658 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 10659 visit(FD->getType(), FD->getLocation()); 10660 } 10661 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 10662 const ArrayType *AT, SourceLocation SL) { 10663 visit(getContext().getBaseElementType(AT), SL); 10664 } 10665 void visitTrivial(QualType FT, SourceLocation SL) {} 10666 10667 static void diag(QualType RT, const Expr *E, Sema &S) { 10668 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 10669 } 10670 10671 ASTContext &getContext() { return S.getASTContext(); } 10672 10673 const Expr *E; 10674 Sema &S; 10675 }; 10676 10677 struct SearchNonTrivialToCopyField 10678 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 10679 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 10680 10681 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 10682 10683 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 10684 SourceLocation SL) { 10685 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 10686 asDerived().visitArray(PCK, AT, SL); 10687 return; 10688 } 10689 10690 Super::visitWithKind(PCK, FT, SL); 10691 } 10692 10693 void visitARCStrong(QualType FT, SourceLocation SL) { 10694 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 10695 } 10696 void visitARCWeak(QualType FT, SourceLocation SL) { 10697 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 10698 } 10699 void visitStruct(QualType FT, SourceLocation SL) { 10700 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 10701 visit(FD->getType(), FD->getLocation()); 10702 } 10703 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 10704 SourceLocation SL) { 10705 visit(getContext().getBaseElementType(AT), SL); 10706 } 10707 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 10708 SourceLocation SL) {} 10709 void visitTrivial(QualType FT, SourceLocation SL) {} 10710 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 10711 10712 static void diag(QualType RT, const Expr *E, Sema &S) { 10713 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 10714 } 10715 10716 ASTContext &getContext() { return S.getASTContext(); } 10717 10718 const Expr *E; 10719 Sema &S; 10720 
}; 10721 10722 } 10723 10724 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 10725 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 10726 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 10727 10728 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 10729 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 10730 return false; 10731 10732 return doesExprLikelyComputeSize(BO->getLHS()) || 10733 doesExprLikelyComputeSize(BO->getRHS()); 10734 } 10735 10736 return getAsSizeOfExpr(SizeofExpr) != nullptr; 10737 } 10738 10739 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 10740 /// 10741 /// \code 10742 /// #define MACRO 0 10743 /// foo(MACRO); 10744 /// foo(0); 10745 /// \endcode 10746 /// 10747 /// This should return true for the first call to foo, but not for the second 10748 /// (regardless of whether foo is a macro or function). 10749 static bool isArgumentExpandedFromMacro(SourceManager &SM, 10750 SourceLocation CallLoc, 10751 SourceLocation ArgLoc) { 10752 if (!CallLoc.isMacroID()) 10753 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 10754 10755 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 10756 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 10757 } 10758 10759 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 10760 /// last two arguments transposed. 10761 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 10762 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 10763 return; 10764 10765 const Expr *SizeArg = 10766 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 10767 10768 auto isLiteralZero = [](const Expr *E) { 10769 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 10770 }; 10771 10772 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 10773 SourceLocation CallLoc = Call->getRParenLoc(); 10774 SourceManager &SM = S.getSourceManager(); 10775 if (isLiteralZero(SizeArg) && 10776 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 10777 10778 SourceLocation DiagLoc = SizeArg->getExprLoc(); 10779 10780 // Some platforms #define bzero to __builtin_memset. See if this is the 10781 // case, and if so, emit a better diagnostic. 10782 if (BId == Builtin::BIbzero || 10783 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 10784 CallLoc, SM, S.getLangOpts()) == "bzero")) { 10785 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 10786 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 10787 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 10788 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 10789 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 10790 } 10791 return; 10792 } 10793 10794 // If the second argument to a memset is a sizeof expression and the third 10795 // isn't, this is also likely an error. This should catch 10796 // 'memset(buf, sizeof(buf), 0xff)'. 10797 if (BId == Builtin::BImemset && 10798 doesExprLikelyComputeSize(Call->getArg(1)) && 10799 !doesExprLikelyComputeSize(Call->getArg(2))) { 10800 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 10801 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 10802 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 10803 return; 10804 } 10805 } 10806 10807 /// Check for dangerous or invalid arguments to memset(). 
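/// Typical patterns flagged include passing `sizeof(p)` where `sizeof(*p)` was
/// intended, and transposed size/value arguments such as
/// `memset(buf, sizeof(buf), 0)`.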
10808 /// 10809 /// This issues warnings on known problematic, dangerous or unspecified 10810 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 10811 /// function calls. 10812 /// 10813 /// \param Call The call expression to diagnose. 10814 void Sema::CheckMemaccessArguments(const CallExpr *Call, 10815 unsigned BId, 10816 IdentifierInfo *FnName) { 10817 assert(BId != 0); 10818 10819 // It is possible to have a non-standard definition of memset. Validate 10820 // we have enough arguments, and if not, abort further checking. 10821 unsigned ExpectedNumArgs = 10822 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 10823 if (Call->getNumArgs() < ExpectedNumArgs) 10824 return; 10825 10826 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 10827 BId == Builtin::BIstrndup ? 1 : 2); 10828 unsigned LenArg = 10829 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 10830 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 10831 10832 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 10833 Call->getBeginLoc(), Call->getRParenLoc())) 10834 return; 10835 10836 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 10837 CheckMemaccessSize(*this, BId, Call); 10838 10839 // We have special checking when the length is a sizeof expression. 10840 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 10841 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 10842 llvm::FoldingSetNodeID SizeOfArgID; 10843 10844 // Although widely used, 'bzero' is not a standard function. Be more strict 10845 // with the argument types before allowing diagnostics and only allow the 10846 // form bzero(ptr, sizeof(...)). 10847 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 10848 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 10849 return; 10850 10851 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 10852 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 10853 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 10854 10855 QualType DestTy = Dest->getType(); 10856 QualType PointeeTy; 10857 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 10858 PointeeTy = DestPtrTy->getPointeeType(); 10859 10860 // Never warn about void type pointers. This can be used to suppress 10861 // false positives. 10862 if (PointeeTy->isVoidType()) 10863 continue; 10864 10865 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 10866 // actually comparing the expressions for equality. Because computing the 10867 // expression IDs can be expensive, we only do this if the diagnostic is 10868 // enabled. 10869 if (SizeOfArg && 10870 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 10871 SizeOfArg->getExprLoc())) { 10872 // We only compute IDs for expressions if the warning is enabled, and 10873 // cache the sizeof arg's ID. 10874 if (SizeOfArgID == llvm::FoldingSetNodeID()) 10875 SizeOfArg->Profile(SizeOfArgID, Context, true); 10876 llvm::FoldingSetNodeID DestID; 10877 Dest->Profile(DestID, Context, true); 10878 if (DestID == SizeOfArgID) { 10879 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 10880 // over sizeof(src) as well. 10881 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 
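// ActionIdx selects the note's suggested fix: 0 = dereference the pointer
// inside sizeof, 1 = drop the address-of operator, 2 = pass an explicit
// length instead (char-sized pointee).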
10882 StringRef ReadableName = FnName->getName(); 10883 10884 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 10885 if (UnaryOp->getOpcode() == UO_AddrOf) 10886 ActionIdx = 1; // If its an address-of operator, just remove it. 10887 if (!PointeeTy->isIncompleteType() && 10888 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 10889 ActionIdx = 2; // If the pointee's size is sizeof(char), 10890 // suggest an explicit length. 10891 10892 // If the function is defined as a builtin macro, do not show macro 10893 // expansion. 10894 SourceLocation SL = SizeOfArg->getExprLoc(); 10895 SourceRange DSR = Dest->getSourceRange(); 10896 SourceRange SSR = SizeOfArg->getSourceRange(); 10897 SourceManager &SM = getSourceManager(); 10898 10899 if (SM.isMacroArgExpansion(SL)) { 10900 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 10901 SL = SM.getSpellingLoc(SL); 10902 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 10903 SM.getSpellingLoc(DSR.getEnd())); 10904 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 10905 SM.getSpellingLoc(SSR.getEnd())); 10906 } 10907 10908 DiagRuntimeBehavior(SL, SizeOfArg, 10909 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 10910 << ReadableName 10911 << PointeeTy 10912 << DestTy 10913 << DSR 10914 << SSR); 10915 DiagRuntimeBehavior(SL, SizeOfArg, 10916 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 10917 << ActionIdx 10918 << SSR); 10919 10920 break; 10921 } 10922 } 10923 10924 // Also check for cases where the sizeof argument is the exact same 10925 // type as the memory argument, and where it points to a user-defined 10926 // record type. 10927 if (SizeOfArgTy != QualType()) { 10928 if (PointeeTy->isRecordType() && 10929 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 10930 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 10931 PDiag(diag::warn_sizeof_pointer_type_memaccess) 10932 << FnName << SizeOfArgTy << ArgIdx 10933 << PointeeTy << Dest->getSourceRange() 10934 << LenExpr->getSourceRange()); 10935 break; 10936 } 10937 } 10938 } else if (DestTy->isArrayType()) { 10939 PointeeTy = DestTy; 10940 } 10941 10942 if (PointeeTy == QualType()) 10943 continue; 10944 10945 // Always complain about dynamic classes. 10946 bool IsContained; 10947 if (const CXXRecordDecl *ContainedRD = 10948 getContainedDynamicClass(PointeeTy, IsContained)) { 10949 10950 unsigned OperationType = 0; 10951 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 10952 // "overwritten" if we're warning about the destination for any call 10953 // but memcmp; otherwise a verb appropriate to the call. 10954 if (ArgIdx != 0 || IsCmp) { 10955 if (BId == Builtin::BImemcpy) 10956 OperationType = 1; 10957 else if(BId == Builtin::BImemmove) 10958 OperationType = 2; 10959 else if (IsCmp) 10960 OperationType = 3; 10961 } 10962 10963 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10964 PDiag(diag::warn_dyn_class_memaccess) 10965 << (IsCmp ? 
ArgIdx + 2 : ArgIdx) << FnName 10966 << IsContained << ContainedRD << OperationType 10967 << Call->getCallee()->getSourceRange()); 10968 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 10969 BId != Builtin::BImemset) 10970 DiagRuntimeBehavior( 10971 Dest->getExprLoc(), Dest, 10972 PDiag(diag::warn_arc_object_memaccess) 10973 << ArgIdx << FnName << PointeeTy 10974 << Call->getCallee()->getSourceRange()); 10975 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 10976 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 10977 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 10978 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10979 PDiag(diag::warn_cstruct_memaccess) 10980 << ArgIdx << FnName << PointeeTy << 0); 10981 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 10982 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 10983 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 10984 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10985 PDiag(diag::warn_cstruct_memaccess) 10986 << ArgIdx << FnName << PointeeTy << 1); 10987 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 10988 } else { 10989 continue; 10990 } 10991 } else 10992 continue; 10993 10994 DiagRuntimeBehavior( 10995 Dest->getExprLoc(), Dest, 10996 PDiag(diag::note_bad_memaccess_silence) 10997 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 10998 break; 10999 } 11000 } 11001 11002 // A little helper routine: ignore addition and subtraction of integer literals. 11003 // This intentionally does not ignore all integer constant expressions because 11004 // we don't want to remove sizeof(). 11005 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 11006 Ex = Ex->IgnoreParenCasts(); 11007 11008 while (true) { 11009 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 11010 if (!BO || !BO->isAdditiveOp()) 11011 break; 11012 11013 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 11014 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 11015 11016 if (isa<IntegerLiteral>(RHS)) 11017 Ex = LHS; 11018 else if (isa<IntegerLiteral>(LHS)) 11019 Ex = RHS; 11020 else 11021 break; 11022 } 11023 11024 return Ex; 11025 } 11026 11027 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 11028 ASTContext &Context) { 11029 // Only handle constant-sized or VLAs, but not flexible members. 11030 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 11031 // Only issue the FIXIT for arrays of size > 1. 11032 if (CAT->getSize().getSExtValue() <= 1) 11033 return false; 11034 } else if (!Ty->isVariableArrayType()) { 11035 return false; 11036 } 11037 return true; 11038 } 11039 11040 // Warn if the user has made the 'size' argument to strlcpy or strlcat 11041 // be the size of the source, instead of the destination. 
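// For example, 'strlcpy(dst, src, sizeof(src))' and
// 'strlcpy(dst, src, strlen(src))' should both use sizeof(dst) as the size.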
11042 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 11043 IdentifierInfo *FnName) { 11044 11045 // Don't crash if the user has the wrong number of arguments 11046 unsigned NumArgs = Call->getNumArgs(); 11047 if ((NumArgs != 3) && (NumArgs != 4)) 11048 return; 11049 11050 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 11051 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 11052 const Expr *CompareWithSrc = nullptr; 11053 11054 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 11055 Call->getBeginLoc(), Call->getRParenLoc())) 11056 return; 11057 11058 // Look for 'strlcpy(dst, x, sizeof(x))' 11059 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 11060 CompareWithSrc = Ex; 11061 else { 11062 // Look for 'strlcpy(dst, x, strlen(x))' 11063 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 11064 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 11065 SizeCall->getNumArgs() == 1) 11066 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 11067 } 11068 } 11069 11070 if (!CompareWithSrc) 11071 return; 11072 11073 // Determine if the argument to sizeof/strlen is equal to the source 11074 // argument. In principle there's all kinds of things you could do 11075 // here, for instance creating an == expression and evaluating it with 11076 // EvaluateAsBooleanCondition, but this uses a more direct technique: 11077 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 11078 if (!SrcArgDRE) 11079 return; 11080 11081 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 11082 if (!CompareWithSrcDRE || 11083 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 11084 return; 11085 11086 const Expr *OriginalSizeArg = Call->getArg(2); 11087 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 11088 << OriginalSizeArg->getSourceRange() << FnName; 11089 11090 // Output a FIXIT hint if the destination is an array (rather than a 11091 // pointer to an array). This could be enhanced to handle some 11092 // pointers if we know the actual size, like if DstArg is 'array+2' 11093 // we could say 'sizeof(array)-2'. 11094 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 11095 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 11096 return; 11097 11098 SmallString<128> sizeString; 11099 llvm::raw_svector_ostream OS(sizeString); 11100 OS << "sizeof("; 11101 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11102 OS << ")"; 11103 11104 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 11105 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 11106 OS.str()); 11107 } 11108 11109 /// Check if two expressions refer to the same declaration. 11110 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 11111 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 11112 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 11113 return D1->getDecl() == D2->getDecl(); 11114 return false; 11115 } 11116 11117 static const Expr *getStrlenExprArg(const Expr *E) { 11118 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 11119 const FunctionDecl *FD = CE->getDirectCallee(); 11120 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 11121 return nullptr; 11122 return CE->getArg(0)->IgnoreParenCasts(); 11123 } 11124 return nullptr; 11125 } 11126 11127 // Warn on anti-patterns as the 'size' argument to strncat. 
11128 // The correct size argument should look like the following:
11129 // strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
11130 void Sema::CheckStrncatArguments(const CallExpr *CE,
11131 IdentifierInfo *FnName) {
11132 // Don't crash if the user has the wrong number of arguments.
11133 if (CE->getNumArgs() < 3)
11134 return;
11135 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
11136 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
11137 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
11138
11139 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
11140 CE->getRParenLoc()))
11141 return;
11142
11143 // Identify common expressions that are wrongly used as the size argument
11144 // to strncat and may lead to buffer overflows.
11145 unsigned PatternType = 0;
11146 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
11147 // - sizeof(dst)
11148 if (referToTheSameDecl(SizeOfArg, DstArg))
11149 PatternType = 1;
11150 // - sizeof(src)
11151 else if (referToTheSameDecl(SizeOfArg, SrcArg))
11152 PatternType = 2;
11153 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
11154 if (BE->getOpcode() == BO_Sub) {
11155 const Expr *L = BE->getLHS()->IgnoreParenCasts();
11156 const Expr *R = BE->getRHS()->IgnoreParenCasts();
11157 // - sizeof(dst) - strlen(dst)
11158 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
11159 referToTheSameDecl(DstArg, getStrlenExprArg(R)))
11160 PatternType = 1;
11161 // - sizeof(src) - (anything)
11162 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
11163 PatternType = 2;
11164 }
11165 }
11166
11167 if (PatternType == 0)
11168 return;
11169
11170 // Generate the diagnostic.
11171 SourceLocation SL = LenArg->getBeginLoc();
11172 SourceRange SR = LenArg->getSourceRange();
11173 SourceManager &SM = getSourceManager();
11174
11175 // If the function is defined as a builtin macro, do not show macro expansion.
11176 if (SM.isMacroArgExpansion(SL)) {
11177 SL = SM.getSpellingLoc(SL);
11178 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
11179 SM.getSpellingLoc(SR.getEnd()));
11180 }
11181
11182 // Check if the destination is an array (rather than a pointer to an array).
11183 QualType DstTy = DstArg->getType(); 11184 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 11185 Context); 11186 if (!isKnownSizeArray) { 11187 if (PatternType == 1) 11188 Diag(SL, diag::warn_strncat_wrong_size) << SR; 11189 else 11190 Diag(SL, diag::warn_strncat_src_size) << SR; 11191 return; 11192 } 11193 11194 if (PatternType == 1) 11195 Diag(SL, diag::warn_strncat_large_size) << SR; 11196 else 11197 Diag(SL, diag::warn_strncat_src_size) << SR; 11198 11199 SmallString<128> sizeString; 11200 llvm::raw_svector_ostream OS(sizeString); 11201 OS << "sizeof("; 11202 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11203 OS << ") - "; 11204 OS << "strlen("; 11205 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11206 OS << ") - 1"; 11207 11208 Diag(SL, diag::note_strncat_wrong_size) 11209 << FixItHint::CreateReplacement(SR, OS.str()); 11210 } 11211 11212 namespace { 11213 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 11214 const UnaryOperator *UnaryExpr, const Decl *D) { 11215 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) { 11216 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 11217 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D); 11218 return; 11219 } 11220 } 11221 11222 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 11223 const UnaryOperator *UnaryExpr) { 11224 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) { 11225 const Decl *D = Lvalue->getDecl(); 11226 if (isa<DeclaratorDecl>(D)) 11227 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType()) 11228 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D); 11229 } 11230 11231 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 11232 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 11233 Lvalue->getMemberDecl()); 11234 } 11235 11236 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName, 11237 const UnaryOperator *UnaryExpr) { 11238 const auto *Lambda = dyn_cast<LambdaExpr>( 11239 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 11240 if (!Lambda) 11241 return; 11242 11243 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 11244 << CalleeName << 2 /*object: lambda expression*/; 11245 } 11246 11247 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 11248 const DeclRefExpr *Lvalue) { 11249 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 11250 if (Var == nullptr) 11251 return; 11252 11253 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 11254 << CalleeName << 0 /*object: */ << Var; 11255 } 11256 11257 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 11258 const CastExpr *Cast) { 11259 SmallString<128> SizeString; 11260 llvm::raw_svector_ostream OS(SizeString); 11261 11262 clang::CastKind Kind = Cast->getCastKind(); 11263 if (Kind == clang::CK_BitCast && 11264 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 11265 return; 11266 if (Kind == clang::CK_IntegralToPointer && 11267 !isa<IntegerLiteral>( 11268 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 11269 return; 11270 11271 switch (Cast->getCastKind()) { 11272 case clang::CK_BitCast: 11273 case clang::CK_IntegralToPointer: 11274 case clang::CK_FunctionToPointerDecay: 11275 OS << '\''; 11276 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 11277 OS << '\''; 11278 break; 11279 default: 11280 return; 11281 } 11282 11283 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 
11284 << CalleeName << 0 /*object: */ << OS.str(); 11285 } 11286 } // namespace 11287 11288 /// Alerts the user that they are attempting to free a non-malloc'd object. 11289 void Sema::CheckFreeArguments(const CallExpr *E) { 11290 const std::string CalleeName = 11291 dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString(); 11292 11293 { // Prefer something that doesn't involve a cast to make things simpler. 11294 const Expr *Arg = E->getArg(0)->IgnoreParenCasts(); 11295 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg)) 11296 switch (UnaryExpr->getOpcode()) { 11297 case UnaryOperator::Opcode::UO_AddrOf: 11298 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr); 11299 case UnaryOperator::Opcode::UO_Plus: 11300 return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr); 11301 default: 11302 break; 11303 } 11304 11305 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg)) 11306 if (Lvalue->getType()->isArrayType()) 11307 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue); 11308 11309 if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) { 11310 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object) 11311 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier(); 11312 return; 11313 } 11314 11315 if (isa<BlockExpr>(Arg)) { 11316 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object) 11317 << CalleeName << 1 /*object: block*/; 11318 return; 11319 } 11320 } 11321 // Maybe the cast was important, check after the other cases. 11322 if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0))) 11323 return CheckFreeArgumentsCast(*this, CalleeName, Cast); 11324 } 11325 11326 void 11327 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 11328 SourceLocation ReturnLoc, 11329 bool isObjCMethod, 11330 const AttrVec *Attrs, 11331 const FunctionDecl *FD) { 11332 // Check if the return value is null but should not be. 11333 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 11334 (!isObjCMethod && isNonNullType(Context, lhsType))) && 11335 CheckNonNullExpr(*this, RetValExp)) 11336 Diag(ReturnLoc, diag::warn_null_ret) 11337 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 11338 11339 // C++11 [basic.stc.dynamic.allocation]p4: 11340 // If an allocation function declared with a non-throwing 11341 // exception-specification fails to allocate storage, it shall return 11342 // a null pointer. Any other allocation function that fails to allocate 11343 // storage shall indicate failure only by throwing an exception [...] 11344 if (FD) { 11345 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 11346 if (Op == OO_New || Op == OO_Array_New) { 11347 const FunctionProtoType *Proto 11348 = FD->getType()->castAs<FunctionProtoType>(); 11349 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 11350 CheckNonNullExpr(*this, RetValExp)) 11351 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 11352 << FD << getLangOpts().CPlusPlus11; 11353 } 11354 } 11355 11356 // PPC MMA non-pointer types are not allowed as return type. Checking the type 11357 // here prevent the user from using a PPC MMA type as trailing return type. 11358 if (Context.getTargetInfo().getTriple().isPPC64()) 11359 CheckPPCMMAType(RetValExp->getType(), ReturnLoc); 11360 } 11361 11362 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===// 11363 11364 /// Check for comparisons of floating point operands using != and ==. 
11365 /// Issue a warning if these are not self-comparisons, as they are unlikely
11366 /// to do what the programmer intended.
11367 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
11368 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
11369 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
11370
11371 // Special case: check for x == x (which is OK).
11372 // Do not emit warnings for such cases.
11373 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
11374 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
11375 if (DRL->getDecl() == DRR->getDecl())
11376 return;
11377
11378 // Special case: check for comparisons against literals that can be exactly
11379 // represented by APFloat. In such cases, do not emit a warning. This
11380 // is a heuristic: often comparisons against such literals are used to
11381 // detect if a value in a variable has not changed. This clearly can
11382 // lead to false negatives.
11383 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
11384 if (FLL->isExact())
11385 return;
11386 } else
11387 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
11388 if (FLR->isExact())
11389 return;
11390
11391 // Check for comparisons with calls to builtin functions; do not warn on those.
11392 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
11393 if (CL->getBuiltinCallee())
11394 return;
11395
11396 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
11397 if (CR->getBuiltinCallee())
11398 return;
11399
11400 // Emit the diagnostic.
11401 Diag(Loc, diag::warn_floatingpoint_eq)
11402 << LHS->getSourceRange() << RHS->getSourceRange();
11403 }
11404
11405 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
11406 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
11407
11408 namespace {
11409
11410 /// Structure recording the 'active' range of an integer-valued
11411 /// expression.
11412 struct IntRange {
11413 /// The number of bits active in the int. Note that this includes exactly one
11414 /// sign bit if !NonNegative.
11415 unsigned Width;
11416
11417 /// True if the int is known not to have negative values. If so, all leading
11418 /// bits before Width are known zero, otherwise they are known to be the
11419 /// same as the MSB within Width.
11420 bool NonNegative;
11421
11422 IntRange(unsigned Width, bool NonNegative)
11423 : Width(Width), NonNegative(NonNegative) {}
11424
11425 /// Number of bits excluding the sign bit.
11426 unsigned valueBits() const {
11427 return NonNegative ? Width : Width - 1;
11428 }
11429
11430 /// Returns the range of the bool type.
11431 static IntRange forBoolType() {
11432 return IntRange(1, true);
11433 }
11434
11435 /// Returns the range of an opaque value of the given integral type.
11436 static IntRange forValueOfType(ASTContext &C, QualType T) {
11437 return forValueOfCanonicalType(C,
11438 T->getCanonicalTypeInternal().getTypePtr());
11439 }
11440
11441 /// Returns the range of an opaque value of a canonical integral type.
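/// For example, under C++ an enum without a fixed underlying type whose
/// enumerators are 0..5 yields a 3-bit non-negative range, while the same
/// enum with a fixed underlying type uses that type's full width.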
11442 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 11443 assert(T->isCanonicalUnqualified()); 11444 11445 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11446 T = VT->getElementType().getTypePtr(); 11447 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11448 T = CT->getElementType().getTypePtr(); 11449 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11450 T = AT->getValueType().getTypePtr(); 11451 11452 if (!C.getLangOpts().CPlusPlus) { 11453 // For enum types in C code, use the underlying datatype. 11454 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11455 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 11456 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 11457 // For enum types in C++, use the known bit width of the enumerators. 11458 EnumDecl *Enum = ET->getDecl(); 11459 // In C++11, enums can have a fixed underlying type. Use this type to 11460 // compute the range. 11461 if (Enum->isFixed()) { 11462 return IntRange(C.getIntWidth(QualType(T, 0)), 11463 !ET->isSignedIntegerOrEnumerationType()); 11464 } 11465 11466 unsigned NumPositive = Enum->getNumPositiveBits(); 11467 unsigned NumNegative = Enum->getNumNegativeBits(); 11468 11469 if (NumNegative == 0) 11470 return IntRange(NumPositive, true/*NonNegative*/); 11471 else 11472 return IntRange(std::max(NumPositive + 1, NumNegative), 11473 false/*NonNegative*/); 11474 } 11475 11476 if (const auto *EIT = dyn_cast<BitIntType>(T)) 11477 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11478 11479 const BuiltinType *BT = cast<BuiltinType>(T); 11480 assert(BT->isInteger()); 11481 11482 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11483 } 11484 11485 /// Returns the "target" range of a canonical integral type, i.e. 11486 /// the range of values expressible in the type. 11487 /// 11488 /// This matches forValueOfCanonicalType except that enums have the 11489 /// full range of their type, not the range of their enumerators. 11490 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 11491 assert(T->isCanonicalUnqualified()); 11492 11493 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11494 T = VT->getElementType().getTypePtr(); 11495 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11496 T = CT->getElementType().getTypePtr(); 11497 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11498 T = AT->getValueType().getTypePtr(); 11499 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11500 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 11501 11502 if (const auto *EIT = dyn_cast<BitIntType>(T)) 11503 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11504 11505 const BuiltinType *BT = cast<BuiltinType>(T); 11506 assert(BT->isInteger()); 11507 11508 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11509 } 11510 11511 /// Returns the supremum of two ranges: i.e. their conservative merge. 11512 static IntRange join(IntRange L, IntRange R) { 11513 bool Unsigned = L.NonNegative && R.NonNegative; 11514 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 11515 L.NonNegative && R.NonNegative); 11516 } 11517 11518 /// Return the range of a bitwise-AND of the two ranges. 
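/// E.g. AND-ing a non-negative 8-bit range with a signed 32-bit range gives
/// a non-negative 8-bit range: each non-negative operand clamps the width.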
11519 static IntRange bit_and(IntRange L, IntRange R) { 11520 unsigned Bits = std::max(L.Width, R.Width); 11521 bool NonNegative = false; 11522 if (L.NonNegative) { 11523 Bits = std::min(Bits, L.Width); 11524 NonNegative = true; 11525 } 11526 if (R.NonNegative) { 11527 Bits = std::min(Bits, R.Width); 11528 NonNegative = true; 11529 } 11530 return IntRange(Bits, NonNegative); 11531 } 11532 11533 /// Return the range of a sum of the two ranges. 11534 static IntRange sum(IntRange L, IntRange R) { 11535 bool Unsigned = L.NonNegative && R.NonNegative; 11536 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 11537 Unsigned); 11538 } 11539 11540 /// Return the range of a difference of the two ranges. 11541 static IntRange difference(IntRange L, IntRange R) { 11542 // We need a 1-bit-wider range if: 11543 // 1) LHS can be negative: least value can be reduced. 11544 // 2) RHS can be negative: greatest value can be increased. 11545 bool CanWiden = !L.NonNegative || !R.NonNegative; 11546 bool Unsigned = L.NonNegative && R.Width == 0; 11547 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 11548 !Unsigned, 11549 Unsigned); 11550 } 11551 11552 /// Return the range of a product of the two ranges. 11553 static IntRange product(IntRange L, IntRange R) { 11554 // If both LHS and RHS can be negative, we can form 11555 // -2^L * -2^R = 2^(L + R) 11556 // which requires L + R + 1 value bits to represent. 11557 bool CanWiden = !L.NonNegative && !R.NonNegative; 11558 bool Unsigned = L.NonNegative && R.NonNegative; 11559 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 11560 Unsigned); 11561 } 11562 11563 /// Return the range of a remainder operation between the two ranges. 11564 static IntRange rem(IntRange L, IntRange R) { 11565 // The result of a remainder can't be larger than the result of 11566 // either side. The sign of the result is the sign of the LHS. 11567 bool Unsigned = L.NonNegative; 11568 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 11569 Unsigned); 11570 } 11571 }; 11572 11573 } // namespace 11574 11575 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 11576 unsigned MaxWidth) { 11577 if (value.isSigned() && value.isNegative()) 11578 return IntRange(value.getMinSignedBits(), false); 11579 11580 if (value.getBitWidth() > MaxWidth) 11581 value = value.trunc(MaxWidth); 11582 11583 // isNonNegative() just checks the sign bit without considering 11584 // signedness. 11585 return IntRange(value.getActiveBits(), true); 11586 } 11587 11588 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 11589 unsigned MaxWidth) { 11590 if (result.isInt()) 11591 return GetValueRange(C, result.getInt(), MaxWidth); 11592 11593 if (result.isVector()) { 11594 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 11595 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 11596 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 11597 R = IntRange::join(R, El); 11598 } 11599 return R; 11600 } 11601 11602 if (result.isComplexInt()) { 11603 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 11604 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 11605 return IntRange::join(R, I); 11606 } 11607 11608 // This can happen with lossless casts to intptr_t of "based" lvalues. 11609 // Assume it might use arbitrary bits. 
11610 // FIXME: The only reason we need to pass the type in here is to get 11611 // the sign right on this one case. It would be nice if APValue 11612 // preserved this. 11613 assert(result.isLValue() || result.isAddrLabelDiff()); 11614 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 11615 } 11616 11617 static QualType GetExprType(const Expr *E) { 11618 QualType Ty = E->getType(); 11619 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 11620 Ty = AtomicRHS->getValueType(); 11621 return Ty; 11622 } 11623 11624 /// Pseudo-evaluate the given integer expression, estimating the 11625 /// range of values it might take. 11626 /// 11627 /// \param MaxWidth The width to which the value will be truncated. 11628 /// \param Approximate If \c true, return a likely range for the result: in 11629 /// particular, assume that arithmetic on narrower types doesn't leave 11630 /// those types. If \c false, return a range including all possible 11631 /// result values. 11632 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 11633 bool InConstantContext, bool Approximate) { 11634 E = E->IgnoreParens(); 11635 11636 // Try a full evaluation first. 11637 Expr::EvalResult result; 11638 if (E->EvaluateAsRValue(result, C, InConstantContext)) 11639 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 11640 11641 // I think we only want to look through implicit casts here; if the 11642 // user has an explicit widening cast, we should treat the value as 11643 // being of the new, wider type. 11644 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 11645 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 11646 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext, 11647 Approximate); 11648 11649 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 11650 11651 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 11652 CE->getCastKind() == CK_BooleanToSignedIntegral; 11653 11654 // Assume that non-integer casts can span the full range of the type. 11655 if (!isIntegerCast) 11656 return OutputTypeRange; 11657 11658 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 11659 std::min(MaxWidth, OutputTypeRange.Width), 11660 InConstantContext, Approximate); 11661 11662 // Bail out if the subexpr's range is as wide as the cast type. 11663 if (SubRange.Width >= OutputTypeRange.Width) 11664 return OutputTypeRange; 11665 11666 // Otherwise, we take the smaller width, and we're non-negative if 11667 // either the output type or the subexpr is. 11668 return IntRange(SubRange.Width, 11669 SubRange.NonNegative || OutputTypeRange.NonNegative); 11670 } 11671 11672 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 11673 // If we can fold the condition, just take that operand. 11674 bool CondResult; 11675 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 11676 return GetExprRange(C, 11677 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 11678 MaxWidth, InConstantContext, Approximate); 11679 11680 // Otherwise, conservatively merge. 11681 // GetExprRange requires an integer expression, but a throw expression 11682 // results in a void type. 11683 Expr *E = CO->getTrueExpr(); 11684 IntRange L = E->getType()->isVoidType() 11685 ? IntRange{0, true} 11686 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 11687 E = CO->getFalseExpr(); 11688 IntRange R = E->getType()->isVoidType() 11689 ? 
IntRange{0, true} 11690 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 11691 return IntRange::join(L, R); 11692 } 11693 11694 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 11695 IntRange (*Combine)(IntRange, IntRange) = IntRange::join; 11696 11697 switch (BO->getOpcode()) { 11698 case BO_Cmp: 11699 llvm_unreachable("builtin <=> should have class type"); 11700 11701 // Boolean-valued operations are single-bit and positive. 11702 case BO_LAnd: 11703 case BO_LOr: 11704 case BO_LT: 11705 case BO_GT: 11706 case BO_LE: 11707 case BO_GE: 11708 case BO_EQ: 11709 case BO_NE: 11710 return IntRange::forBoolType(); 11711 11712 // The type of the assignments is the type of the LHS, so the RHS 11713 // is not necessarily the same type. 11714 case BO_MulAssign: 11715 case BO_DivAssign: 11716 case BO_RemAssign: 11717 case BO_AddAssign: 11718 case BO_SubAssign: 11719 case BO_XorAssign: 11720 case BO_OrAssign: 11721 // TODO: bitfields? 11722 return IntRange::forValueOfType(C, GetExprType(E)); 11723 11724 // Simple assignments just pass through the RHS, which will have 11725 // been coerced to the LHS type. 11726 case BO_Assign: 11727 // TODO: bitfields? 11728 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 11729 Approximate); 11730 11731 // Operations with opaque sources are black-listed. 11732 case BO_PtrMemD: 11733 case BO_PtrMemI: 11734 return IntRange::forValueOfType(C, GetExprType(E)); 11735 11736 // Bitwise-and uses the *infinum* of the two source ranges. 11737 case BO_And: 11738 case BO_AndAssign: 11739 Combine = IntRange::bit_and; 11740 break; 11741 11742 // Left shift gets black-listed based on a judgement call. 11743 case BO_Shl: 11744 // ...except that we want to treat '1 << (blah)' as logically 11745 // positive. It's an important idiom. 11746 if (IntegerLiteral *I 11747 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 11748 if (I->getValue() == 1) { 11749 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 11750 return IntRange(R.Width, /*NonNegative*/ true); 11751 } 11752 } 11753 LLVM_FALLTHROUGH; 11754 11755 case BO_ShlAssign: 11756 return IntRange::forValueOfType(C, GetExprType(E)); 11757 11758 // Right shift by a constant can narrow its left argument. 11759 case BO_Shr: 11760 case BO_ShrAssign: { 11761 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 11762 Approximate); 11763 11764 // If the shift amount is a positive constant, drop the width by 11765 // that much. 11766 if (Optional<llvm::APSInt> shift = 11767 BO->getRHS()->getIntegerConstantExpr(C)) { 11768 if (shift->isNonNegative()) { 11769 unsigned zext = shift->getZExtValue(); 11770 if (zext >= L.Width) 11771 L.Width = (L.NonNegative ? 0 : 1); 11772 else 11773 L.Width -= zext; 11774 } 11775 } 11776 11777 return L; 11778 } 11779 11780 // Comma acts as its right operand. 11781 case BO_Comma: 11782 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 11783 Approximate); 11784 11785 case BO_Add: 11786 if (!Approximate) 11787 Combine = IntRange::sum; 11788 break; 11789 11790 case BO_Sub: 11791 if (BO->getLHS()->getType()->isPointerType()) 11792 return IntRange::forValueOfType(C, GetExprType(E)); 11793 if (!Approximate) 11794 Combine = IntRange::difference; 11795 break; 11796 11797 case BO_Mul: 11798 if (!Approximate) 11799 Combine = IntRange::product; 11800 break; 11801 11802 // The width of a division result is mostly determined by the size 11803 // of the LHS. 11804 case BO_Div: { 11805 // Don't 'pre-truncate' the operands. 
11806 unsigned opWidth = C.getIntWidth(GetExprType(E)); 11807 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 11808 Approximate); 11809 11810 // If the divisor is constant, use that. 11811 if (Optional<llvm::APSInt> divisor = 11812 BO->getRHS()->getIntegerConstantExpr(C)) { 11813 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 11814 if (log2 >= L.Width) 11815 L.Width = (L.NonNegative ? 0 : 1); 11816 else 11817 L.Width = std::min(L.Width - log2, MaxWidth); 11818 return L; 11819 } 11820 11821 // Otherwise, just use the LHS's width. 11822 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 11823 // could be -1. 11824 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 11825 Approximate); 11826 return IntRange(L.Width, L.NonNegative && R.NonNegative); 11827 } 11828 11829 case BO_Rem: 11830 Combine = IntRange::rem; 11831 break; 11832 11833 // The default behavior is okay for these. 11834 case BO_Xor: 11835 case BO_Or: 11836 break; 11837 } 11838 11839 // Combine the two ranges, but limit the result to the type in which we 11840 // performed the computation. 11841 QualType T = GetExprType(E); 11842 unsigned opWidth = C.getIntWidth(T); 11843 IntRange L = 11844 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 11845 IntRange R = 11846 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 11847 IntRange C = Combine(L, R); 11848 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 11849 C.Width = std::min(C.Width, MaxWidth); 11850 return C; 11851 } 11852 11853 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 11854 switch (UO->getOpcode()) { 11855 // Boolean-valued operations are white-listed. 11856 case UO_LNot: 11857 return IntRange::forBoolType(); 11858 11859 // Operations with opaque sources are black-listed. 11860 case UO_Deref: 11861 case UO_AddrOf: // should be impossible 11862 return IntRange::forValueOfType(C, GetExprType(E)); 11863 11864 default: 11865 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 11866 Approximate); 11867 } 11868 } 11869 11870 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 11871 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 11872 Approximate); 11873 11874 if (const auto *BitField = E->getSourceBitField()) 11875 return IntRange(BitField->getBitWidthValue(C), 11876 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 11877 11878 return IntRange::forValueOfType(C, GetExprType(E)); 11879 } 11880 11881 static IntRange GetExprRange(ASTContext &C, const Expr *E, 11882 bool InConstantContext, bool Approximate) { 11883 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 11884 Approximate); 11885 } 11886 11887 /// Checks whether the given value, which currently has the given 11888 /// source semantics, has the same value when coerced through the 11889 /// target semantics. 11890 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 11891 const llvm::fltSemantics &Src, 11892 const llvm::fltSemantics &Tgt) { 11893 llvm::APFloat truncated = value; 11894 11895 bool ignored; 11896 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 11897 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 11898 11899 return truncated.bitwiseIsEqual(value); 11900 } 11901 11902 /// Checks whether the given value, which currently has the given 11903 /// source semantics, has the same value when coerced through the 11904 /// target semantics. 
11905 /// 11906 /// The value might be a vector of floats (or a complex number). 11907 static bool IsSameFloatAfterCast(const APValue &value, 11908 const llvm::fltSemantics &Src, 11909 const llvm::fltSemantics &Tgt) { 11910 if (value.isFloat()) 11911 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 11912 11913 if (value.isVector()) { 11914 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 11915 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 11916 return false; 11917 return true; 11918 } 11919 11920 assert(value.isComplexFloat()); 11921 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 11922 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 11923 } 11924 11925 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 11926 bool IsListInit = false); 11927 11928 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 11929 // Suppress cases where we are comparing against an enum constant. 11930 if (const DeclRefExpr *DR = 11931 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 11932 if (isa<EnumConstantDecl>(DR->getDecl())) 11933 return true; 11934 11935 // Suppress cases where the value is expanded from a macro, unless that macro 11936 // is how a language represents a boolean literal. This is the case in both C 11937 // and Objective-C. 11938 SourceLocation BeginLoc = E->getBeginLoc(); 11939 if (BeginLoc.isMacroID()) { 11940 StringRef MacroName = Lexer::getImmediateMacroName( 11941 BeginLoc, S.getSourceManager(), S.getLangOpts()); 11942 return MacroName != "YES" && MacroName != "NO" && 11943 MacroName != "true" && MacroName != "false"; 11944 } 11945 11946 return false; 11947 } 11948 11949 static bool isKnownToHaveUnsignedValue(Expr *E) { 11950 return E->getType()->isIntegerType() && 11951 (!E->getType()->isSignedIntegerType() || 11952 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 11953 } 11954 11955 namespace { 11956 /// The promoted range of values of a type. In general this has the 11957 /// following structure: 11958 /// 11959 /// |-----------| . . . |-----------| 11960 /// ^ ^ ^ ^ 11961 /// Min HoleMin HoleMax Max 11962 /// 11963 /// ... where there is only a hole if a signed type is promoted to unsigned 11964 /// (in which case Min and Max are the smallest and largest representable 11965 /// values). 11966 struct PromotedRange { 11967 // Min, or HoleMax if there is a hole. 11968 llvm::APSInt PromotedMin; 11969 // Max, or HoleMin if there is a hole. 11970 llvm::APSInt PromotedMax; 11971 11972 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 11973 if (R.Width == 0) 11974 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 11975 else if (R.Width >= BitWidth && !Unsigned) { 11976 // Promotion made the type *narrower*. This happens when promoting 11977 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 11978 // Treat all values of 'signed int' as being in range for now. 11979 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 11980 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 11981 } else { 11982 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 11983 .extOrTrunc(BitWidth); 11984 PromotedMin.setIsUnsigned(Unsigned); 11985 11986 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 11987 .extOrTrunc(BitWidth); 11988 PromotedMax.setIsUnsigned(Unsigned); 11989 } 11990 } 11991 11992 // Determine whether this range is contiguous (has no hole). 
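// When there is a hole, PromotedMin holds HoleMax and PromotedMax holds
// HoleMin, so the comparison below deliberately fails.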
11993 bool isContiguous() const { return PromotedMin <= PromotedMax; } 11994 11995 // Where a constant value is within the range. 11996 enum ComparisonResult { 11997 LT = 0x1, 11998 LE = 0x2, 11999 GT = 0x4, 12000 GE = 0x8, 12001 EQ = 0x10, 12002 NE = 0x20, 12003 InRangeFlag = 0x40, 12004 12005 Less = LE | LT | NE, 12006 Min = LE | InRangeFlag, 12007 InRange = InRangeFlag, 12008 Max = GE | InRangeFlag, 12009 Greater = GE | GT | NE, 12010 12011 OnlyValue = LE | GE | EQ | InRangeFlag, 12012 InHole = NE 12013 }; 12014 12015 ComparisonResult compare(const llvm::APSInt &Value) const { 12016 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 12017 Value.isUnsigned() == PromotedMin.isUnsigned()); 12018 if (!isContiguous()) { 12019 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 12020 if (Value.isMinValue()) return Min; 12021 if (Value.isMaxValue()) return Max; 12022 if (Value >= PromotedMin) return InRange; 12023 if (Value <= PromotedMax) return InRange; 12024 return InHole; 12025 } 12026 12027 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 12028 case -1: return Less; 12029 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 12030 case 1: 12031 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 12032 case -1: return InRange; 12033 case 0: return Max; 12034 case 1: return Greater; 12035 } 12036 } 12037 12038 llvm_unreachable("impossible compare result"); 12039 } 12040 12041 static llvm::Optional<StringRef> 12042 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 12043 if (Op == BO_Cmp) { 12044 ComparisonResult LTFlag = LT, GTFlag = GT; 12045 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 12046 12047 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 12048 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 12049 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 12050 return llvm::None; 12051 } 12052 12053 ComparisonResult TrueFlag, FalseFlag; 12054 if (Op == BO_EQ) { 12055 TrueFlag = EQ; 12056 FalseFlag = NE; 12057 } else if (Op == BO_NE) { 12058 TrueFlag = NE; 12059 FalseFlag = EQ; 12060 } else { 12061 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 12062 TrueFlag = LT; 12063 FalseFlag = GE; 12064 } else { 12065 TrueFlag = GT; 12066 FalseFlag = LE; 12067 } 12068 if (Op == BO_GE || Op == BO_LE) 12069 std::swap(TrueFlag, FalseFlag); 12070 } 12071 if (R & TrueFlag) 12072 return StringRef("true"); 12073 if (R & FalseFlag) 12074 return StringRef("false"); 12075 return llvm::None; 12076 } 12077 }; 12078 } 12079 12080 static bool HasEnumType(Expr *E) { 12081 // Strip off implicit integral promotions. 12082 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 12083 if (ICE->getCastKind() != CK_IntegralCast && 12084 ICE->getCastKind() != CK_NoOp) 12085 break; 12086 E = ICE->getSubExpr(); 12087 } 12088 12089 return E->getType()->isEnumeralType(); 12090 } 12091 12092 static int classifyConstantValue(Expr *Constant) { 12093 // The values of this enumeration are used in the diagnostics 12094 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 12095 enum ConstantValueKind { 12096 Miscellaneous = 0, 12097 LiteralTrue, 12098 LiteralFalse 12099 }; 12100 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 12101 return BL->getValue() ? 
ConstantValueKind::LiteralTrue 12102 : ConstantValueKind::LiteralFalse; 12103 return ConstantValueKind::Miscellaneous; 12104 } 12105 12106 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E, 12107 Expr *Constant, Expr *Other, 12108 const llvm::APSInt &Value, 12109 bool RhsConstant) { 12110 if (S.inTemplateInstantiation()) 12111 return false; 12112 12113 Expr *OriginalOther = Other; 12114 12115 Constant = Constant->IgnoreParenImpCasts(); 12116 Other = Other->IgnoreParenImpCasts(); 12117 12118 // Suppress warnings on tautological comparisons between values of the same 12119 // enumeration type. There are only two ways we could warn on this: 12120 // - If the constant is outside the range of representable values of 12121 // the enumeration. In such a case, we should warn about the cast 12122 // to enumeration type, not about the comparison. 12123 // - If the constant is the maximum / minimum in-range value. For an 12124 // enumeration type, such comparisons can be meaningful and useful. 12125 if (Constant->getType()->isEnumeralType() && 12126 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType())) 12127 return false; 12128 12129 IntRange OtherValueRange = GetExprRange( 12130 S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false); 12131 12132 QualType OtherT = Other->getType(); 12133 if (const auto *AT = OtherT->getAs<AtomicType>()) 12134 OtherT = AT->getValueType(); 12135 IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT); 12136 12137 // Special case for ObjC BOOL on targets where it's a typedef for a signed char 12138 // (namely, macOS). FIXME: IntRange::forValueOfType should do this. 12139 bool IsObjCSignedCharBool = S.getLangOpts().ObjC && 12140 S.NSAPIObj->isObjCBOOLType(OtherT) && 12141 OtherT->isSpecificBuiltinType(BuiltinType::SChar); 12142 12143 // Whether we're treating Other as being a bool because of the form of 12144 // expression despite it having another type (typically 'int' in C). 12145 bool OtherIsBooleanDespiteType = 12146 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue(); 12147 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool) 12148 OtherTypeRange = OtherValueRange = IntRange::forBoolType(); 12149 12150 // Check if all values in the range of possible values of this expression 12151 // lead to the same comparison outcome. 12152 PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(), 12153 Value.isUnsigned()); 12154 auto Cmp = OtherPromotedValueRange.compare(Value); 12155 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant); 12156 if (!Result) 12157 return false; 12158 12159 // Also consider the range determined by the type alone. This allows us to 12160 // classify the warning under the proper diagnostic group. 12161 bool TautologicalTypeCompare = false; 12162 { 12163 PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(), 12164 Value.isUnsigned()); 12165 auto TypeCmp = OtherPromotedTypeRange.compare(Value); 12166 if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp, 12167 RhsConstant)) { 12168 TautologicalTypeCompare = true; 12169 Cmp = TypeCmp; 12170 Result = TypeResult; 12171 } 12172 } 12173 12174 // Don't warn if the non-constant operand actually always evaluates to the 12175 // same value. 12176 if (!TautologicalTypeCompare && OtherValueRange.Width == 0) 12177 return false; 12178 12179 // Suppress the diagnostic for an in-range comparison if the constant comes 12180 // from a macro or enumerator.
We don't want to diagnose 12181 // 12182 // some_long_value <= INT_MAX 12183 // 12184 // when sizeof(int) == sizeof(long). 12185 bool InRange = Cmp & PromotedRange::InRangeFlag; 12186 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 12187 return false; 12188 12189 // A comparison of an unsigned bit-field against 0 is really a type problem, 12190 // even though at the type level the bit-field might promote to 'signed int'. 12191 if (Other->refersToBitField() && InRange && Value == 0 && 12192 Other->getType()->isUnsignedIntegerOrEnumerationType()) 12193 TautologicalTypeCompare = true; 12194 12195 // If this is a comparison to an enum constant, include that 12196 // constant in the diagnostic. 12197 const EnumConstantDecl *ED = nullptr; 12198 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 12199 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 12200 12201 // Should be enough for uint128 (39 decimal digits) 12202 SmallString<64> PrettySourceValue; 12203 llvm::raw_svector_ostream OS(PrettySourceValue); 12204 if (ED) { 12205 OS << '\'' << *ED << "' (" << Value << ")"; 12206 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 12207 Constant->IgnoreParenImpCasts())) { 12208 OS << (BL->getValue() ? "YES" : "NO"); 12209 } else { 12210 OS << Value; 12211 } 12212 12213 if (!TautologicalTypeCompare) { 12214 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 12215 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 12216 << E->getOpcodeStr() << OS.str() << *Result 12217 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 12218 return true; 12219 } 12220 12221 if (IsObjCSignedCharBool) { 12222 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12223 S.PDiag(diag::warn_tautological_compare_objc_bool) 12224 << OS.str() << *Result); 12225 return true; 12226 } 12227 12228 // FIXME: We use a somewhat different formatting for the in-range cases and 12229 // cases involving boolean values for historical reasons. We should pick a 12230 // consistent way of presenting these diagnostics. 12231 if (!InRange || Other->isKnownToHaveBooleanValue()) { 12232 12233 S.DiagRuntimeBehavior( 12234 E->getOperatorLoc(), E, 12235 S.PDiag(!InRange ? diag::warn_out_of_range_compare 12236 : diag::warn_tautological_bool_compare) 12237 << OS.str() << classifyConstantValue(Constant) << OtherT 12238 << OtherIsBooleanDespiteType << *Result 12239 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 12240 } else { 12241 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy; 12242 unsigned Diag = 12243 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 12244 ? (HasEnumType(OriginalOther) 12245 ? diag::warn_unsigned_enum_always_true_comparison 12246 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison 12247 : diag::warn_unsigned_always_true_comparison) 12248 : diag::warn_tautological_constant_compare; 12249 12250 S.Diag(E->getOperatorLoc(), Diag) 12251 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 12252 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 12253 } 12254 12255 return true; 12256 } 12257 12258 /// Analyze the operands of the given comparison. Implements the 12259 /// fallback case from AnalyzeComparison. 12260 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 12261 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12262 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12263 } 12264 12265 /// Implements -Wsign-compare. 
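/// For instance, 'if (i < u)' with a signed 'int' i and an 'unsigned int' u
/// converts i to unsigned before comparing, which changes the result whenever
/// i is negative; that is the case this analysis tries to catch.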
12266 /// 12267 /// \param E the binary operator to check for warnings 12268 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 12269 // The type the comparison is being performed in. 12270 QualType T = E->getLHS()->getType(); 12271 12272 // Only analyze comparison operators where both sides have been converted to 12273 // the same type. 12274 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 12275 return AnalyzeImpConvsInComparison(S, E); 12276 12277 // Don't analyze value-dependent comparisons directly. 12278 if (E->isValueDependent()) 12279 return AnalyzeImpConvsInComparison(S, E); 12280 12281 Expr *LHS = E->getLHS(); 12282 Expr *RHS = E->getRHS(); 12283 12284 if (T->isIntegralType(S.Context)) { 12285 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 12286 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 12287 12288 // We don't care about expressions whose result is a constant. 12289 if (RHSValue && LHSValue) 12290 return AnalyzeImpConvsInComparison(S, E); 12291 12292 // We only care about expressions where just one side is literal 12293 if ((bool)RHSValue ^ (bool)LHSValue) { 12294 // Is the constant on the RHS or LHS? 12295 const bool RhsConstant = (bool)RHSValue; 12296 Expr *Const = RhsConstant ? RHS : LHS; 12297 Expr *Other = RhsConstant ? LHS : RHS; 12298 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 12299 12300 // Check whether an integer constant comparison results in a value 12301 // of 'true' or 'false'. 12302 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 12303 return AnalyzeImpConvsInComparison(S, E); 12304 } 12305 } 12306 12307 if (!T->hasUnsignedIntegerRepresentation()) { 12308 // We don't do anything special if this isn't an unsigned integral 12309 // comparison: we're only interested in integral comparisons, and 12310 // signed comparisons only happen in cases we don't care to warn about. 12311 return AnalyzeImpConvsInComparison(S, E); 12312 } 12313 12314 LHS = LHS->IgnoreParenImpCasts(); 12315 RHS = RHS->IgnoreParenImpCasts(); 12316 12317 if (!S.getLangOpts().CPlusPlus) { 12318 // Avoid warning about comparison of integers with different signs when 12319 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 12320 // the type of `E`. 12321 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 12322 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12323 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 12324 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12325 } 12326 12327 // Check to see if one of the (unmodified) operands is of different 12328 // signedness. 12329 Expr *signedOperand, *unsignedOperand; 12330 if (LHS->getType()->hasSignedIntegerRepresentation()) { 12331 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 12332 "unsigned comparison between two signed integer expressions?"); 12333 signedOperand = LHS; 12334 unsignedOperand = RHS; 12335 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 12336 signedOperand = RHS; 12337 unsignedOperand = LHS; 12338 } else { 12339 return AnalyzeImpConvsInComparison(S, E); 12340 } 12341 12342 // Otherwise, calculate the effective range of the signed operand. 12343 IntRange signedRange = GetExprRange( 12344 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 12345 12346 // Go ahead and analyze implicit conversions in the operands. Note 12347 // that we skip the implicit conversions on both sides. 
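// (LHS and RHS were stripped with IgnoreParenImpCasts above, which is why the
// outermost implicit conversions on each operand are skipped here.)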
12348 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 12349 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 12350 12351 // If the signed range is non-negative, -Wsign-compare won't fire. 12352 if (signedRange.NonNegative) 12353 return; 12354 12355 // For (in)equality comparisons, if the unsigned operand is a 12356 // constant which cannot collide with an overflowed signed operand, 12357 // then reinterpreting the signed operand as unsigned will not 12358 // change the result of the comparison. 12359 if (E->isEqualityOp()) { 12360 unsigned comparisonWidth = S.Context.getIntWidth(T); 12361 IntRange unsignedRange = 12362 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(), 12363 /*Approximate*/ true); 12364 12365 // We should never be unable to prove that the unsigned operand is 12366 // non-negative. 12367 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 12368 12369 if (unsignedRange.Width < comparisonWidth) 12370 return; 12371 } 12372 12373 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12374 S.PDiag(diag::warn_mixed_sign_comparison) 12375 << LHS->getType() << RHS->getType() 12376 << LHS->getSourceRange() << RHS->getSourceRange()); 12377 } 12378 12379 /// Analyzes an attempt to assign the given value to a bitfield. 12380 /// 12381 /// Returns true if there was something fishy about the attempt. 12382 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 12383 SourceLocation InitLoc) { 12384 assert(Bitfield->isBitField()); 12385 if (Bitfield->isInvalidDecl()) 12386 return false; 12387 12388 // White-list bool bitfields. 12389 QualType BitfieldType = Bitfield->getType(); 12390 if (BitfieldType->isBooleanType()) 12391 return false; 12392 12393 if (BitfieldType->isEnumeralType()) { 12394 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl(); 12395 // If the underlying enum type was not explicitly specified as an unsigned 12396 // type and the enum contains only positive values, MSVC++ will cause an 12397 // inconsistency by storing this as a signed type. 12398 if (S.getLangOpts().CPlusPlus11 && 12399 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 12400 BitfieldEnumDecl->getNumPositiveBits() > 0 && 12401 BitfieldEnumDecl->getNumNegativeBits() == 0) { 12402 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 12403 << BitfieldEnumDecl; 12404 } 12405 } 12406 12407 if (Bitfield->getType()->isBooleanType()) 12408 return false; 12409 12410 // Ignore value- or type-dependent expressions. 12411 if (Bitfield->getBitWidth()->isValueDependent() || 12412 Bitfield->getBitWidth()->isTypeDependent() || 12413 Init->isValueDependent() || 12414 Init->isTypeDependent()) 12415 return false; 12416 12417 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 12418 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 12419 12420 Expr::EvalResult Result; 12421 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 12422 Expr::SE_AllowSideEffects)) { 12423 // The RHS is not constant. If the RHS has an enum type, make sure the 12424 // bitfield is wide enough to hold all the values of the enum without 12425 // truncation. 12426 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 12427 EnumDecl *ED = EnumTy->getDecl(); 12428 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 12429 12430 // Enum types are implicitly signed on Windows, so check if there are any 12431 // negative enumerators to see if the enum was intended to be signed or 12432 // not.
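// For example, 'enum E { A, B, C };' has no negative enumerators, so it is
// treated here as intended-unsigned even though its underlying type on
// Windows is plain 'int'.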
12433 bool SignedEnum = ED->getNumNegativeBits() > 0; 12434 12435 // Check for surprising sign changes when assigning enum values to a 12436 // bitfield of different signedness. If the bitfield is signed and we 12437 // have exactly the right number of bits to store this unsigned enum, 12438 // suggest changing the enum to an unsigned type. This typically happens 12439 // on Windows where unfixed enums always use an underlying type of 'int'. 12440 unsigned DiagID = 0; 12441 if (SignedEnum && !SignedBitfield) { 12442 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 12443 } else if (SignedBitfield && !SignedEnum && 12444 ED->getNumPositiveBits() == FieldWidth) { 12445 DiagID = diag::warn_signed_bitfield_enum_conversion; 12446 } 12447 12448 if (DiagID) { 12449 S.Diag(InitLoc, DiagID) << Bitfield << ED; 12450 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 12451 SourceRange TypeRange = 12452 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 12453 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 12454 << SignedEnum << TypeRange; 12455 } 12456 12457 // Compute the required bitwidth. If the enum has negative values, we need 12458 // one more bit than the normal number of positive bits to represent the 12459 // sign bit. 12460 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 12461 ED->getNumNegativeBits()) 12462 : ED->getNumPositiveBits(); 12463 12464 // Check the bitwidth. 12465 if (BitsNeeded > FieldWidth) { 12466 Expr *WidthExpr = Bitfield->getBitWidth(); 12467 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 12468 << Bitfield << ED; 12469 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 12470 << BitsNeeded << ED << WidthExpr->getSourceRange(); 12471 } 12472 } 12473 12474 return false; 12475 } 12476 12477 llvm::APSInt Value = Result.Val.getInt(); 12478 12479 unsigned OriginalWidth = Value.getBitWidth(); 12480 12481 if (!Value.isSigned() || Value.isNegative()) 12482 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 12483 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 12484 OriginalWidth = Value.getMinSignedBits(); 12485 12486 if (OriginalWidth <= FieldWidth) 12487 return false; 12488 12489 // Compute the value which the bitfield will contain. 12490 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 12491 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 12492 12493 // Check whether the stored value is equal to the original value. 12494 TruncatedValue = TruncatedValue.extend(OriginalWidth); 12495 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 12496 return false; 12497 12498 // Special-case bitfields of width 1: booleans are naturally 0/1, and 12499 // therefore don't strictly fit into a signed bitfield of width 1. 12500 if (FieldWidth == 1 && Value == 1) 12501 return false; 12502 12503 std::string PrettyValue = toString(Value, 10); 12504 std::string PrettyTrunc = toString(TruncatedValue, 10); 12505 12506 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 12507 << PrettyValue << PrettyTrunc << OriginalInit->getType() 12508 << Init->getSourceRange(); 12509 12510 return true; 12511 } 12512 12513 /// Analyze the given simple or compound assignment for warning-worthy 12514 /// operations. 12515 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 12516 // Just recurse on the LHS. 
12517 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12518 12519 // We want to recurse on the RHS as normal unless we're assigning to 12520 // a bitfield. 12521 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 12522 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 12523 E->getOperatorLoc())) { 12524 // Recurse, ignoring any implicit conversions on the RHS. 12525 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 12526 E->getOperatorLoc()); 12527 } 12528 } 12529 12530 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12531 12532 // Diagnose implicitly sequentially-consistent atomic assignment. 12533 if (E->getLHS()->getType()->isAtomicType()) 12534 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12535 } 12536 12537 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12538 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 12539 SourceLocation CContext, unsigned diag, 12540 bool pruneControlFlow = false) { 12541 if (pruneControlFlow) { 12542 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12543 S.PDiag(diag) 12544 << SourceType << T << E->getSourceRange() 12545 << SourceRange(CContext)); 12546 return; 12547 } 12548 S.Diag(E->getExprLoc(), diag) 12549 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 12550 } 12551 12552 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12553 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 12554 SourceLocation CContext, 12555 unsigned diag, bool pruneControlFlow = false) { 12556 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 12557 } 12558 12559 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 12560 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 12561 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 12562 } 12563 12564 static void adornObjCBoolConversionDiagWithTernaryFixit( 12565 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 12566 Expr *Ignored = SourceExpr->IgnoreImplicit(); 12567 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 12568 Ignored = OVE->getSourceExpr(); 12569 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 12570 isa<BinaryOperator>(Ignored) || 12571 isa<CXXOperatorCallExpr>(Ignored); 12572 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 12573 if (NeedsParens) 12574 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 12575 << FixItHint::CreateInsertion(EndLoc, ")"); 12576 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 12577 } 12578 12579 /// Diagnose an implicit cast from a floating point value to an integer value. 
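/// For instance, 'int i = 1.5;' warns that the value changes from 1.5 to 1,
/// while an exactly representable constant such as 'int i = 2.0;' is not
/// diagnosed.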
12580 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 12581 SourceLocation CContext) { 12582 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 12583 const bool PruneWarnings = S.inTemplateInstantiation(); 12584 12585 Expr *InnerE = E->IgnoreParenImpCasts(); 12586 // We also want to warn on, e.g., "int i = -1.234" 12587 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 12588 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 12589 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 12590 12591 const bool IsLiteral = 12592 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 12593 12594 llvm::APFloat Value(0.0); 12595 bool IsConstant = 12596 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 12597 if (!IsConstant) { 12598 if (isObjCSignedCharBool(S, T)) { 12599 return adornObjCBoolConversionDiagWithTernaryFixit( 12600 S, E, 12601 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 12602 << E->getType()); 12603 } 12604 12605 return DiagnoseImpCast(S, E, T, CContext, 12606 diag::warn_impcast_float_integer, PruneWarnings); 12607 } 12608 12609 bool isExact = false; 12610 12611 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 12612 T->hasUnsignedIntegerRepresentation()); 12613 llvm::APFloat::opStatus Result = Value.convertToInteger( 12614 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 12615 12616 // FIXME: Force the precision of the source value down so we don't print 12617 // digits which are usually useless (we don't really care here if we 12618 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 12619 // would automatically print the shortest representation, but it's a bit 12620 // tricky to implement. 12621 SmallString<16> PrettySourceValue; 12622 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 12623 precision = (precision * 59 + 195) / 196; 12624 Value.toString(PrettySourceValue, precision); 12625 12626 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 12627 return adornObjCBoolConversionDiagWithTernaryFixit( 12628 S, E, 12629 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 12630 << PrettySourceValue); 12631 } 12632 12633 if (Result == llvm::APFloat::opOK && isExact) { 12634 if (IsLiteral) return; 12635 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 12636 PruneWarnings); 12637 } 12638 12639 // Conversion of a floating-point value to a non-bool integer where the 12640 // integral part cannot be represented by the integer type is undefined. 12641 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 12642 return DiagnoseImpCast( 12643 S, E, T, CContext, 12644 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 12645 : diag::warn_impcast_float_to_integer_out_of_range, 12646 PruneWarnings); 12647 12648 unsigned DiagID = 0; 12649 if (IsLiteral) { 12650 // Warn on floating point literal to integer. 12651 DiagID = diag::warn_impcast_literal_float_to_integer; 12652 } else if (IntegerValue == 0) { 12653 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 12654 return DiagnoseImpCast(S, E, T, CContext, 12655 diag::warn_impcast_float_integer, PruneWarnings); 12656 } 12657 // Warn on non-zero to zero conversion. 
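// (For example, a constant expression like '1.0 / 3.0' assigned to an 'int'
// truncates a non-zero value down to 0.)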
12658 DiagID = diag::warn_impcast_float_to_integer_zero; 12659 } else { 12660 if (IntegerValue.isUnsigned()) { 12661 if (!IntegerValue.isMaxValue()) { 12662 return DiagnoseImpCast(S, E, T, CContext, 12663 diag::warn_impcast_float_integer, PruneWarnings); 12664 } 12665 } else { // IntegerValue.isSigned() 12666 if (!IntegerValue.isMaxSignedValue() && 12667 !IntegerValue.isMinSignedValue()) { 12668 return DiagnoseImpCast(S, E, T, CContext, 12669 diag::warn_impcast_float_integer, PruneWarnings); 12670 } 12671 } 12672 // Warn on evaluatable floating point expression to integer conversion. 12673 DiagID = diag::warn_impcast_float_to_integer; 12674 } 12675 12676 SmallString<16> PrettyTargetValue; 12677 if (IsBool) 12678 PrettyTargetValue = Value.isZero() ? "false" : "true"; 12679 else 12680 IntegerValue.toString(PrettyTargetValue); 12681 12682 if (PruneWarnings) { 12683 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12684 S.PDiag(DiagID) 12685 << E->getType() << T.getUnqualifiedType() 12686 << PrettySourceValue << PrettyTargetValue 12687 << E->getSourceRange() << SourceRange(CContext)); 12688 } else { 12689 S.Diag(E->getExprLoc(), DiagID) 12690 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 12691 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 12692 } 12693 } 12694 12695 /// Analyze the given compound assignment for the possible losing of 12696 /// floating-point precision. 12697 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 12698 assert(isa<CompoundAssignOperator>(E) && 12699 "Must be compound assignment operation"); 12700 // Recurse on the LHS and RHS in here 12701 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12702 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12703 12704 if (E->getLHS()->getType()->isAtomicType()) 12705 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 12706 12707 // Now check the outermost expression 12708 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 12709 const auto *RBT = cast<CompoundAssignOperator>(E) 12710 ->getComputationResultType() 12711 ->getAs<BuiltinType>(); 12712 12713 // The below checks assume source is floating point. 12714 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 12715 12716 // If source is floating point but target is an integer. 12717 if (ResultBT->isInteger()) 12718 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 12719 E->getExprLoc(), diag::warn_impcast_float_integer); 12720 12721 if (!ResultBT->isFloatingPoint()) 12722 return; 12723 12724 // If both source and target are floating points, warn about losing precision. 12725 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 12726 QualType(ResultBT, 0), QualType(RBT, 0)); 12727 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 12728 // warn about dropping FP rank. 
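// e.g. 'float f; f += 0.1;' computes the sum as 'double' and then truncates
// it back to 'float'.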
12729 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 12730 diag::warn_impcast_float_result_precision); 12731 } 12732 12733 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 12734 IntRange Range) { 12735 if (!Range.Width) return "0"; 12736 12737 llvm::APSInt ValueInRange = Value; 12738 ValueInRange.setIsSigned(!Range.NonNegative); 12739 ValueInRange = ValueInRange.trunc(Range.Width); 12740 return toString(ValueInRange, 10); 12741 } 12742 12743 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 12744 if (!isa<ImplicitCastExpr>(Ex)) 12745 return false; 12746 12747 Expr *InnerE = Ex->IgnoreParenImpCasts(); 12748 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 12749 const Type *Source = 12750 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 12751 if (Target->isDependentType()) 12752 return false; 12753 12754 const BuiltinType *FloatCandidateBT = 12755 dyn_cast<BuiltinType>(ToBool ? Source : Target); 12756 const Type *BoolCandidateType = ToBool ? Target : Source; 12757 12758 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 12759 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 12760 } 12761 12762 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 12763 SourceLocation CC) { 12764 unsigned NumArgs = TheCall->getNumArgs(); 12765 for (unsigned i = 0; i < NumArgs; ++i) { 12766 Expr *CurrA = TheCall->getArg(i); 12767 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 12768 continue; 12769 12770 bool IsSwapped = ((i > 0) && 12771 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 12772 IsSwapped |= ((i < (NumArgs - 1)) && 12773 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 12774 if (IsSwapped) { 12775 // Warn on this floating-point to bool conversion. 12776 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 12777 CurrA->getType(), CC, 12778 diag::warn_impcast_floating_point_to_bool); 12779 } 12780 } 12781 } 12782 12783 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 12784 SourceLocation CC) { 12785 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 12786 E->getExprLoc())) 12787 return; 12788 12789 // Don't warn on functions which have return type nullptr_t. 12790 if (isa<CallExpr>(E)) 12791 return; 12792 12793 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 12794 const Expr::NullPointerConstantKind NullKind = 12795 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 12796 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 12797 return; 12798 12799 // Return if target type is a safe conversion. 12800 if (T->isAnyPointerType() || T->isBlockPointerType() || 12801 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 12802 return; 12803 12804 SourceLocation Loc = E->getSourceRange().getBegin(); 12805 12806 // Venture through the macro stacks to get to the source of macro arguments. 12807 // The new location is a better location than the complete location that was 12808 // passed in. 12809 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 12810 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 12811 12812 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
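// (For instance, the GNU C++ headers commonly define NULL as __null.)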
12813 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 12814 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 12815 Loc, S.SourceMgr, S.getLangOpts()); 12816 if (MacroName == "NULL") 12817 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 12818 } 12819 12820 // Only warn if the null and context location are in the same macro expansion. 12821 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 12822 return; 12823 12824 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 12825 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 12826 << FixItHint::CreateReplacement(Loc, 12827 S.getFixItZeroLiteralForType(T, Loc)); 12828 } 12829 12830 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12831 ObjCArrayLiteral *ArrayLiteral); 12832 12833 static void 12834 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 12835 ObjCDictionaryLiteral *DictionaryLiteral); 12836 12837 /// Check a single element within a collection literal against the 12838 /// target element type. 12839 static void checkObjCCollectionLiteralElement(Sema &S, 12840 QualType TargetElementType, 12841 Expr *Element, 12842 unsigned ElementKind) { 12843 // Skip a bitcast to 'id' or qualified 'id'. 12844 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 12845 if (ICE->getCastKind() == CK_BitCast && 12846 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 12847 Element = ICE->getSubExpr(); 12848 } 12849 12850 QualType ElementType = Element->getType(); 12851 ExprResult ElementResult(Element); 12852 if (ElementType->getAs<ObjCObjectPointerType>() && 12853 S.CheckSingleAssignmentConstraints(TargetElementType, 12854 ElementResult, 12855 false, false) 12856 != Sema::Compatible) { 12857 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 12858 << ElementType << ElementKind << TargetElementType 12859 << Element->getSourceRange(); 12860 } 12861 12862 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 12863 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 12864 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 12865 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 12866 } 12867 12868 /// Check an Objective-C array literal being converted to the given 12869 /// target type. 12870 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12871 ObjCArrayLiteral *ArrayLiteral) { 12872 if (!S.NSArrayDecl) 12873 return; 12874 12875 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 12876 if (!TargetObjCPtr) 12877 return; 12878 12879 if (TargetObjCPtr->isUnspecialized() || 12880 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 12881 != S.NSArrayDecl->getCanonicalDecl()) 12882 return; 12883 12884 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 12885 if (TypeArgs.size() != 1) 12886 return; 12887 12888 QualType TargetElementType = TypeArgs[0]; 12889 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 12890 checkObjCCollectionLiteralElement(S, TargetElementType, 12891 ArrayLiteral->getElement(I), 12892 0); 12893 } 12894 } 12895 12896 /// Check an Objective-C dictionary literal being converted to the given 12897 /// target type. 
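/// For example, assigning '@{@"key" : @"value"}' to an
/// 'NSDictionary<NSString *, NSNumber *> *' should warn that the object
/// element is an 'NSString *' rather than an 'NSNumber *'.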
12898 static void 12899 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 12900 ObjCDictionaryLiteral *DictionaryLiteral) { 12901 if (!S.NSDictionaryDecl) 12902 return; 12903 12904 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 12905 if (!TargetObjCPtr) 12906 return; 12907 12908 if (TargetObjCPtr->isUnspecialized() || 12909 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 12910 != S.NSDictionaryDecl->getCanonicalDecl()) 12911 return; 12912 12913 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 12914 if (TypeArgs.size() != 2) 12915 return; 12916 12917 QualType TargetKeyType = TypeArgs[0]; 12918 QualType TargetObjectType = TypeArgs[1]; 12919 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 12920 auto Element = DictionaryLiteral->getKeyValueElement(I); 12921 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 12922 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 12923 } 12924 } 12925 12926 // Helper function to filter out cases for constant width constant conversion. 12927 // Don't warn on char array initialization or for non-decimal values. 12928 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 12929 SourceLocation CC) { 12930 // If initializing from a constant, and the constant starts with '0', 12931 // then it is a binary, octal, or hexadecimal constant. Allow these constants 12932 // to fill all the bits, even if there is a sign change. 12933 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 12934 const char FirstLiteralCharacter = 12935 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 12936 if (FirstLiteralCharacter == '0') 12937 return false; 12938 } 12939 12940 // If the CC location points to a '{', and the type is char, then 12941 // assume it is an array initialization. 12942 if (CC.isValid() && T->isCharType()) { 12943 const char FirstContextCharacter = 12944 S.getSourceManager().getCharacterData(CC)[0]; 12945 if (FirstContextCharacter == '{') 12946 return false; 12947 } 12948 12949 return true; 12950 } 12951 12952 static const IntegerLiteral *getIntegerLiteral(Expr *E) { 12953 const auto *IL = dyn_cast<IntegerLiteral>(E); 12954 if (!IL) { 12955 if (auto *UO = dyn_cast<UnaryOperator>(E)) { 12956 if (UO->getOpcode() == UO_Minus) 12957 return dyn_cast<IntegerLiteral>(UO->getSubExpr()); 12958 } 12959 } 12960 12961 return IL; 12962 } 12963 12964 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) { 12965 E = E->IgnoreParenImpCasts(); 12966 SourceLocation ExprLoc = E->getExprLoc(); 12967 12968 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 12969 BinaryOperator::Opcode Opc = BO->getOpcode(); 12970 Expr::EvalResult Result; 12971 // Do not diagnose unsigned shifts.
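// Examples: 'if (1 << 4)' folds to a non-zero constant and is always true,
// whereas 'if (x << 2)' with a signed 'x' gets the in-bool-context warning.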
12972 if (Opc == BO_Shl) { 12973 const auto *LHS = getIntegerLiteral(BO->getLHS()); 12974 const auto *RHS = getIntegerLiteral(BO->getRHS()); 12975 if (LHS && LHS->getValue() == 0) 12976 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0; 12977 else if (!E->isValueDependent() && LHS && RHS && 12978 RHS->getValue().isNonNegative() && 12979 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) 12980 S.Diag(ExprLoc, diag::warn_left_shift_always) 12981 << (Result.Val.getInt() != 0); 12982 else if (E->getType()->isSignedIntegerType()) 12983 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E; 12984 } 12985 } 12986 12987 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 12988 const auto *LHS = getIntegerLiteral(CO->getTrueExpr()); 12989 const auto *RHS = getIntegerLiteral(CO->getFalseExpr()); 12990 if (!LHS || !RHS) 12991 return; 12992 if ((LHS->getValue() == 0 || LHS->getValue() == 1) && 12993 (RHS->getValue() == 0 || RHS->getValue() == 1)) 12994 // Do not diagnose common idioms. 12995 return; 12996 if (LHS->getValue() != 0 && RHS->getValue() != 0) 12997 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true); 12998 } 12999 } 13000 13001 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, 13002 SourceLocation CC, 13003 bool *ICContext = nullptr, 13004 bool IsListInit = false) { 13005 if (E->isTypeDependent() || E->isValueDependent()) return; 13006 13007 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 13008 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 13009 if (Source == Target) return; 13010 if (Target->isDependentType()) return; 13011 13012 // If the conversion context location is invalid, don't complain. We also 13013 // don't want to emit a warning if the issue occurs from the expansion of 13014 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 13015 // delay this check as long as possible. Once we detect we are in that 13016 // scenario, we just return. 13017 if (CC.isInvalid()) 13018 return; 13019 13020 if (Source->isAtomicType()) 13021 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 13022 13023 // Diagnose implicit casts to bool. 13024 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 13025 if (isa<StringLiteral>(E)) 13026 // Warn on string literal to bool. Checks for string literals in logical 13027 // and expressions, for instance, assert(0 && "error here"), are 13028 // prevented by a check in AnalyzeImplicitConversions(). 13029 return DiagnoseImpCast(S, E, T, CC, 13030 diag::warn_impcast_string_literal_to_bool); 13031 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 13032 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 13033 // This covers the literal expressions that evaluate to Objective-C 13034 // objects. 13035 return DiagnoseImpCast(S, E, T, CC, 13036 diag::warn_impcast_objective_c_literal_to_bool); 13037 } 13038 if (Source->isPointerType() || Source->canDecayToPointerType()) { 13039 // Warn on pointer to bool conversion that is always true. 13040 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 13041 SourceRange(CC)); 13042 } 13043 } 13044 13045 // If we're converting a constant to an ObjC BOOL on a platform where BOOL 13046 // is a typedef for signed char (macOS), then that constant value has to be 1 13047 // or 0.
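// e.g. 'BOOL b = 2;' is diagnosed, with a fix-it suggesting '2 ? YES : NO'.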
13048 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 13049 Expr::EvalResult Result; 13050 if (E->EvaluateAsInt(Result, S.getASTContext(), 13051 Expr::SE_AllowSideEffects)) { 13052 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 13053 adornObjCBoolConversionDiagWithTernaryFixit( 13054 S, E, 13055 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 13056 << toString(Result.Val.getInt(), 10)); 13057 } 13058 return; 13059 } 13060 } 13061 13062 // Check implicit casts from Objective-C collection literals to specialized 13063 // collection types, e.g., NSArray<NSString *> *. 13064 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 13065 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 13066 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 13067 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 13068 13069 // Strip vector types. 13070 if (isa<VectorType>(Source)) { 13071 if (Target->isVLSTBuiltinType() && 13072 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 13073 QualType(Source, 0)) || 13074 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 13075 QualType(Source, 0)))) 13076 return; 13077 13078 if (!isa<VectorType>(Target)) { 13079 if (S.SourceMgr.isInSystemMacro(CC)) 13080 return; 13081 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 13082 } 13083 13084 // If the vector cast is between two vectors of the same size, it is 13085 // a bitcast, not a conversion. 13086 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13087 return; 13088 13089 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 13090 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 13091 } 13092 if (auto VecTy = dyn_cast<VectorType>(Target)) 13093 Target = VecTy->getElementType().getTypePtr(); 13094 13095 // Strip complex types. 13096 if (isa<ComplexType>(Source)) { 13097 if (!isa<ComplexType>(Target)) { 13098 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 13099 return; 13100 13101 return DiagnoseImpCast(S, E, T, CC, 13102 S.getLangOpts().CPlusPlus 13103 ? diag::err_impcast_complex_scalar 13104 : diag::warn_impcast_complex_scalar); 13105 } 13106 13107 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 13108 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 13109 } 13110 13111 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 13112 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 13113 13114 // If the source is floating point... 13115 if (SourceBT && SourceBT->isFloatingPoint()) { 13116 // ...and the target is floating point... 13117 if (TargetBT && TargetBT->isFloatingPoint()) { 13118 // ...then warn if we're dropping FP rank. 13119 13120 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 13121 QualType(SourceBT, 0), QualType(TargetBT, 0)); 13122 if (Order > 0) { 13123 // Don't warn about float constants that are precisely 13124 // representable in the target type. 13125 Expr::EvalResult result; 13126 if (E->EvaluateAsRValue(result, S.Context)) { 13127 // Value might be a float, a float vector, or a float complex. 13128 if (IsSameFloatAfterCast(result.Val, 13129 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 13130 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 13131 return; 13132 } 13133 13134 if (S.SourceMgr.isInSystemMacro(CC)) 13135 return; 13136 13137 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 13138 } 13139 // ...
or possibly if we're increasing rank, too 13140 else if (Order < 0) { 13141 if (S.SourceMgr.isInSystemMacro(CC)) 13142 return; 13143 13144 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 13145 } 13146 return; 13147 } 13148 13149 // If the target is integral, always warn. 13150 if (TargetBT && TargetBT->isInteger()) { 13151 if (S.SourceMgr.isInSystemMacro(CC)) 13152 return; 13153 13154 DiagnoseFloatingImpCast(S, E, T, CC); 13155 } 13156 13157 // Detect the case where a call result is converted from floating-point 13158 // to bool, and the final argument to the call is converted from bool, to 13159 // discover this typo: 13160 // 13161 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 13162 // 13163 // FIXME: This is an incredibly special case; is there some more general 13164 // way to detect this class of misplaced-parentheses bug? 13165 if (Target->isBooleanType() && isa<CallExpr>(E)) { 13166 // Check last argument of function call to see if it is an 13167 // implicit cast from a type matching the type the result 13168 // is being cast to. 13169 CallExpr *CEx = cast<CallExpr>(E); 13170 if (unsigned NumArgs = CEx->getNumArgs()) { 13171 Expr *LastA = CEx->getArg(NumArgs - 1); 13172 Expr *InnerE = LastA->IgnoreParenImpCasts(); 13173 if (isa<ImplicitCastExpr>(LastA) && 13174 InnerE->getType()->isBooleanType()) { 13175 // Warn on this floating-point to bool conversion. 13176 DiagnoseImpCast(S, E, T, CC, 13177 diag::warn_impcast_floating_point_to_bool); 13178 } 13179 } 13180 } 13181 return; 13182 } 13183 13184 // Valid casts involving fixed point types should be accounted for here. 13185 if (Source->isFixedPointType()) { 13186 if (Target->isUnsaturatedFixedPointType()) { 13187 Expr::EvalResult Result; 13188 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 13189 S.isConstantEvaluated())) { 13190 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 13191 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 13192 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 13193 if (Value > MaxVal || Value < MinVal) { 13194 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13195 S.PDiag(diag::warn_impcast_fixed_point_range) 13196 << Value.toString() << T 13197 << E->getSourceRange() 13198 << clang::SourceRange(CC)); 13199 return; 13200 } 13201 } 13202 } else if (Target->isIntegerType()) { 13203 Expr::EvalResult Result; 13204 if (!S.isConstantEvaluated() && 13205 E->EvaluateAsFixedPoint(Result, S.Context, 13206 Expr::SE_AllowSideEffects)) { 13207 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 13208 13209 bool Overflowed; 13210 llvm::APSInt IntResult = FXResult.convertToInt( 13211 S.Context.getIntWidth(T), 13212 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 13213 13214 if (Overflowed) { 13215 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13216 S.PDiag(diag::warn_impcast_fixed_point_range) 13217 << FXResult.toString() << T 13218 << E->getSourceRange() 13219 << clang::SourceRange(CC)); 13220 return; 13221 } 13222 } 13223 } 13224 } else if (Target->isUnsaturatedFixedPointType()) { 13225 if (Source->isIntegerType()) { 13226 Expr::EvalResult Result; 13227 if (!S.isConstantEvaluated() && 13228 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 13229 llvm::APSInt Value = Result.Val.getInt(); 13230 13231 bool Overflowed; 13232 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 13233 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 13234 13235 if (Overflowed) { 13236
S.DiagRuntimeBehavior(E->getExprLoc(), E, 13237 S.PDiag(diag::warn_impcast_fixed_point_range) 13238 << toString(Value, /*Radix=*/10) << T 13239 << E->getSourceRange() 13240 << clang::SourceRange(CC)); 13241 return; 13242 } 13243 } 13244 } 13245 } 13246 13247 // If we are casting an integer type to a floating point type without 13248 // initialization-list syntax, we might lose accuracy if the floating 13249 // point type has a narrower significand than the integer type. 13250 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 13251 TargetBT->isFloatingType() && !IsListInit) { 13252 // Determine the number of precision bits in the source integer type. 13253 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 13254 /*Approximate*/ true); 13255 unsigned int SourcePrecision = SourceRange.Width; 13256 13257 // Determine the number of precision bits in the 13258 // target floating point type. 13259 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 13260 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13261 13262 if (SourcePrecision > 0 && TargetPrecision > 0 && 13263 SourcePrecision > TargetPrecision) { 13264 13265 if (Optional<llvm::APSInt> SourceInt = 13266 E->getIntegerConstantExpr(S.Context)) { 13267 // If the source integer is a constant, convert it to the target 13268 // floating point type. Issue a warning if the value changes 13269 // during the whole conversion. 13270 llvm::APFloat TargetFloatValue( 13271 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13272 llvm::APFloat::opStatus ConversionStatus = 13273 TargetFloatValue.convertFromAPInt( 13274 *SourceInt, SourceBT->isSignedInteger(), 13275 llvm::APFloat::rmNearestTiesToEven); 13276 13277 if (ConversionStatus != llvm::APFloat::opOK) { 13278 SmallString<32> PrettySourceValue; 13279 SourceInt->toString(PrettySourceValue, 10); 13280 SmallString<32> PrettyTargetValue; 13281 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 13282 13283 S.DiagRuntimeBehavior( 13284 E->getExprLoc(), E, 13285 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 13286 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13287 << E->getSourceRange() << clang::SourceRange(CC)); 13288 } 13289 } else { 13290 // Otherwise, the implicit conversion may lose precision. 13291 DiagnoseImpCast(S, E, T, CC, 13292 diag::warn_impcast_integer_float_precision); 13293 } 13294 } 13295 } 13296 13297 DiagnoseNullConversion(S, E, T, CC); 13298 13299 S.DiscardMisalignedMemberAddress(Target, E); 13300 13301 if (Target->isBooleanType()) 13302 DiagnoseIntInBoolContext(S, E); 13303 13304 if (!Source->isIntegerType() || !Target->isIntegerType()) 13305 return; 13306 13307 // TODO: remove this early return once the false positives for constant->bool 13308 // in templates, macros, etc, are reduced or removed. 
13309 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 13310 return; 13311 13312 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 13313 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 13314 return adornObjCBoolConversionDiagWithTernaryFixit( 13315 S, E, 13316 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 13317 << E->getType()); 13318 } 13319 13320 IntRange SourceTypeRange = 13321 IntRange::forTargetOfCanonicalType(S.Context, Source); 13322 IntRange LikelySourceRange = 13323 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 13324 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 13325 13326 if (LikelySourceRange.Width > TargetRange.Width) { 13327 // If the source is a constant, use a default-on diagnostic. 13328 // TODO: this should happen for bitfield stores, too. 13329 Expr::EvalResult Result; 13330 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 13331 S.isConstantEvaluated())) { 13332 llvm::APSInt Value(32); 13333 Value = Result.Val.getInt(); 13334 13335 if (S.SourceMgr.isInSystemMacro(CC)) 13336 return; 13337 13338 std::string PrettySourceValue = toString(Value, 10); 13339 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13340 13341 S.DiagRuntimeBehavior( 13342 E->getExprLoc(), E, 13343 S.PDiag(diag::warn_impcast_integer_precision_constant) 13344 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13345 << E->getSourceRange() << SourceRange(CC)); 13346 return; 13347 } 13348 13349 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 13350 if (S.SourceMgr.isInSystemMacro(CC)) 13351 return; 13352 13353 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 13354 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 13355 /* pruneControlFlow */ true); 13356 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 13357 } 13358 13359 if (TargetRange.Width > SourceTypeRange.Width) { 13360 if (auto *UO = dyn_cast<UnaryOperator>(E)) 13361 if (UO->getOpcode() == UO_Minus) 13362 if (Source->isUnsignedIntegerType()) { 13363 if (Target->isUnsignedIntegerType()) 13364 return DiagnoseImpCast(S, E, T, CC, 13365 diag::warn_impcast_high_order_zero_bits); 13366 if (Target->isSignedIntegerType()) 13367 return DiagnoseImpCast(S, E, T, CC, 13368 diag::warn_impcast_nonnegative_result); 13369 } 13370 } 13371 13372 if (TargetRange.Width == LikelySourceRange.Width && 13373 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 13374 Source->isSignedIntegerType()) { 13375 // Warn when doing a signed to signed conversion, warn if the positive 13376 // source value is exactly the width of the target type, which will 13377 // cause a negative value to be stored. 13378 13379 Expr::EvalResult Result; 13380 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 13381 !S.SourceMgr.isInSystemMacro(CC)) { 13382 llvm::APSInt Value = Result.Val.getInt(); 13383 if (isSameWidthConstantConversion(S, E, T, CC)) { 13384 std::string PrettySourceValue = toString(Value, 10); 13385 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13386 13387 S.DiagRuntimeBehavior( 13388 E->getExprLoc(), E, 13389 S.PDiag(diag::warn_impcast_integer_precision_constant) 13390 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13391 << E->getSourceRange() << SourceRange(CC)); 13392 return; 13393 } 13394 } 13395 13396 // Fall through for non-constants to give a sign conversion warning. 
13397 } 13398 13399 if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 13400 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 13401 LikelySourceRange.Width == TargetRange.Width)) { 13402 if (S.SourceMgr.isInSystemMacro(CC)) 13403 return; 13404 13405 unsigned DiagID = diag::warn_impcast_integer_sign; 13406 13407 // Traditionally, gcc has warned about this under -Wsign-compare. 13408 // We also want to warn about it in -Wconversion. 13409 // So if -Wconversion is off, use a completely identical diagnostic 13410 // in the sign-compare group. 13411 // The conditional-checking code will 13412 if (ICContext) { 13413 DiagID = diag::warn_impcast_integer_sign_conditional; 13414 *ICContext = true; 13415 } 13416 13417 return DiagnoseImpCast(S, E, T, CC, DiagID); 13418 } 13419 13420 // Diagnose conversions between different enumeration types. 13421 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 13422 // type, to give us better diagnostics. 13423 QualType SourceType = E->getType(); 13424 if (!S.getLangOpts().CPlusPlus) { 13425 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 13426 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 13427 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 13428 SourceType = S.Context.getTypeDeclType(Enum); 13429 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 13430 } 13431 } 13432 13433 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 13434 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 13435 if (SourceEnum->getDecl()->hasNameForLinkage() && 13436 TargetEnum->getDecl()->hasNameForLinkage() && 13437 SourceEnum != TargetEnum) { 13438 if (S.SourceMgr.isInSystemMacro(CC)) 13439 return; 13440 13441 return DiagnoseImpCast(S, E, SourceType, T, CC, 13442 diag::warn_impcast_different_enum_types); 13443 } 13444 } 13445 13446 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13447 SourceLocation CC, QualType T); 13448 13449 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 13450 SourceLocation CC, bool &ICContext) { 13451 E = E->IgnoreParenImpCasts(); 13452 13453 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 13454 return CheckConditionalOperator(S, CO, CC, T); 13455 13456 AnalyzeImplicitConversions(S, E, CC); 13457 if (E->getType() != T) 13458 return CheckImplicitConversion(S, E, T, CC, &ICContext); 13459 } 13460 13461 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13462 SourceLocation CC, QualType T) { 13463 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 13464 13465 Expr *TrueExpr = E->getTrueExpr(); 13466 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 13467 TrueExpr = BCO->getCommon(); 13468 13469 bool Suspicious = false; 13470 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 13471 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 13472 13473 if (T->isBooleanType()) 13474 DiagnoseIntInBoolContext(S, E); 13475 13476 // If -Wconversion would have warned about either of the candidates 13477 // for a signedness conversion to the context type... 13478 if (!Suspicious) return; 13479 13480 // ...but it's currently ignored... 13481 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 13482 return; 13483 13484 // ...then check whether it would have warned about either of the 13485 // candidates for a signedness conversion to the condition type. 
13486 if (E->getType() == T) return; 13487 13488 Suspicious = false; 13489 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 13490 E->getType(), CC, &Suspicious); 13491 if (!Suspicious) 13492 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 13493 E->getType(), CC, &Suspicious); 13494 } 13495 13496 /// Check conversion of given expression to boolean. 13497 /// Input argument E is a logical expression. 13498 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 13499 if (S.getLangOpts().Bool) 13500 return; 13501 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 13502 return; 13503 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 13504 } 13505 13506 namespace { 13507 struct AnalyzeImplicitConversionsWorkItem { 13508 Expr *E; 13509 SourceLocation CC; 13510 bool IsListInit; 13511 }; 13512 } 13513 13514 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 13515 /// that should be visited are added to WorkList. 13516 static void AnalyzeImplicitConversions( 13517 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 13518 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 13519 Expr *OrigE = Item.E; 13520 SourceLocation CC = Item.CC; 13521 13522 QualType T = OrigE->getType(); 13523 Expr *E = OrigE->IgnoreParenImpCasts(); 13524 13525 // Propagate whether we are in a C++ list initialization expression. 13526 // If so, we do not issue warnings for implicit int-float conversion 13527 // precision loss, because C++11 narrowing already handles it. 13528 bool IsListInit = Item.IsListInit || 13529 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 13530 13531 if (E->isTypeDependent() || E->isValueDependent()) 13532 return; 13533 13534 Expr *SourceExpr = E; 13535 // Examine, but don't traverse into the source expression of an 13536 // OpaqueValueExpr, since it may have multiple parents and we don't want to 13537 // emit duplicate diagnostics. Its fine to examine the form or attempt to 13538 // evaluate it in the context of checking the specific conversion to T though. 13539 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 13540 if (auto *Src = OVE->getSourceExpr()) 13541 SourceExpr = Src; 13542 13543 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 13544 if (UO->getOpcode() == UO_Not && 13545 UO->getSubExpr()->isKnownToHaveBooleanValue()) 13546 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 13547 << OrigE->getSourceRange() << T->isBooleanType() 13548 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 13549 13550 if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr)) 13551 if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) && 13552 BO->getLHS()->isKnownToHaveBooleanValue() && 13553 BO->getRHS()->isKnownToHaveBooleanValue() && 13554 BO->getLHS()->HasSideEffects(S.Context) && 13555 BO->getRHS()->HasSideEffects(S.Context)) { 13556 S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical) 13557 << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange() 13558 << FixItHint::CreateReplacement( 13559 BO->getOperatorLoc(), 13560 (BO->getOpcode() == BO_And ? "&&" : "||")); 13561 S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int); 13562 } 13563 13564 // For conditional operators, we analyze the arguments as if they 13565 // were being fed directly into the output. 
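  // For example, in 'short s = cond ? i : j;' each of 'i' and 'j' is checked
  // against 'short' directly, rather than only against the conditional
  // expression's own (promoted) type.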
13566 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 13567 CheckConditionalOperator(S, CO, CC, T); 13568 return; 13569 } 13570 13571 // Check implicit argument conversions for function calls. 13572 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 13573 CheckImplicitArgumentConversions(S, Call, CC); 13574 13575 // Go ahead and check any implicit conversions we might have skipped. 13576 // The non-canonical typecheck is just an optimization; 13577 // CheckImplicitConversion will filter out dead implicit conversions. 13578 if (SourceExpr->getType() != T) 13579 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 13580 13581 // Now continue drilling into this expression. 13582 13583 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 13584 // The bound subexpressions in a PseudoObjectExpr are not reachable 13585 // as transitive children. 13586 // FIXME: Use a more uniform representation for this. 13587 for (auto *SE : POE->semantics()) 13588 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 13589 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 13590 } 13591 13592 // Skip past explicit casts. 13593 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 13594 E = CE->getSubExpr()->IgnoreParenImpCasts(); 13595 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 13596 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 13597 WorkList.push_back({E, CC, IsListInit}); 13598 return; 13599 } 13600 13601 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 13602 // Do a somewhat different check with comparison operators. 13603 if (BO->isComparisonOp()) 13604 return AnalyzeComparison(S, BO); 13605 13606 // And with simple assignments. 13607 if (BO->getOpcode() == BO_Assign) 13608 return AnalyzeAssignment(S, BO); 13609 // And with compound assignments. 13610 if (BO->isAssignmentOp()) 13611 return AnalyzeCompoundAssignment(S, BO); 13612 } 13613 13614 // These break the otherwise-useful invariant below. Fortunately, 13615 // we don't really need to recurse into them, because any internal 13616 // expressions should have been analyzed already when they were 13617 // built into statements. 13618 if (isa<StmtExpr>(E)) return; 13619 13620 // Don't descend into unevaluated contexts. 13621 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 13622 13623 // Now just recurse over the expression's children. 13624 CC = E->getExprLoc(); 13625 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 13626 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 13627 for (Stmt *SubStmt : E->children()) { 13628 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 13629 if (!ChildExpr) 13630 continue; 13631 13632 if (IsLogicalAndOperator && 13633 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 13634 // Ignore checking string literals that are in logical and operators. 13635 // This is a common pattern for asserts. 
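      // For example, 'assert(ptr && "ptr must not be null")': the string
      // literal operand is intentionally always true and should not be
      // flagged as a suspicious bool conversion.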
13636 continue; 13637 WorkList.push_back({ChildExpr, CC, IsListInit}); 13638 } 13639 13640 if (BO && BO->isLogicalOp()) { 13641 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 13642 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 13643 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 13644 13645 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 13646 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 13647 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 13648 } 13649 13650 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 13651 if (U->getOpcode() == UO_LNot) { 13652 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 13653 } else if (U->getOpcode() != UO_AddrOf) { 13654 if (U->getSubExpr()->getType()->isAtomicType()) 13655 S.Diag(U->getSubExpr()->getBeginLoc(), 13656 diag::warn_atomic_implicit_seq_cst); 13657 } 13658 } 13659 } 13660 13661 /// AnalyzeImplicitConversions - Find and report any interesting 13662 /// implicit conversions in the given expression. There are a couple 13663 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 13664 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC, 13665 bool IsListInit/*= false*/) { 13666 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList; 13667 WorkList.push_back({OrigE, CC, IsListInit}); 13668 while (!WorkList.empty()) 13669 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList); 13670 } 13671 13672 /// Diagnose integer type and any valid implicit conversion to it. 13673 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) { 13674 // Taking into account implicit conversions, 13675 // allow any integer. 13676 if (!E->getType()->isIntegerType()) { 13677 S.Diag(E->getBeginLoc(), 13678 diag::err_opencl_enqueue_kernel_invalid_local_size_type); 13679 return true; 13680 } 13681 // Potentially emit standard warnings for implicit conversions if enabled 13682 // using -Wconversion. 13683 CheckImplicitConversion(S, E, IntT, E->getBeginLoc()); 13684 return false; 13685 } 13686 13687 // Helper function for Sema::DiagnoseAlwaysNonNullPointer. 13688 // Returns true when emitting a warning about taking the address of a reference. 13689 static bool CheckForReference(Sema &SemaRef, const Expr *E, 13690 const PartialDiagnostic &PD) { 13691 E = E->IgnoreParenImpCasts(); 13692 13693 const FunctionDecl *FD = nullptr; 13694 13695 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { 13696 if (!DRE->getDecl()->getType()->isReferenceType()) 13697 return false; 13698 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) { 13699 if (!M->getMemberDecl()->getType()->isReferenceType()) 13700 return false; 13701 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) { 13702 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType()) 13703 return false; 13704 FD = Call->getDirectCallee(); 13705 } else { 13706 return false; 13707 } 13708 13709 SemaRef.Diag(E->getExprLoc(), PD); 13710 13711 // If possible, point to location of function. 13712 if (FD) { 13713 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD; 13714 } 13715 13716 return true; 13717 } 13718 13719 // Returns true if the SourceLocation is expanded from any macro body. 13720 // Returns false if the SourceLocation is invalid, is from not in a macro 13721 // expansion, or is from expanded from a top-level macro argument. 
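// For example, given '#define IS_SET(x) ((x) != nullptr)', the 'nullptr'
// written in the macro body is a macro-body expansion (returns true), while
// the pointer passed in for 'x' at the call site is a top-level macro
// argument expansion (returns false).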
13722 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 13723 if (Loc.isInvalid()) 13724 return false; 13725 13726 while (Loc.isMacroID()) { 13727 if (SM.isMacroBodyExpansion(Loc)) 13728 return true; 13729 Loc = SM.getImmediateMacroCallerLoc(Loc); 13730 } 13731 13732 return false; 13733 } 13734 13735 /// Diagnose pointers that are always non-null. 13736 /// \param E the expression containing the pointer 13737 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is 13738 /// compared to a null pointer 13739 /// \param IsEqual True when the comparison is equal to a null pointer 13740 /// \param Range Extra SourceRange to highlight in the diagnostic 13741 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E, 13742 Expr::NullPointerConstantKind NullKind, 13743 bool IsEqual, SourceRange Range) { 13744 if (!E) 13745 return; 13746 13747 // Don't warn inside macros. 13748 if (E->getExprLoc().isMacroID()) { 13749 const SourceManager &SM = getSourceManager(); 13750 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 13751 IsInAnyMacroBody(SM, Range.getBegin())) 13752 return; 13753 } 13754 E = E->IgnoreImpCasts(); 13755 13756 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 13757 13758 if (isa<CXXThisExpr>(E)) { 13759 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 13760 : diag::warn_this_bool_conversion; 13761 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 13762 return; 13763 } 13764 13765 bool IsAddressOf = false; 13766 13767 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 13768 if (UO->getOpcode() != UO_AddrOf) 13769 return; 13770 IsAddressOf = true; 13771 E = UO->getSubExpr(); 13772 } 13773 13774 if (IsAddressOf) { 13775 unsigned DiagID = IsCompare 13776 ? diag::warn_address_of_reference_null_compare 13777 : diag::warn_address_of_reference_bool_conversion; 13778 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 13779 << IsEqual; 13780 if (CheckForReference(*this, E, PD)) { 13781 return; 13782 } 13783 } 13784 13785 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 13786 bool IsParam = isa<NonNullAttr>(NonnullAttr); 13787 std::string Str; 13788 llvm::raw_string_ostream S(Str); 13789 E->printPretty(S, nullptr, getPrintingPolicy()); 13790 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 13791 : diag::warn_cast_nonnull_to_bool; 13792 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 13793 << E->getSourceRange() << Range << IsEqual; 13794 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 13795 }; 13796 13797 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 13798 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 13799 if (auto *Callee = Call->getDirectCallee()) { 13800 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 13801 ComplainAboutNonnullParamOrCall(A); 13802 return; 13803 } 13804 } 13805 } 13806 13807 // Expect to find a single Decl. Skip anything more complicated. 13808 ValueDecl *D = nullptr; 13809 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 13810 D = R->getDecl(); 13811 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 13812 D = M->getMemberDecl(); 13813 } 13814 13815 // Weak Decls can be null. 
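  // For example, the address of 'extern void f() __attribute__((weak));' can
  // legitimately be null if no definition is provided at link time, so no
  // warning is appropriate for such declarations.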
13816 if (!D || D->isWeak()) 13817 return; 13818 13819 // Check for parameter decl with nonnull attribute 13820 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 13821 if (getCurFunction() && 13822 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 13823 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 13824 ComplainAboutNonnullParamOrCall(A); 13825 return; 13826 } 13827 13828 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 13829 // Skip function template not specialized yet. 13830 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 13831 return; 13832 auto ParamIter = llvm::find(FD->parameters(), PV); 13833 assert(ParamIter != FD->param_end()); 13834 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter); 13835 13836 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) { 13837 if (!NonNull->args_size()) { 13838 ComplainAboutNonnullParamOrCall(NonNull); 13839 return; 13840 } 13841 13842 for (const ParamIdx &ArgNo : NonNull->args()) { 13843 if (ArgNo.getASTIndex() == ParamNo) { 13844 ComplainAboutNonnullParamOrCall(NonNull); 13845 return; 13846 } 13847 } 13848 } 13849 } 13850 } 13851 } 13852 13853 QualType T = D->getType(); 13854 const bool IsArray = T->isArrayType(); 13855 const bool IsFunction = T->isFunctionType(); 13856 13857 // Address of function is used to silence the function warning. 13858 if (IsAddressOf && IsFunction) { 13859 return; 13860 } 13861 13862 // Found nothing. 13863 if (!IsAddressOf && !IsFunction && !IsArray) 13864 return; 13865 13866 // Pretty print the expression for the diagnostic. 13867 std::string Str; 13868 llvm::raw_string_ostream S(Str); 13869 E->printPretty(S, nullptr, getPrintingPolicy()); 13870 13871 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare 13872 : diag::warn_impcast_pointer_to_bool; 13873 enum { 13874 AddressOf, 13875 FunctionPointer, 13876 ArrayPointer 13877 } DiagType; 13878 if (IsAddressOf) 13879 DiagType = AddressOf; 13880 else if (IsFunction) 13881 DiagType = FunctionPointer; 13882 else if (IsArray) 13883 DiagType = ArrayPointer; 13884 else 13885 llvm_unreachable("Could not determine diagnostic."); 13886 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange() 13887 << Range << IsEqual; 13888 13889 if (!IsFunction) 13890 return; 13891 13892 // Suggest '&' to silence the function warning. 13893 Diag(E->getExprLoc(), diag::note_function_warning_silence) 13894 << FixItHint::CreateInsertion(E->getBeginLoc(), "&"); 13895 13896 // Check to see if '()' fixit should be emitted. 13897 QualType ReturnType; 13898 UnresolvedSet<4> NonTemplateOverloads; 13899 tryExprAsCall(*E, ReturnType, NonTemplateOverloads); 13900 if (ReturnType.isNull()) 13901 return; 13902 13903 if (IsCompare) { 13904 // There are two cases here. If there is null constant, the only suggest 13905 // for a pointer return type. If the null is 0, then suggest if the return 13906 // type is a pointer or an integer type. 13907 if (!ReturnType->isPointerType()) { 13908 if (NullKind == Expr::NPCK_ZeroExpression || 13909 NullKind == Expr::NPCK_ZeroLiteral) { 13910 if (!ReturnType->isIntegerType()) 13911 return; 13912 } else { 13913 return; 13914 } 13915 } 13916 } else { // !IsCompare 13917 // For function to bool, only suggest if the function pointer has bool 13918 // return type. 
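    // For example, with 'bool hasData();', the condition 'if (hasData)' is
    // always true, and suggesting 'hasData()' preserves the likely intent.
    // For a non-bool return type the added call could change the meaning, so
    // no fix-it is offered in that case.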
13919     if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
13920       return;
13921   }
13922   Diag(E->getExprLoc(), diag::note_function_to_function_call)
13923       << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
13924 }
13925 
13926 /// Diagnoses "dangerous" implicit conversions within the given
13927 /// expression (which is a full expression). Implements -Wconversion
13928 /// and -Wsign-compare.
13929 ///
13930 /// \param CC the "context" location of the implicit conversion, i.e.
13931 ///   the location of the syntactic entity requiring the implicit
13932 ///   conversion
13933 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
13934   // Don't diagnose in unevaluated contexts.
13935   if (isUnevaluatedContext())
13936     return;
13937 
13938   // Don't diagnose for value- or type-dependent expressions.
13939   if (E->isTypeDependent() || E->isValueDependent())
13940     return;
13941 
13942   // Check for array bounds violations in cases where the check isn't triggered
13943   // elsewhere for other Expr types (like BinaryOperators), e.g. when an
13944   // ArraySubscriptExpr is on the RHS of a variable initialization.
13945   CheckArrayAccess(E);
13946 
13947   // This is not the right CC for (e.g.) a variable initialization.
13948   AnalyzeImplicitConversions(*this, E, CC);
13949 }
13950 
13951 /// CheckBoolLikeConversion - Check conversion of given expression to boolean.
13952 /// Input argument E is a logical expression.
13953 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
13954   ::CheckBoolLikeConversion(*this, E, CC);
13955 }
13956 
13957 /// Diagnose when an expression is an integer constant expression and its evaluation
13958 /// results in integer overflow.
13959 void Sema::CheckForIntOverflow(Expr *E) {
13960   // Use a work list to deal with nested struct initializers.
13961   SmallVector<Expr *, 2> Exprs(1, E);
13962 
13963   do {
13964     Expr *OriginalE = Exprs.pop_back_val();
13965     Expr *E = OriginalE->IgnoreParenCasts();
13966 
13967     if (isa<BinaryOperator>(E)) {
13968       E->EvaluateForOverflow(Context);
13969       continue;
13970     }
13971 
13972     if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
13973       Exprs.append(InitList->inits().begin(), InitList->inits().end());
13974     else if (isa<ObjCBoxedExpr>(OriginalE))
13975       E->EvaluateForOverflow(Context);
13976     else if (auto Call = dyn_cast<CallExpr>(E))
13977       Exprs.append(Call->arg_begin(), Call->arg_end());
13978     else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
13979       Exprs.append(Message->arg_begin(), Message->arg_end());
13980   } while (!Exprs.empty());
13981 }
13982 
13983 namespace {
13984 
13985 /// Visitor for expressions which looks for unsequenced operations on the
13986 /// same object.
13987 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
13988   using Base = ConstEvaluatedExprVisitor<SequenceChecker>;
13989 
13990   /// A tree of sequenced regions within an expression. Two regions are
13991   /// unsequenced if one is an ancestor or a descendant of the other. When we
13992   /// finish processing an expression with sequencing, such as a comma
13993   /// expression, we fold its tree nodes into its parent, since they are
13994   /// unsequenced with respect to nodes we will visit later.
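  /// For example, in '(a, b) + c' the comma allocates sibling regions for 'a'
  /// and 'b', so the two are treated as sequenced with respect to each other;
  /// once the comma has been processed both regions are merged back into the
  /// parent, leaving them unsequenced with respect to 'c'.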
13995 class SequenceTree { 13996 struct Value { 13997 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 13998 unsigned Parent : 31; 13999 unsigned Merged : 1; 14000 }; 14001 SmallVector<Value, 8> Values; 14002 14003 public: 14004 /// A region within an expression which may be sequenced with respect 14005 /// to some other region. 14006 class Seq { 14007 friend class SequenceTree; 14008 14009 unsigned Index; 14010 14011 explicit Seq(unsigned N) : Index(N) {} 14012 14013 public: 14014 Seq() : Index(0) {} 14015 }; 14016 14017 SequenceTree() { Values.push_back(Value(0)); } 14018 Seq root() const { return Seq(0); } 14019 14020 /// Create a new sequence of operations, which is an unsequenced 14021 /// subset of \p Parent. This sequence of operations is sequenced with 14022 /// respect to other children of \p Parent. 14023 Seq allocate(Seq Parent) { 14024 Values.push_back(Value(Parent.Index)); 14025 return Seq(Values.size() - 1); 14026 } 14027 14028 /// Merge a sequence of operations into its parent. 14029 void merge(Seq S) { 14030 Values[S.Index].Merged = true; 14031 } 14032 14033 /// Determine whether two operations are unsequenced. This operation 14034 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 14035 /// should have been merged into its parent as appropriate. 14036 bool isUnsequenced(Seq Cur, Seq Old) { 14037 unsigned C = representative(Cur.Index); 14038 unsigned Target = representative(Old.Index); 14039 while (C >= Target) { 14040 if (C == Target) 14041 return true; 14042 C = Values[C].Parent; 14043 } 14044 return false; 14045 } 14046 14047 private: 14048 /// Pick a representative for a sequence. 14049 unsigned representative(unsigned K) { 14050 if (Values[K].Merged) 14051 // Perform path compression as we go. 14052 return Values[K].Parent = representative(Values[K].Parent); 14053 return K; 14054 } 14055 }; 14056 14057 /// An object for which we can track unsequenced uses. 14058 using Object = const NamedDecl *; 14059 14060 /// Different flavors of object usage which we track. We only track the 14061 /// least-sequenced usage of each kind. 14062 enum UsageKind { 14063 /// A read of an object. Multiple unsequenced reads are OK. 14064 UK_Use, 14065 14066 /// A modification of an object which is sequenced before the value 14067 /// computation of the expression, such as ++n in C++. 14068 UK_ModAsValue, 14069 14070 /// A modification of an object which is not sequenced before the value 14071 /// computation of the expression, such as n++. 14072 UK_ModAsSideEffect, 14073 14074 UK_Count = UK_ModAsSideEffect + 1 14075 }; 14076 14077 /// Bundle together a sequencing region and the expression corresponding 14078 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 14079 struct Usage { 14080 const Expr *UsageExpr; 14081 SequenceTree::Seq Seq; 14082 14083 Usage() : UsageExpr(nullptr) {} 14084 }; 14085 14086 struct UsageInfo { 14087 Usage Uses[UK_Count]; 14088 14089 /// Have we issued a diagnostic for this object already? 14090 bool Diagnosed; 14091 14092 UsageInfo() : Diagnosed(false) {} 14093 }; 14094 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 14095 14096 Sema &SemaRef; 14097 14098 /// Sequenced regions within the expression. 14099 SequenceTree Tree; 14100 14101 /// Declaration modifications and references which we have seen. 14102 UsageInfoMap UsageMap; 14103 14104 /// The region we are currently within. 
14105 SequenceTree::Seq Region; 14106 14107 /// Filled in with declarations which were modified as a side-effect 14108 /// (that is, post-increment operations). 14109 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 14110 14111 /// Expressions to check later. We defer checking these to reduce 14112 /// stack usage. 14113 SmallVectorImpl<const Expr *> &WorkList; 14114 14115 /// RAII object wrapping the visitation of a sequenced subexpression of an 14116 /// expression. At the end of this process, the side-effects of the evaluation 14117 /// become sequenced with respect to the value computation of the result, so 14118 /// we downgrade any UK_ModAsSideEffect within the evaluation to 14119 /// UK_ModAsValue. 14120 struct SequencedSubexpression { 14121 SequencedSubexpression(SequenceChecker &Self) 14122 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 14123 Self.ModAsSideEffect = &ModAsSideEffect; 14124 } 14125 14126 ~SequencedSubexpression() { 14127 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 14128 // Add a new usage with usage kind UK_ModAsValue, and then restore 14129 // the previous usage with UK_ModAsSideEffect (thus clearing it if 14130 // the previous one was empty). 14131 UsageInfo &UI = Self.UsageMap[M.first]; 14132 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 14133 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 14134 SideEffectUsage = M.second; 14135 } 14136 Self.ModAsSideEffect = OldModAsSideEffect; 14137 } 14138 14139 SequenceChecker &Self; 14140 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 14141 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 14142 }; 14143 14144 /// RAII object wrapping the visitation of a subexpression which we might 14145 /// choose to evaluate as a constant. If any subexpression is evaluated and 14146 /// found to be non-constant, this allows us to suppress the evaluation of 14147 /// the outer expression. 14148 class EvaluationTracker { 14149 public: 14150 EvaluationTracker(SequenceChecker &Self) 14151 : Self(Self), Prev(Self.EvalTracker) { 14152 Self.EvalTracker = this; 14153 } 14154 14155 ~EvaluationTracker() { 14156 Self.EvalTracker = Prev; 14157 if (Prev) 14158 Prev->EvalOK &= EvalOK; 14159 } 14160 14161 bool evaluate(const Expr *E, bool &Result) { 14162 if (!EvalOK || E->isValueDependent()) 14163 return false; 14164 EvalOK = E->EvaluateAsBooleanCondition( 14165 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 14166 return EvalOK; 14167 } 14168 14169 private: 14170 SequenceChecker &Self; 14171 EvaluationTracker *Prev; 14172 bool EvalOK = true; 14173 } *EvalTracker = nullptr; 14174 14175 /// Find the object which is produced by the specified expression, 14176 /// if any. 14177 Object getObject(const Expr *E, bool Mod) const { 14178 E = E->IgnoreParenCasts(); 14179 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 14180 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 14181 return getObject(UO->getSubExpr(), Mod); 14182 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 14183 if (BO->getOpcode() == BO_Comma) 14184 return getObject(BO->getRHS(), Mod); 14185 if (Mod && BO->isAssignmentOp()) 14186 return getObject(BO->getLHS(), Mod); 14187 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 14188 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 
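      // Only members accessed through 'this' are tracked for now; e.g. inside
      // a member function an expression like 'n + n++' on a field 'n' is
      // caught, while the same pattern on an arbitrary object 'x.n' is not.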
14189       if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
14190         return ME->getMemberDecl();
14191     } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
14192       // FIXME: If this is a reference, map through to its value.
14193       return DRE->getDecl();
14194     return nullptr;
14195   }
14196 
14197   /// Note that an object \p O was modified or used by an expression
14198   /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
14199   /// the object \p O as obtained via the \p UsageMap.
14200   void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
14201     // Get the old usage for the given object and usage kind.
14202     Usage &U = UI.Uses[UK];
14203     if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
14204       // If we have a modification as side effect and are in a sequenced
14205       // subexpression, save the old Usage so that we can restore it later
14206       // in SequencedSubexpression::~SequencedSubexpression.
14207       if (UK == UK_ModAsSideEffect && ModAsSideEffect)
14208         ModAsSideEffect->push_back(std::make_pair(O, U));
14209       // Then record the new usage with the current sequencing region.
14210       U.UsageExpr = UsageExpr;
14211       U.Seq = Region;
14212     }
14213   }
14214 
14215   /// Check whether a modification or use of an object \p O in an expression
14216   /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
14217   /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
14218   /// \p IsModMod is true when we are checking for a mod-mod unsequenced
14219   /// usage, and false when we are checking for a mod-use unsequenced usage.
14220   void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
14221                   UsageKind OtherKind, bool IsModMod) {
14222     if (UI.Diagnosed)
14223       return;
14224 
14225     const Usage &U = UI.Uses[OtherKind];
14226     if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
14227       return;
14228 
14229     const Expr *Mod = U.UsageExpr;
14230     const Expr *ModOrUse = UsageExpr;
14231     if (OtherKind == UK_Use)
14232       std::swap(Mod, ModOrUse);
14233 
14234     SemaRef.DiagRuntimeBehavior(
14235         Mod->getExprLoc(), {Mod, ModOrUse},
14236         SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
14237                                : diag::warn_unsequenced_mod_use)
14238             << O << SourceRange(ModOrUse->getExprLoc()));
14239     UI.Diagnosed = true;
14240   }
14241 
14242   // A note on note{Pre, Post}{Use, Mod}:
14243   //
14244   // (It helps to follow the algorithm with an expression such as
14245   // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
14246   // operations before C++17 and both are well-defined in C++17).
14247   //
14248   // When visiting a node which uses/modifies an object we first call notePreUse
14249   // or notePreMod before visiting its sub-expression(s). At this point the
14250   // children of the current node have not yet been visited and so the eventual
14251   // uses/modifications resulting from the children of the current node have not
14252   // been recorded yet.
14253   //
14254   // We then visit the children of the current node. After that notePostUse or
14255   // notePostMod is called. These will 1) detect an unsequenced modification
14256   // as side effect (as in "k++ + k") and 2) add a new usage with the
14257   // appropriate usage kind.
14258   //
14259   // We also have to be careful that some operations sequence modifications as
14260   // side effects as well (for example: || or ,). To account for this we wrap
14261   // the visitation of such a sub-expression (for example: the LHS of || or ,)
14262   // with SequencedSubexpression.
SequencedSubexpression is an RAII object 14263 // which record usages which are modifications as side effect, and then 14264 // downgrade them (or more accurately restore the previous usage which was a 14265 // modification as side effect) when exiting the scope of the sequenced 14266 // subexpression. 14267 14268 void notePreUse(Object O, const Expr *UseExpr) { 14269 UsageInfo &UI = UsageMap[O]; 14270 // Uses conflict with other modifications. 14271 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 14272 } 14273 14274 void notePostUse(Object O, const Expr *UseExpr) { 14275 UsageInfo &UI = UsageMap[O]; 14276 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 14277 /*IsModMod=*/false); 14278 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 14279 } 14280 14281 void notePreMod(Object O, const Expr *ModExpr) { 14282 UsageInfo &UI = UsageMap[O]; 14283 // Modifications conflict with other modifications and with uses. 14284 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 14285 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 14286 } 14287 14288 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 14289 UsageInfo &UI = UsageMap[O]; 14290 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 14291 /*IsModMod=*/true); 14292 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 14293 } 14294 14295 public: 14296 SequenceChecker(Sema &S, const Expr *E, 14297 SmallVectorImpl<const Expr *> &WorkList) 14298 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 14299 Visit(E); 14300 // Silence a -Wunused-private-field since WorkList is now unused. 14301 // TODO: Evaluate if it can be used, and if not remove it. 14302 (void)this->WorkList; 14303 } 14304 14305 void VisitStmt(const Stmt *S) { 14306 // Skip all statements which aren't expressions for now. 14307 } 14308 14309 void VisitExpr(const Expr *E) { 14310 // By default, just recurse to evaluated subexpressions. 14311 Base::VisitStmt(E); 14312 } 14313 14314 void VisitCastExpr(const CastExpr *E) { 14315 Object O = Object(); 14316 if (E->getCastKind() == CK_LValueToRValue) 14317 O = getObject(E->getSubExpr(), false); 14318 14319 if (O) 14320 notePreUse(O, E); 14321 VisitExpr(E); 14322 if (O) 14323 notePostUse(O, E); 14324 } 14325 14326 void VisitSequencedExpressions(const Expr *SequencedBefore, 14327 const Expr *SequencedAfter) { 14328 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 14329 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 14330 SequenceTree::Seq OldRegion = Region; 14331 14332 { 14333 SequencedSubexpression SeqBefore(*this); 14334 Region = BeforeRegion; 14335 Visit(SequencedBefore); 14336 } 14337 14338 Region = AfterRegion; 14339 Visit(SequencedAfter); 14340 14341 Region = OldRegion; 14342 14343 Tree.merge(BeforeRegion); 14344 Tree.merge(AfterRegion); 14345 } 14346 14347 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 14348 // C++17 [expr.sub]p1: 14349 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 14350 // expression E1 is sequenced before the expression E2. 
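    // For example, 'ptrs[i][i++]' is diagnosed before C++17 (the read of 'i'
    // in 'ptrs[i]' is unsequenced with the 'i++'), but not in C++17, where
    // the subscripted expression is sequenced before the index expression.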
14351 if (SemaRef.getLangOpts().CPlusPlus17) 14352 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 14353 else { 14354 Visit(ASE->getLHS()); 14355 Visit(ASE->getRHS()); 14356 } 14357 } 14358 14359 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14360 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14361 void VisitBinPtrMem(const BinaryOperator *BO) { 14362 // C++17 [expr.mptr.oper]p4: 14363 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 14364 // the expression E1 is sequenced before the expression E2. 14365 if (SemaRef.getLangOpts().CPlusPlus17) 14366 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14367 else { 14368 Visit(BO->getLHS()); 14369 Visit(BO->getRHS()); 14370 } 14371 } 14372 14373 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14374 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14375 void VisitBinShlShr(const BinaryOperator *BO) { 14376 // C++17 [expr.shift]p4: 14377 // The expression E1 is sequenced before the expression E2. 14378 if (SemaRef.getLangOpts().CPlusPlus17) 14379 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14380 else { 14381 Visit(BO->getLHS()); 14382 Visit(BO->getRHS()); 14383 } 14384 } 14385 14386 void VisitBinComma(const BinaryOperator *BO) { 14387 // C++11 [expr.comma]p1: 14388 // Every value computation and side effect associated with the left 14389 // expression is sequenced before every value computation and side 14390 // effect associated with the right expression. 14391 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14392 } 14393 14394 void VisitBinAssign(const BinaryOperator *BO) { 14395 SequenceTree::Seq RHSRegion; 14396 SequenceTree::Seq LHSRegion; 14397 if (SemaRef.getLangOpts().CPlusPlus17) { 14398 RHSRegion = Tree.allocate(Region); 14399 LHSRegion = Tree.allocate(Region); 14400 } else { 14401 RHSRegion = Region; 14402 LHSRegion = Region; 14403 } 14404 SequenceTree::Seq OldRegion = Region; 14405 14406 // C++11 [expr.ass]p1: 14407 // [...] the assignment is sequenced after the value computation 14408 // of the right and left operands, [...] 14409 // 14410 // so check it before inspecting the operands and update the 14411 // map afterwards. 14412 Object O = getObject(BO->getLHS(), /*Mod=*/true); 14413 if (O) 14414 notePreMod(O, BO); 14415 14416 if (SemaRef.getLangOpts().CPlusPlus17) { 14417 // C++17 [expr.ass]p1: 14418 // [...] The right operand is sequenced before the left operand. [...] 14419 { 14420 SequencedSubexpression SeqBefore(*this); 14421 Region = RHSRegion; 14422 Visit(BO->getRHS()); 14423 } 14424 14425 Region = LHSRegion; 14426 Visit(BO->getLHS()); 14427 14428 if (O && isa<CompoundAssignOperator>(BO)) 14429 notePostUse(O, BO); 14430 14431 } else { 14432 // C++11 does not specify any sequencing between the LHS and RHS. 14433 Region = LHSRegion; 14434 Visit(BO->getLHS()); 14435 14436 if (O && isa<CompoundAssignOperator>(BO)) 14437 notePostUse(O, BO); 14438 14439 Region = RHSRegion; 14440 Visit(BO->getRHS()); 14441 } 14442 14443 // C++11 [expr.ass]p1: 14444 // the assignment is sequenced [...] before the value computation of the 14445 // assignment expression. 14446 // C11 6.5.16/3 has no such rule. 14447 Region = OldRegion; 14448 if (O) 14449 notePostMod(O, BO, 14450 SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 14451 : UK_ModAsSideEffect); 14452 if (SemaRef.getLangOpts().CPlusPlus17) { 14453 Tree.merge(RHSRegion); 14454 Tree.merge(LHSRegion); 14455 } 14456 } 14457 14458 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 14459 VisitBinAssign(CAO); 14460 } 14461 14462 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14463 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14464 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 14465 Object O = getObject(UO->getSubExpr(), true); 14466 if (!O) 14467 return VisitExpr(UO); 14468 14469 notePreMod(O, UO); 14470 Visit(UO->getSubExpr()); 14471 // C++11 [expr.pre.incr]p1: 14472 // the expression ++x is equivalent to x+=1 14473 notePostMod(O, UO, 14474 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 14475 : UK_ModAsSideEffect); 14476 } 14477 14478 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14479 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14480 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 14481 Object O = getObject(UO->getSubExpr(), true); 14482 if (!O) 14483 return VisitExpr(UO); 14484 14485 notePreMod(O, UO); 14486 Visit(UO->getSubExpr()); 14487 notePostMod(O, UO, UK_ModAsSideEffect); 14488 } 14489 14490 void VisitBinLOr(const BinaryOperator *BO) { 14491 // C++11 [expr.log.or]p2: 14492 // If the second expression is evaluated, every value computation and 14493 // side effect associated with the first expression is sequenced before 14494 // every value computation and side effect associated with the 14495 // second expression. 14496 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14497 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14498 SequenceTree::Seq OldRegion = Region; 14499 14500 EvaluationTracker Eval(*this); 14501 { 14502 SequencedSubexpression Sequenced(*this); 14503 Region = LHSRegion; 14504 Visit(BO->getLHS()); 14505 } 14506 14507 // C++11 [expr.log.or]p1: 14508 // [...] the second operand is not evaluated if the first operand 14509 // evaluates to true. 14510 bool EvalResult = false; 14511 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 14512 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 14513 if (ShouldVisitRHS) { 14514 Region = RHSRegion; 14515 Visit(BO->getRHS()); 14516 } 14517 14518 Region = OldRegion; 14519 Tree.merge(LHSRegion); 14520 Tree.merge(RHSRegion); 14521 } 14522 14523 void VisitBinLAnd(const BinaryOperator *BO) { 14524 // C++11 [expr.log.and]p2: 14525 // If the second expression is evaluated, every value computation and 14526 // side effect associated with the first expression is sequenced before 14527 // every value computation and side effect associated with the 14528 // second expression. 14529 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14530 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14531 SequenceTree::Seq OldRegion = Region; 14532 14533 EvaluationTracker Eval(*this); 14534 { 14535 SequencedSubexpression Sequenced(*this); 14536 Region = LHSRegion; 14537 Visit(BO->getLHS()); 14538 } 14539 14540 // C++11 [expr.log.and]p1: 14541 // [...] the second operand is not evaluated if the first operand is false. 
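    // So when the LHS folds to a constant 'false', the RHS is skipped below;
    // e.g. the 'n++' in 'ENABLED && n++ > n' is not reported if 'ENABLED'
    // expands to 0.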
14542     bool EvalResult = false;
14543     bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
14544     bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
14545     if (ShouldVisitRHS) {
14546       Region = RHSRegion;
14547       Visit(BO->getRHS());
14548     }
14549 
14550     Region = OldRegion;
14551     Tree.merge(LHSRegion);
14552     Tree.merge(RHSRegion);
14553   }
14554 
14555   void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
14556     // C++11 [expr.cond]p1:
14557     // [...] Every value computation and side effect associated with the first
14558     // expression is sequenced before every value computation and side effect
14559     // associated with the second or third expression.
14560     SequenceTree::Seq ConditionRegion = Tree.allocate(Region);
14561 
14562     // No sequencing is specified between the true and false expression.
14563     // However, since exactly one of the two is going to be evaluated we can
14564     // consider them to be sequenced. This is needed to avoid warning on
14565     // something like "x ? y += 1 : y += 2;" in the case where we will visit
14566     // both the true and false expressions because we can't evaluate x.
14567     // This will still allow us to detect an expression like (pre C++17)
14568     // "(x ? y += 1 : y += 2) = y".
14569     //
14570     // We don't wrap the visitation of the true and false expression with
14571     // SequencedSubexpression because we don't want to downgrade modifications
14572     // as side effect in the true and false expressions after the visitation
14573     // is done. (For example, in the expression "(x ? y++ : y++) + y" we should
14574     // not warn between the two "y++", but we should warn between the "y++"
14575     // and the "y".)
14576     SequenceTree::Seq TrueRegion = Tree.allocate(Region);
14577     SequenceTree::Seq FalseRegion = Tree.allocate(Region);
14578     SequenceTree::Seq OldRegion = Region;
14579 
14580     EvaluationTracker Eval(*this);
14581     {
14582       SequencedSubexpression Sequenced(*this);
14583       Region = ConditionRegion;
14584       Visit(CO->getCond());
14585     }
14586 
14587     // C++11 [expr.cond]p1:
14588     // [...] The first expression is contextually converted to bool (Clause 4).
14589     // It is evaluated and if it is true, the result of the conditional
14590     // expression is the value of the second expression, otherwise that of the
14591     // third expression. Only one of the second and third expressions is
14592     // evaluated. [...]
14593     bool EvalResult = false;
14594     bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
14595     bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
14596     bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
14597     if (ShouldVisitTrueExpr) {
14598       Region = TrueRegion;
14599       Visit(CO->getTrueExpr());
14600     }
14601     if (ShouldVisitFalseExpr) {
14602       Region = FalseRegion;
14603       Visit(CO->getFalseExpr());
14604     }
14605 
14606     Region = OldRegion;
14607     Tree.merge(ConditionRegion);
14608     Tree.merge(TrueRegion);
14609     Tree.merge(FalseRegion);
14610   }
14611 
14612   void VisitCallExpr(const CallExpr *CE) {
14613     // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
14614 
14615     if (CE->isUnevaluatedBuiltinCall(Context))
14616       return;
14617 
14618     // C++11 [intro.execution]p15:
14619     // When calling a function [...], every value computation and side effect
14620     // associated with any argument expression, or with the postfix expression
14621     // designating the called function, is sequenced before execution of every
14622     // expression or statement in the body of the function [and thus before
14623     // the value computation of its result].
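    // For example, in 'g(i++)' the increment of 'i' is sequenced before every
    // expression in the body of 'g', so it is treated as a value modification
    // once the call has been visited.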
14624 SequencedSubexpression Sequenced(*this); 14625 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 14626 // C++17 [expr.call]p5 14627 // The postfix-expression is sequenced before each expression in the 14628 // expression-list and any default argument. [...] 14629 SequenceTree::Seq CalleeRegion; 14630 SequenceTree::Seq OtherRegion; 14631 if (SemaRef.getLangOpts().CPlusPlus17) { 14632 CalleeRegion = Tree.allocate(Region); 14633 OtherRegion = Tree.allocate(Region); 14634 } else { 14635 CalleeRegion = Region; 14636 OtherRegion = Region; 14637 } 14638 SequenceTree::Seq OldRegion = Region; 14639 14640 // Visit the callee expression first. 14641 Region = CalleeRegion; 14642 if (SemaRef.getLangOpts().CPlusPlus17) { 14643 SequencedSubexpression Sequenced(*this); 14644 Visit(CE->getCallee()); 14645 } else { 14646 Visit(CE->getCallee()); 14647 } 14648 14649 // Then visit the argument expressions. 14650 Region = OtherRegion; 14651 for (const Expr *Argument : CE->arguments()) 14652 Visit(Argument); 14653 14654 Region = OldRegion; 14655 if (SemaRef.getLangOpts().CPlusPlus17) { 14656 Tree.merge(CalleeRegion); 14657 Tree.merge(OtherRegion); 14658 } 14659 }); 14660 } 14661 14662 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 14663 // C++17 [over.match.oper]p2: 14664 // [...] the operator notation is first transformed to the equivalent 14665 // function-call notation as summarized in Table 12 (where @ denotes one 14666 // of the operators covered in the specified subclause). However, the 14667 // operands are sequenced in the order prescribed for the built-in 14668 // operator (Clause 8). 14669 // 14670 // From the above only overloaded binary operators and overloaded call 14671 // operators have sequencing rules in C++17 that we need to handle 14672 // separately. 14673 if (!SemaRef.getLangOpts().CPlusPlus17 || 14674 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 14675 return VisitCallExpr(CXXOCE); 14676 14677 enum { 14678 NoSequencing, 14679 LHSBeforeRHS, 14680 RHSBeforeLHS, 14681 LHSBeforeRest 14682 } SequencingKind; 14683 switch (CXXOCE->getOperator()) { 14684 case OO_Equal: 14685 case OO_PlusEqual: 14686 case OO_MinusEqual: 14687 case OO_StarEqual: 14688 case OO_SlashEqual: 14689 case OO_PercentEqual: 14690 case OO_CaretEqual: 14691 case OO_AmpEqual: 14692 case OO_PipeEqual: 14693 case OO_LessLessEqual: 14694 case OO_GreaterGreaterEqual: 14695 SequencingKind = RHSBeforeLHS; 14696 break; 14697 14698 case OO_LessLess: 14699 case OO_GreaterGreater: 14700 case OO_AmpAmp: 14701 case OO_PipePipe: 14702 case OO_Comma: 14703 case OO_ArrowStar: 14704 case OO_Subscript: 14705 SequencingKind = LHSBeforeRHS; 14706 break; 14707 14708 case OO_Call: 14709 SequencingKind = LHSBeforeRest; 14710 break; 14711 14712 default: 14713 SequencingKind = NoSequencing; 14714 break; 14715 } 14716 14717 if (SequencingKind == NoSequencing) 14718 return VisitCallExpr(CXXOCE); 14719 14720 // This is a call, so all subexpressions are sequenced before the result. 
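    // For example, with an overloaded 'operator<<', a C++17 chain like
    // 'os << f() << g()' keeps the built-in left-to-right sequencing, so
    // 'f()' is sequenced before 'g()'.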
14721 SequencedSubexpression Sequenced(*this); 14722 14723 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 14724 assert(SemaRef.getLangOpts().CPlusPlus17 && 14725 "Should only get there with C++17 and above!"); 14726 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 14727 "Should only get there with an overloaded binary operator" 14728 " or an overloaded call operator!"); 14729 14730 if (SequencingKind == LHSBeforeRest) { 14731 assert(CXXOCE->getOperator() == OO_Call && 14732 "We should only have an overloaded call operator here!"); 14733 14734 // This is very similar to VisitCallExpr, except that we only have the 14735 // C++17 case. The postfix-expression is the first argument of the 14736 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 14737 // are in the following arguments. 14738 // 14739 // Note that we intentionally do not visit the callee expression since 14740 // it is just a decayed reference to a function. 14741 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 14742 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 14743 SequenceTree::Seq OldRegion = Region; 14744 14745 assert(CXXOCE->getNumArgs() >= 1 && 14746 "An overloaded call operator must have at least one argument" 14747 " for the postfix-expression!"); 14748 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 14749 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 14750 CXXOCE->getNumArgs() - 1); 14751 14752 // Visit the postfix-expression first. 14753 { 14754 Region = PostfixExprRegion; 14755 SequencedSubexpression Sequenced(*this); 14756 Visit(PostfixExpr); 14757 } 14758 14759 // Then visit the argument expressions. 14760 Region = ArgsRegion; 14761 for (const Expr *Arg : Args) 14762 Visit(Arg); 14763 14764 Region = OldRegion; 14765 Tree.merge(PostfixExprRegion); 14766 Tree.merge(ArgsRegion); 14767 } else { 14768 assert(CXXOCE->getNumArgs() == 2 && 14769 "Should only have two arguments here!"); 14770 assert((SequencingKind == LHSBeforeRHS || 14771 SequencingKind == RHSBeforeLHS) && 14772 "Unexpected sequencing kind!"); 14773 14774 // We do not visit the callee expression since it is just a decayed 14775 // reference to a function. 14776 const Expr *E1 = CXXOCE->getArg(0); 14777 const Expr *E2 = CXXOCE->getArg(1); 14778 if (SequencingKind == RHSBeforeLHS) 14779 std::swap(E1, E2); 14780 14781 return VisitSequencedExpressions(E1, E2); 14782 } 14783 }); 14784 } 14785 14786 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 14787 // This is a call, so all subexpressions are sequenced before the result. 14788 SequencedSubexpression Sequenced(*this); 14789 14790 if (!CCE->isListInitialization()) 14791 return VisitExpr(CCE); 14792 14793 // In C++11, list initializations are sequenced. 14794 SmallVector<SequenceTree::Seq, 32> Elts; 14795 SequenceTree::Seq Parent = Region; 14796 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 14797 E = CCE->arg_end(); 14798 I != E; ++I) { 14799 Region = Tree.allocate(Parent); 14800 Elts.push_back(Region); 14801 Visit(*I); 14802 } 14803 14804 // Forget that the initializers are sequenced. 14805 Region = Parent; 14806 for (unsigned I = 0; I < Elts.size(); ++I) 14807 Tree.merge(Elts[I]); 14808 } 14809 14810 void VisitInitListExpr(const InitListExpr *ILE) { 14811 if (!SemaRef.getLangOpts().CPlusPlus11) 14812 return VisitExpr(ILE); 14813 14814 // In C++11, list initializations are sequenced. 
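    // For example, 'int a[] = {i++, i++};' is not diagnosed in C++11 and
    // later: each initializer gets its own region below, so the two
    // increments are treated as sequenced with respect to each other.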
14815 SmallVector<SequenceTree::Seq, 32> Elts; 14816 SequenceTree::Seq Parent = Region; 14817 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 14818 const Expr *E = ILE->getInit(I); 14819 if (!E) 14820 continue; 14821 Region = Tree.allocate(Parent); 14822 Elts.push_back(Region); 14823 Visit(E); 14824 } 14825 14826 // Forget that the initializers are sequenced. 14827 Region = Parent; 14828 for (unsigned I = 0; I < Elts.size(); ++I) 14829 Tree.merge(Elts[I]); 14830 } 14831 }; 14832 14833 } // namespace 14834 14835 void Sema::CheckUnsequencedOperations(const Expr *E) { 14836 SmallVector<const Expr *, 8> WorkList; 14837 WorkList.push_back(E); 14838 while (!WorkList.empty()) { 14839 const Expr *Item = WorkList.pop_back_val(); 14840 SequenceChecker(*this, Item, WorkList); 14841 } 14842 } 14843 14844 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 14845 bool IsConstexpr) { 14846 llvm::SaveAndRestore<bool> ConstantContext( 14847 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 14848 CheckImplicitConversions(E, CheckLoc); 14849 if (!E->isInstantiationDependent()) 14850 CheckUnsequencedOperations(E); 14851 if (!IsConstexpr && !E->isValueDependent()) 14852 CheckForIntOverflow(E); 14853 DiagnoseMisalignedMembers(); 14854 } 14855 14856 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 14857 FieldDecl *BitField, 14858 Expr *Init) { 14859 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 14860 } 14861 14862 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 14863 SourceLocation Loc) { 14864 if (!PType->isVariablyModifiedType()) 14865 return; 14866 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 14867 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 14868 return; 14869 } 14870 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 14871 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 14872 return; 14873 } 14874 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 14875 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 14876 return; 14877 } 14878 14879 const ArrayType *AT = S.Context.getAsArrayType(PType); 14880 if (!AT) 14881 return; 14882 14883 if (AT->getSizeModifier() != ArrayType::Star) { 14884 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 14885 return; 14886 } 14887 14888 S.Diag(Loc, diag::err_array_star_in_function_definition); 14889 } 14890 14891 /// CheckParmsForFunctionDef - Check that the parameters of the given 14892 /// function are appropriate for the definition of a function. This 14893 /// takes care of any checks that cannot be performed on the 14894 /// declaration itself, e.g., that the types of each of the function 14895 /// parameters are complete. 14896 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 14897 bool CheckParameterNames) { 14898 bool HasInvalidParm = false; 14899 for (ParmVarDecl *Param : Parameters) { 14900 // C99 6.7.5.3p4: the parameters in a parameter type list in a 14901 // function declarator that is part of a function definition of 14902 // that function shall not have incomplete type. 14903 // 14904 // This is also C++ [dcl.fct]p6. 
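    // For example, 'void f(struct S s) { }' is diagnosed here if 'struct S'
    // has only been forward-declared at the point of the definition.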
14905 if (!Param->isInvalidDecl() && 14906 RequireCompleteType(Param->getLocation(), Param->getType(), 14907 diag::err_typecheck_decl_incomplete_type)) { 14908 Param->setInvalidDecl(); 14909 HasInvalidParm = true; 14910 } 14911 14912 // C99 6.9.1p5: If the declarator includes a parameter type list, the 14913 // declaration of each parameter shall include an identifier. 14914 if (CheckParameterNames && Param->getIdentifier() == nullptr && 14915 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 14916 // Diagnose this as an extension in C17 and earlier. 14917 if (!getLangOpts().C2x) 14918 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 14919 } 14920 14921 // C99 6.7.5.3p12: 14922 // If the function declarator is not part of a definition of that 14923 // function, parameters may have incomplete type and may use the [*] 14924 // notation in their sequences of declarator specifiers to specify 14925 // variable length array types. 14926 QualType PType = Param->getOriginalType(); 14927 // FIXME: This diagnostic should point the '[*]' if source-location 14928 // information is added for it. 14929 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 14930 14931 // If the parameter is a c++ class type and it has to be destructed in the 14932 // callee function, declare the destructor so that it can be called by the 14933 // callee function. Do not perform any direct access check on the dtor here. 14934 if (!Param->isInvalidDecl()) { 14935 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 14936 if (!ClassDecl->isInvalidDecl() && 14937 !ClassDecl->hasIrrelevantDestructor() && 14938 !ClassDecl->isDependentContext() && 14939 ClassDecl->isParamDestroyedInCallee()) { 14940 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 14941 MarkFunctionReferenced(Param->getLocation(), Destructor); 14942 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 14943 } 14944 } 14945 } 14946 14947 // Parameters with the pass_object_size attribute only need to be marked 14948 // constant at function definitions. Because we lack information about 14949 // whether we're on a declaration or definition when we're instantiating the 14950 // attribute, we need to check for constness here. 14951 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 14952 if (!Param->getType().isConstQualified()) 14953 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 14954 << Attr->getSpelling() << 1; 14955 14956 // Check for parameter names shadowing fields from the class. 14957 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 14958 // The owning context for the parameter should be the function, but we 14959 // want to see if this function's declaration context is a record. 14960 DeclContext *DC = Param->getDeclContext(); 14961 if (DC && DC->isFunctionOrMethod()) { 14962 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 14963 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 14964 RD, /*DeclIsField*/ false); 14965 } 14966 } 14967 } 14968 14969 return HasInvalidParm; 14970 } 14971 14972 Optional<std::pair<CharUnits, CharUnits>> 14973 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 14974 14975 /// Compute the alignment and offset of the base class object given the 14976 /// derived-to-base cast expression and the alignment and offset of the derived 14977 /// class object. 
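/// For a non-virtual base the base-class offset from the record layout is
/// added to the running offset; for a virtual base only a conservative
/// alignment lower bound is kept and the offset is reset to zero.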
14978 static std::pair<CharUnits, CharUnits> 14979 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 14980 CharUnits BaseAlignment, CharUnits Offset, 14981 ASTContext &Ctx) { 14982 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 14983 ++PathI) { 14984 const CXXBaseSpecifier *Base = *PathI; 14985 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 14986 if (Base->isVirtual()) { 14987 // The complete object may have a lower alignment than the non-virtual 14988 // alignment of the base, in which case the base may be misaligned. Choose 14989 // the smaller of the non-virtual alignment and BaseAlignment, which is a 14990 // conservative lower bound of the complete object alignment. 14991 CharUnits NonVirtualAlignment = 14992 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 14993 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 14994 Offset = CharUnits::Zero(); 14995 } else { 14996 const ASTRecordLayout &RL = 14997 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 14998 Offset += RL.getBaseClassOffset(BaseDecl); 14999 } 15000 DerivedType = Base->getType(); 15001 } 15002 15003 return std::make_pair(BaseAlignment, Offset); 15004 } 15005 15006 /// Compute the alignment and offset of a binary additive operator. 15007 static Optional<std::pair<CharUnits, CharUnits>> 15008 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 15009 bool IsSub, ASTContext &Ctx) { 15010 QualType PointeeType = PtrE->getType()->getPointeeType(); 15011 15012 if (!PointeeType->isConstantSizeType()) 15013 return llvm::None; 15014 15015 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 15016 15017 if (!P) 15018 return llvm::None; 15019 15020 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 15021 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 15022 CharUnits Offset = EltSize * IdxRes->getExtValue(); 15023 if (IsSub) 15024 Offset = -Offset; 15025 return std::make_pair(P->first, P->second + Offset); 15026 } 15027 15028 // If the integer expression isn't a constant expression, compute the lower 15029 // bound of the alignment using the alignment and offset of the pointer 15030 // expression and the element size. 15031 return std::make_pair( 15032 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 15033 CharUnits::Zero()); 15034 } 15035 15036 /// This helper function takes an lvalue expression and returns the alignment of 15037 /// a VarDecl and a constant offset from the VarDecl. 
15038 Optional<std::pair<CharUnits, CharUnits>> 15039 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 15040 E = E->IgnoreParens(); 15041 switch (E->getStmtClass()) { 15042 default: 15043 break; 15044 case Stmt::CStyleCastExprClass: 15045 case Stmt::CXXStaticCastExprClass: 15046 case Stmt::ImplicitCastExprClass: { 15047 auto *CE = cast<CastExpr>(E); 15048 const Expr *From = CE->getSubExpr(); 15049 switch (CE->getCastKind()) { 15050 default: 15051 break; 15052 case CK_NoOp: 15053 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15054 case CK_UncheckedDerivedToBase: 15055 case CK_DerivedToBase: { 15056 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15057 if (!P) 15058 break; 15059 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 15060 P->second, Ctx); 15061 } 15062 } 15063 break; 15064 } 15065 case Stmt::ArraySubscriptExprClass: { 15066 auto *ASE = cast<ArraySubscriptExpr>(E); 15067 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 15068 false, Ctx); 15069 } 15070 case Stmt::DeclRefExprClass: { 15071 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 15072 // FIXME: If VD is captured by copy or is an escaping __block variable, 15073 // use the alignment of VD's type. 15074 if (!VD->getType()->isReferenceType()) 15075 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 15076 if (VD->hasInit()) 15077 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 15078 } 15079 break; 15080 } 15081 case Stmt::MemberExprClass: { 15082 auto *ME = cast<MemberExpr>(E); 15083 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 15084 if (!FD || FD->getType()->isReferenceType() || 15085 FD->getParent()->isInvalidDecl()) 15086 break; 15087 Optional<std::pair<CharUnits, CharUnits>> P; 15088 if (ME->isArrow()) 15089 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 15090 else 15091 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 15092 if (!P) 15093 break; 15094 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 15095 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 15096 return std::make_pair(P->first, 15097 P->second + CharUnits::fromQuantity(Offset)); 15098 } 15099 case Stmt::UnaryOperatorClass: { 15100 auto *UO = cast<UnaryOperator>(E); 15101 switch (UO->getOpcode()) { 15102 default: 15103 break; 15104 case UO_Deref: 15105 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 15106 } 15107 break; 15108 } 15109 case Stmt::BinaryOperatorClass: { 15110 auto *BO = cast<BinaryOperator>(E); 15111 auto Opcode = BO->getOpcode(); 15112 switch (Opcode) { 15113 default: 15114 break; 15115 case BO_Comma: 15116 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 15117 } 15118 break; 15119 } 15120 } 15121 return llvm::None; 15122 } 15123 15124 /// This helper function takes a pointer expression and returns the alignment of 15125 /// a VarDecl and a constant offset from the VarDecl. 
15126 Optional<std::pair<CharUnits, CharUnits>> 15127 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 15128 E = E->IgnoreParens(); 15129 switch (E->getStmtClass()) { 15130 default: 15131 break; 15132 case Stmt::CStyleCastExprClass: 15133 case Stmt::CXXStaticCastExprClass: 15134 case Stmt::ImplicitCastExprClass: { 15135 auto *CE = cast<CastExpr>(E); 15136 const Expr *From = CE->getSubExpr(); 15137 switch (CE->getCastKind()) { 15138 default: 15139 break; 15140 case CK_NoOp: 15141 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 15142 case CK_ArrayToPointerDecay: 15143 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15144 case CK_UncheckedDerivedToBase: 15145 case CK_DerivedToBase: { 15146 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 15147 if (!P) 15148 break; 15149 return getDerivedToBaseAlignmentAndOffset( 15150 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 15151 } 15152 } 15153 break; 15154 } 15155 case Stmt::CXXThisExprClass: { 15156 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 15157 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 15158 return std::make_pair(Alignment, CharUnits::Zero()); 15159 } 15160 case Stmt::UnaryOperatorClass: { 15161 auto *UO = cast<UnaryOperator>(E); 15162 if (UO->getOpcode() == UO_AddrOf) 15163 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 15164 break; 15165 } 15166 case Stmt::BinaryOperatorClass: { 15167 auto *BO = cast<BinaryOperator>(E); 15168 auto Opcode = BO->getOpcode(); 15169 switch (Opcode) { 15170 default: 15171 break; 15172 case BO_Add: 15173 case BO_Sub: { 15174 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 15175 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 15176 std::swap(LHS, RHS); 15177 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 15178 Ctx); 15179 } 15180 case BO_Comma: 15181 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 15182 } 15183 break; 15184 } 15185 } 15186 return llvm::None; 15187 } 15188 15189 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 15190 // See if we can compute the alignment of a VarDecl and an offset from it. 15191 Optional<std::pair<CharUnits, CharUnits>> P = 15192 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 15193 15194 if (P) 15195 return P->first.alignmentAtOffset(P->second); 15196 15197 // If that failed, return the type's alignment. 15198 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 15199 } 15200 15201 /// CheckCastAlign - Implements -Wcast-align, which warns when a 15202 /// pointer cast increases the alignment requirements. 15203 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 15204 // This is actually a lot of work to potentially be doing on every 15205 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 15206 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 15207 return; 15208 15209 // Ignore dependent types. 15210 if (T->isDependentType() || Op->getType()->isDependentType()) 15211 return; 15212 15213 // Require that the destination be a pointer type. 15214 const PointerType *DestPtr = T->getAs<PointerType>(); 15215 if (!DestPtr) return; 15216 15217 // If the destination has alignment 1, we're done. 
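  // For illustration (hypothetical user code): a cast to 'char *' never
  // warns because the destination alignment is 1, e.g.
  //   int *ip = get();
  //   char *cp = (char *)ip;   // fine, 'char' has no alignment requirement
  // whereas the reverse direction is the typical -Wcast-align hit once the
  // source is known to be less aligned than the destination:
  //   char buf[16];
  //   int *ip2 = (int *)buf;   // may warn: cast increases required alignment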
15218 QualType DestPointee = DestPtr->getPointeeType(); 15219 if (DestPointee->isIncompleteType()) return; 15220 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 15221 if (DestAlign.isOne()) return; 15222 15223 // Require that the source be a pointer type. 15224 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 15225 if (!SrcPtr) return; 15226 QualType SrcPointee = SrcPtr->getPointeeType(); 15227 15228 // Explicitly allow casts from cv void*. We already implicitly 15229 // allowed casts to cv void*, since they have alignment 1. 15230 // Also allow casts involving incomplete types, which implicitly 15231 // includes 'void'. 15232 if (SrcPointee->isIncompleteType()) return; 15233 15234 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 15235 15236 if (SrcAlign >= DestAlign) return; 15237 15238 Diag(TRange.getBegin(), diag::warn_cast_align) 15239 << Op->getType() << T 15240 << static_cast<unsigned>(SrcAlign.getQuantity()) 15241 << static_cast<unsigned>(DestAlign.getQuantity()) 15242 << TRange << Op->getSourceRange(); 15243 } 15244 15245 /// Check whether this array fits the idiom of a size-one tail padded 15246 /// array member of a struct. 15247 /// 15248 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 15249 /// commonly used to emulate flexible arrays in C89 code. 15250 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 15251 const NamedDecl *ND) { 15252 if (Size != 1 || !ND) return false; 15253 15254 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 15255 if (!FD) return false; 15256 15257 // Don't consider sizes resulting from macro expansions or template argument 15258 // substitution to form C89 tail-padded arrays. 15259 15260 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 15261 while (TInfo) { 15262 TypeLoc TL = TInfo->getTypeLoc(); 15263 // Look through typedefs. 15264 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 15265 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 15266 TInfo = TDL->getTypeSourceInfo(); 15267 continue; 15268 } 15269 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 15270 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 15271 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 15272 return false; 15273 } 15274 break; 15275 } 15276 15277 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 15278 if (!RD) return false; 15279 if (RD->isUnion()) return false; 15280 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 15281 if (!CRD->isStandardLayout()) return false; 15282 } 15283 15284 // See if this is the last field decl in the record. 15285 const Decl *D = FD; 15286 while ((D = D->getNextDeclInContext())) 15287 if (isa<FieldDecl>(D)) 15288 return false; 15289 return true; 15290 } 15291 15292 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 15293 const ArraySubscriptExpr *ASE, 15294 bool AllowOnePastEnd, bool IndexNegated) { 15295 // Already diagnosed by the constant evaluator. 15296 if (isConstantEvaluated()) 15297 return; 15298 15299 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 15300 if (IndexExpr->isValueDependent()) 15301 return; 15302 15303 const Type *EffectiveType = 15304 BaseExpr->getType()->getPointeeOrArrayElementType(); 15305 BaseExpr = BaseExpr->IgnoreParenCasts(); 15306 const ConstantArrayType *ArrayTy = 15307 Context.getAsConstantArrayType(BaseExpr->getType()); 15308 15309 const Type *BaseType = 15310 ArrayTy == nullptr ? 
nullptr : ArrayTy->getElementType().getTypePtr(); 15311 bool IsUnboundedArray = (BaseType == nullptr); 15312 if (EffectiveType->isDependentType() || 15313 (!IsUnboundedArray && BaseType->isDependentType())) 15314 return; 15315 15316 Expr::EvalResult Result; 15317 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 15318 return; 15319 15320 llvm::APSInt index = Result.Val.getInt(); 15321 if (IndexNegated) { 15322 index.setIsUnsigned(false); 15323 index = -index; 15324 } 15325 15326 const NamedDecl *ND = nullptr; 15327 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15328 ND = DRE->getDecl(); 15329 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 15330 ND = ME->getMemberDecl(); 15331 15332 if (IsUnboundedArray) { 15333 if (index.isUnsigned() || !index.isNegative()) { 15334 const auto &ASTC = getASTContext(); 15335 unsigned AddrBits = 15336 ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace( 15337 EffectiveType->getCanonicalTypeInternal())); 15338 if (index.getBitWidth() < AddrBits) 15339 index = index.zext(AddrBits); 15340 Optional<CharUnits> ElemCharUnits = 15341 ASTC.getTypeSizeInCharsIfKnown(EffectiveType); 15342 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void 15343 // pointer) bounds-checking isn't meaningful. 15344 if (!ElemCharUnits) 15345 return; 15346 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity()); 15347 // If index has more active bits than address space, we already know 15348 // we have a bounds violation to warn about. Otherwise, compute 15349 // address of (index + 1)th element, and warn about bounds violation 15350 // only if that address exceeds address space. 15351 if (index.getActiveBits() <= AddrBits) { 15352 bool Overflow; 15353 llvm::APInt Product(index); 15354 Product += 1; 15355 Product = Product.umul_ov(ElemBytes, Overflow); 15356 if (!Overflow && Product.getActiveBits() <= AddrBits) 15357 return; 15358 } 15359 15360 // Need to compute max possible elements in address space, since that 15361 // is included in diag message. 15362 llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits); 15363 MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth())); 15364 MaxElems += 1; 15365 ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth()); 15366 MaxElems = MaxElems.udiv(ElemBytes); 15367 15368 unsigned DiagID = 15369 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds 15370 : diag::warn_ptr_arith_exceeds_max_addressable_bounds; 15371 15372 // Diag message shows element size in bits and in "bytes" (platform- 15373 // dependent CharUnits) 15374 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15375 PDiag(DiagID) 15376 << toString(index, 10, true) << AddrBits 15377 << (unsigned)ASTC.toBits(*ElemCharUnits) 15378 << toString(ElemBytes, 10, false) 15379 << toString(MaxElems, 10, false) 15380 << (unsigned)MaxElems.getLimitedValue(~0U) 15381 << IndexExpr->getSourceRange()); 15382 15383 if (!ND) { 15384 // Try harder to find a NamedDecl to point at in the note. 
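      // For example (hypothetical user code), for 'buf[i][j]' the interesting
      // declaration is that of 'buf', so peel away nested subscripts before
      // looking for a DeclRefExpr or MemberExpr to name in the note.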
15385 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15386 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15387 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15388 ND = DRE->getDecl(); 15389 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15390 ND = ME->getMemberDecl(); 15391 } 15392 15393 if (ND) 15394 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15395 PDiag(diag::note_array_declared_here) << ND); 15396 } 15397 return; 15398 } 15399 15400 if (index.isUnsigned() || !index.isNegative()) { 15401 // It is possible that the type of the base expression after 15402 // IgnoreParenCasts is incomplete, even though the type of the base 15403 // expression before IgnoreParenCasts is complete (see PR39746 for an 15404 // example). In this case we have no information about whether the array 15405 // access exceeds the array bounds. However we can still diagnose an array 15406 // access which precedes the array bounds. 15407 if (BaseType->isIncompleteType()) 15408 return; 15409 15410 llvm::APInt size = ArrayTy->getSize(); 15411 if (!size.isStrictlyPositive()) 15412 return; 15413 15414 if (BaseType != EffectiveType) { 15415 // Make sure we're comparing apples to apples when comparing index to size 15416 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 15417 uint64_t array_typesize = Context.getTypeSize(BaseType); 15418 // Handle ptrarith_typesize being zero, such as when casting to void* 15419 if (!ptrarith_typesize) ptrarith_typesize = 1; 15420 if (ptrarith_typesize != array_typesize) { 15421 // There's a cast to a different size type involved 15422 uint64_t ratio = array_typesize / ptrarith_typesize; 15423 // TODO: Be smarter about handling cases where array_typesize is not a 15424 // multiple of ptrarith_typesize 15425 if (ptrarith_typesize * ratio == array_typesize) 15426 size *= llvm::APInt(size.getBitWidth(), ratio); 15427 } 15428 } 15429 15430 if (size.getBitWidth() > index.getBitWidth()) 15431 index = index.zext(size.getBitWidth()); 15432 else if (size.getBitWidth() < index.getBitWidth()) 15433 size = size.zext(index.getBitWidth()); 15434 15435 // For array subscripting the index must be less than size, but for pointer 15436 // arithmetic also allow the index (offset) to be equal to size since 15437 // computing the next address after the end of the array is legal and 15438 // commonly done e.g. in C++ iterators and range-based for loops. 15439 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 15440 return; 15441 15442 // Also don't warn for arrays of size 1 which are members of some 15443 // structure. These are often used to approximate flexible arrays in C89 15444 // code. 15445 if (IsTailPaddedMemberArray(*this, size, ND)) 15446 return; 15447 15448 // Suppress the warning if the subscript expression (as identified by the 15449 // ']' location) and the index expression are both from macro expansions 15450 // within a system header. 15451 if (ASE) { 15452 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 15453 ASE->getRBracketLoc()); 15454 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 15455 SourceLocation IndexLoc = 15456 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 15457 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 15458 return; 15459 } 15460 } 15461 15462 unsigned DiagID = ASE ? 
diag::warn_array_index_exceeds_bounds 15463 : diag::warn_ptr_arith_exceeds_bounds; 15464 15465 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15466 PDiag(DiagID) << toString(index, 10, true) 15467 << toString(size, 10, true) 15468 << (unsigned)size.getLimitedValue(~0U) 15469 << IndexExpr->getSourceRange()); 15470 } else { 15471 unsigned DiagID = diag::warn_array_index_precedes_bounds; 15472 if (!ASE) { 15473 DiagID = diag::warn_ptr_arith_precedes_bounds; 15474 if (index.isNegative()) index = -index; 15475 } 15476 15477 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15478 PDiag(DiagID) << toString(index, 10, true) 15479 << IndexExpr->getSourceRange()); 15480 } 15481 15482 if (!ND) { 15483 // Try harder to find a NamedDecl to point at in the note. 15484 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15485 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15486 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15487 ND = DRE->getDecl(); 15488 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15489 ND = ME->getMemberDecl(); 15490 } 15491 15492 if (ND) 15493 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15494 PDiag(diag::note_array_declared_here) << ND); 15495 } 15496 15497 void Sema::CheckArrayAccess(const Expr *expr) { 15498 int AllowOnePastEnd = 0; 15499 while (expr) { 15500 expr = expr->IgnoreParenImpCasts(); 15501 switch (expr->getStmtClass()) { 15502 case Stmt::ArraySubscriptExprClass: { 15503 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 15504 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 15505 AllowOnePastEnd > 0); 15506 expr = ASE->getBase(); 15507 break; 15508 } 15509 case Stmt::MemberExprClass: { 15510 expr = cast<MemberExpr>(expr)->getBase(); 15511 break; 15512 } 15513 case Stmt::OMPArraySectionExprClass: { 15514 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 15515 if (ASE->getLowerBound()) 15516 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 15517 /*ASE=*/nullptr, AllowOnePastEnd > 0); 15518 return; 15519 } 15520 case Stmt::UnaryOperatorClass: { 15521 // Only unwrap the * and & unary operators 15522 const UnaryOperator *UO = cast<UnaryOperator>(expr); 15523 expr = UO->getSubExpr(); 15524 switch (UO->getOpcode()) { 15525 case UO_AddrOf: 15526 AllowOnePastEnd++; 15527 break; 15528 case UO_Deref: 15529 AllowOnePastEnd--; 15530 break; 15531 default: 15532 return; 15533 } 15534 break; 15535 } 15536 case Stmt::ConditionalOperatorClass: { 15537 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 15538 if (const Expr *lhs = cond->getLHS()) 15539 CheckArrayAccess(lhs); 15540 if (const Expr *rhs = cond->getRHS()) 15541 CheckArrayAccess(rhs); 15542 return; 15543 } 15544 case Stmt::CXXOperatorCallExprClass: { 15545 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 15546 for (const auto *Arg : OCE->arguments()) 15547 CheckArrayAccess(Arg); 15548 return; 15549 } 15550 default: 15551 return; 15552 } 15553 } 15554 } 15555 15556 //===--- CHECK: Objective-C retain cycles ----------------------------------// 15557 15558 namespace { 15559 15560 struct RetainCycleOwner { 15561 VarDecl *Variable = nullptr; 15562 SourceRange Range; 15563 SourceLocation Loc; 15564 bool Indirect = false; 15565 15566 RetainCycleOwner() = default; 15567 15568 void setLocsFrom(Expr *e) { 15569 Loc = e->getExprLoc(); 15570 Range = e->getSourceRange(); 15571 } 15572 }; 15573 15574 } // namespace 15575 15576 /// Consider whether capturing the given variable can possibly lead to 15577 /// a retain cycle. 
15578 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 15579 // In ARC, it's captured strongly iff the variable has __strong 15580 // lifetime. In MRR, it's captured strongly if the variable is 15581 // __block and has an appropriate type. 15582 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 15583 return false; 15584 15585 owner.Variable = var; 15586 if (ref) 15587 owner.setLocsFrom(ref); 15588 return true; 15589 } 15590 15591 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 15592 while (true) { 15593 e = e->IgnoreParens(); 15594 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 15595 switch (cast->getCastKind()) { 15596 case CK_BitCast: 15597 case CK_LValueBitCast: 15598 case CK_LValueToRValue: 15599 case CK_ARCReclaimReturnedObject: 15600 e = cast->getSubExpr(); 15601 continue; 15602 15603 default: 15604 return false; 15605 } 15606 } 15607 15608 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 15609 ObjCIvarDecl *ivar = ref->getDecl(); 15610 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 15611 return false; 15612 15613 // Try to find a retain cycle in the base. 15614 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 15615 return false; 15616 15617 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 15618 owner.Indirect = true; 15619 return true; 15620 } 15621 15622 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 15623 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 15624 if (!var) return false; 15625 return considerVariable(var, ref, owner); 15626 } 15627 15628 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 15629 if (member->isArrow()) return false; 15630 15631 // Don't count this as an indirect ownership. 15632 e = member->getBase(); 15633 continue; 15634 } 15635 15636 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 15637 // Only pay attention to pseudo-objects on property references. 15638 ObjCPropertyRefExpr *pre 15639 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 15640 ->IgnoreParens()); 15641 if (!pre) return false; 15642 if (pre->isImplicitProperty()) return false; 15643 ObjCPropertyDecl *property = pre->getExplicitProperty(); 15644 if (!property->isRetaining() && 15645 !(property->getPropertyIvarDecl() && 15646 property->getPropertyIvarDecl()->getType() 15647 .getObjCLifetime() == Qualifiers::OCL_Strong)) 15648 return false; 15649 15650 owner.Indirect = true; 15651 if (pre->isSuperReceiver()) { 15652 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 15653 if (!owner.Variable) 15654 return false; 15655 owner.Loc = pre->getLocation(); 15656 owner.Range = pre->getSourceRange(); 15657 return true; 15658 } 15659 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 15660 ->getSourceExpr()); 15661 continue; 15662 } 15663 15664 // Array ivars? 
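    // Anything not matched above (function calls, subscripts, etc.) is
    // conservatively treated as not being strongly owned by a local
    // variable or ivar.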
15665 15666 return false; 15667 } 15668 } 15669 15670 namespace { 15671 15672 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 15673 ASTContext &Context; 15674 VarDecl *Variable; 15675 Expr *Capturer = nullptr; 15676 bool VarWillBeReased = false; 15677 15678 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 15679 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 15680 Context(Context), Variable(variable) {} 15681 15682 void VisitDeclRefExpr(DeclRefExpr *ref) { 15683 if (ref->getDecl() == Variable && !Capturer) 15684 Capturer = ref; 15685 } 15686 15687 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 15688 if (Capturer) return; 15689 Visit(ref->getBase()); 15690 if (Capturer && ref->isFreeIvar()) 15691 Capturer = ref; 15692 } 15693 15694 void VisitBlockExpr(BlockExpr *block) { 15695 // Look inside nested blocks 15696 if (block->getBlockDecl()->capturesVariable(Variable)) 15697 Visit(block->getBlockDecl()->getBody()); 15698 } 15699 15700 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 15701 if (Capturer) return; 15702 if (OVE->getSourceExpr()) 15703 Visit(OVE->getSourceExpr()); 15704 } 15705 15706 void VisitBinaryOperator(BinaryOperator *BinOp) { 15707 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 15708 return; 15709 Expr *LHS = BinOp->getLHS(); 15710 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 15711 if (DRE->getDecl() != Variable) 15712 return; 15713 if (Expr *RHS = BinOp->getRHS()) { 15714 RHS = RHS->IgnoreParenCasts(); 15715 Optional<llvm::APSInt> Value; 15716 VarWillBeReased = 15717 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 15718 *Value == 0); 15719 } 15720 } 15721 } 15722 }; 15723 15724 } // namespace 15725 15726 /// Check whether the given argument is a block which captures a 15727 /// variable. 15728 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 15729 assert(owner.Variable && owner.Loc.isValid()); 15730 15731 e = e->IgnoreParenCasts(); 15732 15733 // Look through [^{...} copy] and Block_copy(^{...}). 15734 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 15735 Selector Cmd = ME->getSelector(); 15736 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 15737 e = ME->getInstanceReceiver(); 15738 if (!e) 15739 return nullptr; 15740 e = e->IgnoreParenCasts(); 15741 } 15742 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 15743 if (CE->getNumArgs() == 1) { 15744 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 15745 if (Fn) { 15746 const IdentifierInfo *FnI = Fn->getIdentifier(); 15747 if (FnI && FnI->isStr("_Block_copy")) { 15748 e = CE->getArg(0)->IgnoreParenCasts(); 15749 } 15750 } 15751 } 15752 } 15753 15754 BlockExpr *block = dyn_cast<BlockExpr>(e); 15755 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 15756 return nullptr; 15757 15758 FindCaptureVisitor visitor(S.Context, owner.Variable); 15759 visitor.Visit(block->getBlockDecl()->getBody()); 15760 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 15761 } 15762 15763 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 15764 RetainCycleOwner &owner) { 15765 assert(capturer); 15766 assert(owner.Variable && owner.Loc.isValid()); 15767 15768 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 15769 << owner.Variable << capturer->getSourceRange(); 15770 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 15771 << owner.Indirect << owner.Range; 15772 } 15773 15774 /// Check for a keyword selector that starts with the word 'add' or 15775 /// 'set'. 15776 static bool isSetterLikeSelector(Selector sel) { 15777 if (sel.isUnarySelector()) return false; 15778 15779 StringRef str = sel.getNameForSlot(0); 15780 while (!str.empty() && str.front() == '_') str = str.substr(1); 15781 if (str.startswith("set")) 15782 str = str.substr(3); 15783 else if (str.startswith("add")) { 15784 // Specially allow 'addOperationWithBlock:'. 15785 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 15786 return false; 15787 str = str.substr(3); 15788 } 15789 else 15790 return false; 15791 15792 if (str.empty()) return true; 15793 return !isLowercase(str.front()); 15794 } 15795 15796 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 15797 ObjCMessageExpr *Message) { 15798 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 15799 Message->getReceiverInterface(), 15800 NSAPI::ClassId_NSMutableArray); 15801 if (!IsMutableArray) { 15802 return None; 15803 } 15804 15805 Selector Sel = Message->getSelector(); 15806 15807 Optional<NSAPI::NSArrayMethodKind> MKOpt = 15808 S.NSAPIObj->getNSArrayMethodKind(Sel); 15809 if (!MKOpt) { 15810 return None; 15811 } 15812 15813 NSAPI::NSArrayMethodKind MK = *MKOpt; 15814 15815 switch (MK) { 15816 case NSAPI::NSMutableArr_addObject: 15817 case NSAPI::NSMutableArr_insertObjectAtIndex: 15818 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 15819 return 0; 15820 case NSAPI::NSMutableArr_replaceObjectAtIndex: 15821 return 1; 15822 15823 default: 15824 return None; 15825 } 15826 15827 return None; 15828 } 15829 15830 static 15831 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 15832 ObjCMessageExpr *Message) { 15833 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 15834 Message->getReceiverInterface(), 15835 NSAPI::ClassId_NSMutableDictionary); 15836 if (!IsMutableDictionary) { 15837 return None; 15838 } 15839 15840 Selector Sel = Message->getSelector(); 15841 15842 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 15843 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 15844 if (!MKOpt) { 15845 return None; 15846 } 15847 15848 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 15849 15850 switch (MK) { 15851 case NSAPI::NSMutableDict_setObjectForKey: 15852 case NSAPI::NSMutableDict_setValueForKey: 15853 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 15854 return 0; 15855 15856 default: 15857 return None; 15858 } 15859 15860 return None; 15861 } 15862 15863 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 15864 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 15865 Message->getReceiverInterface(), 15866 NSAPI::ClassId_NSMutableSet); 15867 15868 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 15869 Message->getReceiverInterface(), 15870 NSAPI::ClassId_NSMutableOrderedSet); 15871 if (!IsMutableSet && !IsMutableOrderedSet) { 15872 return None; 15873 } 15874 15875 Selector Sel = Message->getSelector(); 15876 15877 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 15878 if (!MKOpt) 
{ 15879 return None; 15880 } 15881 15882 NSAPI::NSSetMethodKind MK = *MKOpt; 15883 15884 switch (MK) { 15885 case NSAPI::NSMutableSet_addObject: 15886 case NSAPI::NSOrderedSet_setObjectAtIndex: 15887 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 15888 case NSAPI::NSOrderedSet_insertObjectAtIndex: 15889 return 0; 15890 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 15891 return 1; 15892 } 15893 15894 return None; 15895 } 15896 15897 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 15898 if (!Message->isInstanceMessage()) { 15899 return; 15900 } 15901 15902 Optional<int> ArgOpt; 15903 15904 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 15905 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 15906 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 15907 return; 15908 } 15909 15910 int ArgIndex = *ArgOpt; 15911 15912 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 15913 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 15914 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 15915 } 15916 15917 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 15918 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15919 if (ArgRE->isObjCSelfExpr()) { 15920 Diag(Message->getSourceRange().getBegin(), 15921 diag::warn_objc_circular_container) 15922 << ArgRE->getDecl() << StringRef("'super'"); 15923 } 15924 } 15925 } else { 15926 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 15927 15928 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 15929 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 15930 } 15931 15932 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 15933 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15934 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 15935 ValueDecl *Decl = ReceiverRE->getDecl(); 15936 Diag(Message->getSourceRange().getBegin(), 15937 diag::warn_objc_circular_container) 15938 << Decl << Decl; 15939 if (!ArgRE->isObjCSelfExpr()) { 15940 Diag(Decl->getLocation(), 15941 diag::note_objc_circular_container_declared_here) 15942 << Decl; 15943 } 15944 } 15945 } 15946 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 15947 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 15948 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 15949 ObjCIvarDecl *Decl = IvarRE->getDecl(); 15950 Diag(Message->getSourceRange().getBegin(), 15951 diag::warn_objc_circular_container) 15952 << Decl << Decl; 15953 Diag(Decl->getLocation(), 15954 diag::note_objc_circular_container_declared_here) 15955 << Decl; 15956 } 15957 } 15958 } 15959 } 15960 } 15961 15962 /// Check a message send to see if it's likely to cause a retain cycle. 15963 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 15964 // Only check instance methods whose selector looks like a setter. 15965 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 15966 return; 15967 15968 // Try to find a variable that the receiver is strongly owned by. 
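  // For illustration (hypothetical user code), the classic pattern being
  // diagnosed is:
  //   [myOperation setCompletionBlock:^{ [myOperation cancel]; }];
  // where 'myOperation' is a __strong local variable: the receiver strongly
  // retains the block via the setter-like message, and the block strongly
  // captures the receiver, forming a cycle.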
15969 RetainCycleOwner owner; 15970 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 15971 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 15972 return; 15973 } else { 15974 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 15975 owner.Variable = getCurMethodDecl()->getSelfDecl(); 15976 owner.Loc = msg->getSuperLoc(); 15977 owner.Range = msg->getSuperLoc(); 15978 } 15979 15980 // Check whether the receiver is captured by any of the arguments. 15981 const ObjCMethodDecl *MD = msg->getMethodDecl(); 15982 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 15983 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 15984 // noescape blocks should not be retained by the method. 15985 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 15986 continue; 15987 return diagnoseRetainCycle(*this, capturer, owner); 15988 } 15989 } 15990 } 15991 15992 /// Check a property assign to see if it's likely to cause a retain cycle. 15993 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 15994 RetainCycleOwner owner; 15995 if (!findRetainCycleOwner(*this, receiver, owner)) 15996 return; 15997 15998 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 15999 diagnoseRetainCycle(*this, capturer, owner); 16000 } 16001 16002 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 16003 RetainCycleOwner Owner; 16004 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 16005 return; 16006 16007 // Because we don't have an expression for the variable, we have to set the 16008 // location explicitly here. 16009 Owner.Loc = Var->getLocation(); 16010 Owner.Range = Var->getSourceRange(); 16011 16012 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 16013 diagnoseRetainCycle(*this, Capturer, Owner); 16014 } 16015 16016 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 16017 Expr *RHS, bool isProperty) { 16018 // Check if RHS is an Objective-C object literal, which also can get 16019 // immediately zapped in a weak reference. Note that we explicitly 16020 // allow ObjCStringLiterals, since those are designed to never really die. 16021 RHS = RHS->IgnoreParenImpCasts(); 16022 16023 // This enum needs to match with the 'select' in 16024 // warn_objc_arc_literal_assign (off-by-1). 16025 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 16026 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 16027 return false; 16028 16029 S.Diag(Loc, diag::warn_arc_literal_assign) 16030 << (unsigned) Kind 16031 << (isProperty ? 0 : 1) 16032 << RHS->getSourceRange(); 16033 16034 return true; 16035 } 16036 16037 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 16038 Qualifiers::ObjCLifetime LT, 16039 Expr *RHS, bool isProperty) { 16040 // Strip off any implicit cast added to get to the one ARC-specific. 16041 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16042 if (cast->getCastKind() == CK_ARCConsumeObject) { 16043 S.Diag(Loc, diag::warn_arc_retained_assign) 16044 << (LT == Qualifiers::OCL_ExplicitNone) 16045 << (isProperty ? 
0 : 1) 16046 << RHS->getSourceRange(); 16047 return true; 16048 } 16049 RHS = cast->getSubExpr(); 16050 } 16051 16052 if (LT == Qualifiers::OCL_Weak && 16053 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 16054 return true; 16055 16056 return false; 16057 } 16058 16059 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 16060 QualType LHS, Expr *RHS) { 16061 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 16062 16063 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 16064 return false; 16065 16066 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 16067 return true; 16068 16069 return false; 16070 } 16071 16072 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 16073 Expr *LHS, Expr *RHS) { 16074 QualType LHSType; 16075 // PropertyRef on LHS type need be directly obtained from 16076 // its declaration as it has a PseudoType. 16077 ObjCPropertyRefExpr *PRE 16078 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 16079 if (PRE && !PRE->isImplicitProperty()) { 16080 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16081 if (PD) 16082 LHSType = PD->getType(); 16083 } 16084 16085 if (LHSType.isNull()) 16086 LHSType = LHS->getType(); 16087 16088 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 16089 16090 if (LT == Qualifiers::OCL_Weak) { 16091 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 16092 getCurFunction()->markSafeWeakUse(LHS); 16093 } 16094 16095 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 16096 return; 16097 16098 // FIXME. Check for other life times. 16099 if (LT != Qualifiers::OCL_None) 16100 return; 16101 16102 if (PRE) { 16103 if (PRE->isImplicitProperty()) 16104 return; 16105 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16106 if (!PD) 16107 return; 16108 16109 unsigned Attributes = PD->getPropertyAttributes(); 16110 if (Attributes & ObjCPropertyAttribute::kind_assign) { 16111 // when 'assign' attribute was not explicitly specified 16112 // by user, ignore it and rely on property type itself 16113 // for lifetime info. 16114 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 16115 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 16116 LHSType->isObjCRetainableType()) 16117 return; 16118 16119 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16120 if (cast->getCastKind() == CK_ARCConsumeObject) { 16121 Diag(Loc, diag::warn_arc_retained_property_assign) 16122 << RHS->getSourceRange(); 16123 return; 16124 } 16125 RHS = cast->getSubExpr(); 16126 } 16127 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 16128 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 16129 return; 16130 } 16131 } 16132 } 16133 16134 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 16135 16136 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 16137 SourceLocation StmtLoc, 16138 const NullStmt *Body) { 16139 // Do not warn if the body is a macro that expands to nothing, e.g: 16140 // 16141 // #define CALL(x) 16142 // if (condition) 16143 // CALL(0); 16144 if (Body->hasLeadingEmptyMacro()) 16145 return false; 16146 16147 // Get line numbers of statement and body. 
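  // For illustration (hypothetical user code), the goal is to warn only when
  // the stray semicolon shares a line with the statement header:
  //   if (cond); foo();          // likely a typo, warn
  // but not when the empty body is clearly deliberate:
  //   while (keepSpinning())
  //     ;                        // on its own line, no warning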
16148 bool StmtLineInvalid; 16149 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 16150 &StmtLineInvalid); 16151 if (StmtLineInvalid) 16152 return false; 16153 16154 bool BodyLineInvalid; 16155 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 16156 &BodyLineInvalid); 16157 if (BodyLineInvalid) 16158 return false; 16159 16160 // Warn if null statement and body are on the same line. 16161 if (StmtLine != BodyLine) 16162 return false; 16163 16164 return true; 16165 } 16166 16167 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 16168 const Stmt *Body, 16169 unsigned DiagID) { 16170 // Since this is a syntactic check, don't emit diagnostic for template 16171 // instantiations, this just adds noise. 16172 if (CurrentInstantiationScope) 16173 return; 16174 16175 // The body should be a null statement. 16176 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16177 if (!NBody) 16178 return; 16179 16180 // Do the usual checks. 16181 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16182 return; 16183 16184 Diag(NBody->getSemiLoc(), DiagID); 16185 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16186 } 16187 16188 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 16189 const Stmt *PossibleBody) { 16190 assert(!CurrentInstantiationScope); // Ensured by caller 16191 16192 SourceLocation StmtLoc; 16193 const Stmt *Body; 16194 unsigned DiagID; 16195 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 16196 StmtLoc = FS->getRParenLoc(); 16197 Body = FS->getBody(); 16198 DiagID = diag::warn_empty_for_body; 16199 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 16200 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 16201 Body = WS->getBody(); 16202 DiagID = diag::warn_empty_while_body; 16203 } else 16204 return; // Neither `for' nor `while'. 16205 16206 // The body should be a null statement. 16207 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16208 if (!NBody) 16209 return; 16210 16211 // Skip expensive checks if diagnostic is disabled. 16212 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 16213 return; 16214 16215 // Do the usual checks. 16216 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16217 return; 16218 16219 // `for(...);' and `while(...);' are popular idioms, so in order to keep 16220 // noise level low, emit diagnostics only if for/while is followed by a 16221 // CompoundStmt, e.g.: 16222 // for (int i = 0; i < n; i++); 16223 // { 16224 // a(i); 16225 // } 16226 // or if for/while is followed by a statement with more indentation 16227 // than for/while itself: 16228 // for (int i = 0; i < n; i++); 16229 // a(i); 16230 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 16231 if (!ProbableTypo) { 16232 bool BodyColInvalid; 16233 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 16234 PossibleBody->getBeginLoc(), &BodyColInvalid); 16235 if (BodyColInvalid) 16236 return; 16237 16238 bool StmtColInvalid; 16239 unsigned StmtCol = 16240 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 16241 if (StmtColInvalid) 16242 return; 16243 16244 if (BodyCol > StmtCol) 16245 ProbableTypo = true; 16246 } 16247 16248 if (ProbableTypo) { 16249 Diag(NBody->getSemiLoc(), DiagID); 16250 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16251 } 16252 } 16253 16254 //===--- CHECK: Warn on self move with std::move. -------------------------===// 16255 16256 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
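///
/// For illustration (hypothetical user code), 'x = std::move(x);' is
/// diagnosed, as is 'a.b = std::move(a.b);' when the member chains and their
/// bases match.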
void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                            SourceLocation OpLoc) {
  if (Diags.isIgnored(diag::warn_self_move, OpLoc))
    return;

  if (inTemplateInstantiation())
    return;

  // Strip parens and casts away.
  LHSExpr = LHSExpr->IgnoreParenImpCasts();
  RHSExpr = RHSExpr->IgnoreParenImpCasts();

  // Check for a call expression.
  const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
  if (!CE || CE->getNumArgs() != 1)
    return;

  // Check for a call to std::move.
  if (!CE->isCallToStdMove())
    return;

  // Get the argument from std::move.
  RHSExpr = CE->getArg(0);

  const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
  const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);

  // Two DeclRefExpr's, check that the decls are the same.
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
    return;
  }

  // Member variables require a different approach to check for self moves.
  // MemberExpr's are the same if every nested MemberExpr refers to the same
  // Decl, and the base Expr's are either DeclRefExpr's referring to the same
  // Decl or are CXXThisExpr's.
  const Expr *LHSBase = LHSExpr;
  const Expr *RHSBase = RHSExpr;
  const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
  const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
  if (!LHSME || !RHSME)
    return;

  while (LHSME && RHSME) {
    if (LHSME->getMemberDecl()->getCanonicalDecl() !=
        RHSME->getMemberDecl()->getCanonicalDecl())
      return;

    LHSBase = LHSME->getBase();
    RHSBase = RHSME->getBase();
    LHSME = dyn_cast<MemberExpr>(LHSBase);
    RHSME = dyn_cast<MemberExpr>(RHSBase);
  }

  LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
  RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
    return;
  }

  if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
}

//===--- Layout compatibility ----------------------------------------------//

static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);

/// Check if two enumeration types are layout-compatible.
static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
  // C++11 [dcl.enum] p8:
  //   Two enumeration types are layout-compatible if they have the same
  //   underlying type.
  return ED1->isComplete() && ED2->isComplete() &&
         C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
}

/// Check if two fields are layout-compatible.
16355 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 16356 FieldDecl *Field2) { 16357 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 16358 return false; 16359 16360 if (Field1->isBitField() != Field2->isBitField()) 16361 return false; 16362 16363 if (Field1->isBitField()) { 16364 // Make sure that the bit-fields are the same length. 16365 unsigned Bits1 = Field1->getBitWidthValue(C); 16366 unsigned Bits2 = Field2->getBitWidthValue(C); 16367 16368 if (Bits1 != Bits2) 16369 return false; 16370 } 16371 16372 return true; 16373 } 16374 16375 /// Check if two standard-layout structs are layout-compatible. 16376 /// (C++11 [class.mem] p17) 16377 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 16378 RecordDecl *RD2) { 16379 // If both records are C++ classes, check that base classes match. 16380 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 16381 // If one of records is a CXXRecordDecl we are in C++ mode, 16382 // thus the other one is a CXXRecordDecl, too. 16383 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 16384 // Check number of base classes. 16385 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 16386 return false; 16387 16388 // Check the base classes. 16389 for (CXXRecordDecl::base_class_const_iterator 16390 Base1 = D1CXX->bases_begin(), 16391 BaseEnd1 = D1CXX->bases_end(), 16392 Base2 = D2CXX->bases_begin(); 16393 Base1 != BaseEnd1; 16394 ++Base1, ++Base2) { 16395 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 16396 return false; 16397 } 16398 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 16399 // If only RD2 is a C++ class, it should have zero base classes. 16400 if (D2CXX->getNumBases() > 0) 16401 return false; 16402 } 16403 16404 // Check the fields. 16405 RecordDecl::field_iterator Field2 = RD2->field_begin(), 16406 Field2End = RD2->field_end(), 16407 Field1 = RD1->field_begin(), 16408 Field1End = RD1->field_end(); 16409 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 16410 if (!isLayoutCompatible(C, *Field1, *Field2)) 16411 return false; 16412 } 16413 if (Field1 != Field1End || Field2 != Field2End) 16414 return false; 16415 16416 return true; 16417 } 16418 16419 /// Check if two standard-layout unions are layout-compatible. 16420 /// (C++11 [class.mem] p18) 16421 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 16422 RecordDecl *RD2) { 16423 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 16424 for (auto *Field2 : RD2->fields()) 16425 UnmatchedFields.insert(Field2); 16426 16427 for (auto *Field1 : RD1->fields()) { 16428 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 16429 I = UnmatchedFields.begin(), 16430 E = UnmatchedFields.end(); 16431 16432 for ( ; I != E; ++I) { 16433 if (isLayoutCompatible(C, Field1, *I)) { 16434 bool Result = UnmatchedFields.erase(*I); 16435 (void) Result; 16436 assert(Result); 16437 break; 16438 } 16439 } 16440 if (I == E) 16441 return false; 16442 } 16443 16444 return UnmatchedFields.empty(); 16445 } 16446 16447 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 16448 RecordDecl *RD2) { 16449 if (RD1->isUnion() != RD2->isUnion()) 16450 return false; 16451 16452 if (RD1->isUnion()) 16453 return isLayoutCompatibleUnion(C, RD1, RD2); 16454 else 16455 return isLayoutCompatibleStruct(C, RD1, RD2); 16456 } 16457 16458 /// Check if two types are layout-compatible in C++11 sense. 
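///
/// For illustration: 'struct A { int x; };' and 'struct B { int y; };' are
/// layout-compatible (field names do not matter), whereas a struct with a
/// different number of fields, or with a corresponding field of a different
/// type, is not.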
16459 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 16460 if (T1.isNull() || T2.isNull()) 16461 return false; 16462 16463 // C++11 [basic.types] p11: 16464 // If two types T1 and T2 are the same type, then T1 and T2 are 16465 // layout-compatible types. 16466 if (C.hasSameType(T1, T2)) 16467 return true; 16468 16469 T1 = T1.getCanonicalType().getUnqualifiedType(); 16470 T2 = T2.getCanonicalType().getUnqualifiedType(); 16471 16472 const Type::TypeClass TC1 = T1->getTypeClass(); 16473 const Type::TypeClass TC2 = T2->getTypeClass(); 16474 16475 if (TC1 != TC2) 16476 return false; 16477 16478 if (TC1 == Type::Enum) { 16479 return isLayoutCompatible(C, 16480 cast<EnumType>(T1)->getDecl(), 16481 cast<EnumType>(T2)->getDecl()); 16482 } else if (TC1 == Type::Record) { 16483 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 16484 return false; 16485 16486 return isLayoutCompatible(C, 16487 cast<RecordType>(T1)->getDecl(), 16488 cast<RecordType>(T2)->getDecl()); 16489 } 16490 16491 return false; 16492 } 16493 16494 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 16495 16496 /// Given a type tag expression find the type tag itself. 16497 /// 16498 /// \param TypeExpr Type tag expression, as it appears in user's code. 16499 /// 16500 /// \param VD Declaration of an identifier that appears in a type tag. 16501 /// 16502 /// \param MagicValue Type tag magic value. 16503 /// 16504 /// \param isConstantEvaluated whether the evalaution should be performed in 16505 16506 /// constant context. 16507 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 16508 const ValueDecl **VD, uint64_t *MagicValue, 16509 bool isConstantEvaluated) { 16510 while(true) { 16511 if (!TypeExpr) 16512 return false; 16513 16514 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 16515 16516 switch (TypeExpr->getStmtClass()) { 16517 case Stmt::UnaryOperatorClass: { 16518 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 16519 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 16520 TypeExpr = UO->getSubExpr(); 16521 continue; 16522 } 16523 return false; 16524 } 16525 16526 case Stmt::DeclRefExprClass: { 16527 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 16528 *VD = DRE->getDecl(); 16529 return true; 16530 } 16531 16532 case Stmt::IntegerLiteralClass: { 16533 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 16534 llvm::APInt MagicValueAPInt = IL->getValue(); 16535 if (MagicValueAPInt.getActiveBits() <= 64) { 16536 *MagicValue = MagicValueAPInt.getZExtValue(); 16537 return true; 16538 } else 16539 return false; 16540 } 16541 16542 case Stmt::BinaryConditionalOperatorClass: 16543 case Stmt::ConditionalOperatorClass: { 16544 const AbstractConditionalOperator *ACO = 16545 cast<AbstractConditionalOperator>(TypeExpr); 16546 bool Result; 16547 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 16548 isConstantEvaluated)) { 16549 if (Result) 16550 TypeExpr = ACO->getTrueExpr(); 16551 else 16552 TypeExpr = ACO->getFalseExpr(); 16553 continue; 16554 } 16555 return false; 16556 } 16557 16558 case Stmt::BinaryOperatorClass: { 16559 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 16560 if (BO->getOpcode() == BO_Comma) { 16561 TypeExpr = BO->getRHS(); 16562 continue; 16563 } 16564 return false; 16565 } 16566 16567 default: 16568 return false; 16569 } 16570 } 16571 } 16572 16573 /// Retrieve the C type corresponding to type tag TypeExpr. 
16574 /// 16575 /// \param TypeExpr Expression that specifies a type tag. 16576 /// 16577 /// \param MagicValues Registered magic values. 16578 /// 16579 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 16580 /// kind. 16581 /// 16582 /// \param TypeInfo Information about the corresponding C type. 16583 /// 16584 /// \param isConstantEvaluated whether the evalaution should be performed in 16585 /// constant context. 16586 /// 16587 /// \returns true if the corresponding C type was found. 16588 static bool GetMatchingCType( 16589 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 16590 const ASTContext &Ctx, 16591 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 16592 *MagicValues, 16593 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 16594 bool isConstantEvaluated) { 16595 FoundWrongKind = false; 16596 16597 // Variable declaration that has type_tag_for_datatype attribute. 16598 const ValueDecl *VD = nullptr; 16599 16600 uint64_t MagicValue; 16601 16602 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 16603 return false; 16604 16605 if (VD) { 16606 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 16607 if (I->getArgumentKind() != ArgumentKind) { 16608 FoundWrongKind = true; 16609 return false; 16610 } 16611 TypeInfo.Type = I->getMatchingCType(); 16612 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 16613 TypeInfo.MustBeNull = I->getMustBeNull(); 16614 return true; 16615 } 16616 return false; 16617 } 16618 16619 if (!MagicValues) 16620 return false; 16621 16622 llvm::DenseMap<Sema::TypeTagMagicValue, 16623 Sema::TypeTagData>::const_iterator I = 16624 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 16625 if (I == MagicValues->end()) 16626 return false; 16627 16628 TypeInfo = I->second; 16629 return true; 16630 } 16631 16632 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 16633 uint64_t MagicValue, QualType Type, 16634 bool LayoutCompatible, 16635 bool MustBeNull) { 16636 if (!TypeTagForDatatypeMagicValues) 16637 TypeTagForDatatypeMagicValues.reset( 16638 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 16639 16640 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 16641 (*TypeTagForDatatypeMagicValues)[Magic] = 16642 TypeTagData(Type, LayoutCompatible, MustBeNull); 16643 } 16644 16645 static bool IsSameCharType(QualType T1, QualType T2) { 16646 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 16647 if (!BT1) 16648 return false; 16649 16650 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 16651 if (!BT2) 16652 return false; 16653 16654 BuiltinType::Kind T1Kind = BT1->getKind(); 16655 BuiltinType::Kind T2Kind = BT2->getKind(); 16656 16657 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 16658 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 16659 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 16660 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 16661 } 16662 16663 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 16664 const ArrayRef<const Expr *> ExprArgs, 16665 SourceLocation CallSiteLoc) { 16666 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 16667 bool IsPointerAttr = Attr->getIsPointer(); 16668 16669 // Retrieve the argument representing the 'type_tag'. 
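  // For illustration (hypothetical user code, MPI-style): the attribute names
  // both the argument carrying the type tag and the argument checked against
  // it, e.g.
  //   int MPI_Send(void *buf, int count, MPI_Datatype datatype)
  //       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
  // where argument 3 is the type tag and argument 1 is the pointer it
  // describes.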
16670 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 16671 if (TypeTagIdxAST >= ExprArgs.size()) { 16672 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 16673 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 16674 return; 16675 } 16676 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 16677 bool FoundWrongKind; 16678 TypeTagData TypeInfo; 16679 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 16680 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 16681 TypeInfo, isConstantEvaluated())) { 16682 if (FoundWrongKind) 16683 Diag(TypeTagExpr->getExprLoc(), 16684 diag::warn_type_tag_for_datatype_wrong_kind) 16685 << TypeTagExpr->getSourceRange(); 16686 return; 16687 } 16688 16689 // Retrieve the argument representing the 'arg_idx'. 16690 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 16691 if (ArgumentIdxAST >= ExprArgs.size()) { 16692 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 16693 << 1 << Attr->getArgumentIdx().getSourceIndex(); 16694 return; 16695 } 16696 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 16697 if (IsPointerAttr) { 16698 // Skip implicit cast of pointer to `void *' (as a function argument). 16699 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 16700 if (ICE->getType()->isVoidPointerType() && 16701 ICE->getCastKind() == CK_BitCast) 16702 ArgumentExpr = ICE->getSubExpr(); 16703 } 16704 QualType ArgumentType = ArgumentExpr->getType(); 16705 16706 // Passing a `void*' pointer shouldn't trigger a warning. 16707 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 16708 return; 16709 16710 if (TypeInfo.MustBeNull) { 16711 // Type tag with matching void type requires a null pointer. 16712 if (!ArgumentExpr->isNullPointerConstant(Context, 16713 Expr::NPC_ValueDependentIsNotNull)) { 16714 Diag(ArgumentExpr->getExprLoc(), 16715 diag::warn_type_safety_null_pointer_required) 16716 << ArgumentKind->getName() 16717 << ArgumentExpr->getSourceRange() 16718 << TypeTagExpr->getSourceRange(); 16719 } 16720 return; 16721 } 16722 16723 QualType RequiredType = TypeInfo.Type; 16724 if (IsPointerAttr) 16725 RequiredType = Context.getPointerType(RequiredType); 16726 16727 bool mismatch = false; 16728 if (!TypeInfo.LayoutCompatible) { 16729 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 16730 16731 // C++11 [basic.fundamental] p1: 16732 // Plain char, signed char, and unsigned char are three distinct types. 16733 // 16734 // But we treat plain `char' as equivalent to `signed char' or `unsigned 16735 // char' depending on the current char signedness mode. 
    if (mismatch)
      if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
                                           RequiredType->getPointeeType())) ||
          (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
        mismatch = false;
  } else if (IsPointerAttr)
    mismatch = !isLayoutCompatible(Context,
                                   ArgumentType->getPointeeType(),
                                   RequiredType->getPointeeType());
  else
    mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);

  if (mismatch)
    Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
        << ArgumentType << ArgumentKind
        << TypeInfo.LayoutCompatible << RequiredType
        << ArgumentExpr->getSourceRange()
        << TypeTagExpr->getSourceRange();
}

void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                         CharUnits Alignment) {
  MisalignedMembers.emplace_back(E, RD, MD, Alignment);
}

void Sema::DiagnoseMisalignedMembers() {
  for (MisalignedMember &m : MisalignedMembers) {
    const NamedDecl *ND = m.RD;
    if (ND->getName().empty()) {
      if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
        ND = TD;
    }
    Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
        << m.MD << ND << m.E->getSourceRange();
  }
  MisalignedMembers.clear();
}

void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
  E = E->IgnoreParens();
  if (!T->isPointerType() && !T->isIntegerType())
    return;
  if (isa<UnaryOperator>(E) &&
      cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
    auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
    if (isa<MemberExpr>(Op)) {
      auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
      if (MA != MisalignedMembers.end() &&
          (T->isIntegerType() ||
           (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
                                   Context.getTypeAlignInChars(
                                       T->getPointeeType()) <= MA->Alignment))))
        MisalignedMembers.erase(MA);
    }
  }
}

void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecl's like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (BaseType->isDependentType())
      return;
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
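    // (Illustrative only: a member function or a static data member anywhere
    // in the chain ends the analysis with the early return below.)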
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access.
  CharUnits Offset;
  for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain))
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD));

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // It may fulfill the offset, but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit.
    // Intuitively, walking the chain of member expressions from right to
    // left, we start with the required alignment (as required by the field
    // type), but some packed attribute in that chain has reduced the
    // alignment. It may happen that another packed structure increases it
    // again, but if we are here such an increase has not been enough. So
    // pointing at the first FieldDecl that either is packed itself or whose
    // RecordDecl is packed seems reasonable.
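    //
    // For instance (illustrative only), given
    //   struct __attribute__((packed)) Inner { char c; int i; };
    //   struct Outer { Inner in; } o;
    // taking the address of o.in.i blames 'i': its parent 'Inner' is packed,
    // so 'i' may end up with alignment 1 rather than alignof(int).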
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}

void Sema::CheckAddressOfPackedMember(Expr *rhs) {
  using namespace std::placeholders;

  RefersToMemberWithReducedAlignment(
      rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
                     _2, _3, _4));
}

// Check if \p Ty is a valid type for the elementwise math builtins. If it is
// not a valid type, emit an error message and return true. Otherwise return
// false.
static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
                                        QualType Ty) {
  if (!Ty->getAs<VectorType>() &&
      !ConstantMatrixType::isValidElementType(Ty)) {
    S.Diag(Loc, diag::err_builtin_invalid_arg_type)
        << 1 << /* vector, integer or float ty*/ 0 << Ty;
    return true;
  }
  return false;
}

bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  QualType TyA = A.get()->getType();

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setType(TyA);
  return false;
}

bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult A = TheCall->getArg(0);
  ExprResult B = TheCall->getArg(1);
  // Do standard promotions between the two arguments, returning their common
  // type.
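  //
  // For example (illustrative only), in a call such as
  //   __builtin_elementwise_max(SomeInt, SomeDouble)
  // the scalar operands are converted to their common type 'double' here.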
  QualType Res =
      UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison);
  if (A.isInvalid() || B.isInvalid())
    return true;

  QualType TyA = A.get()->getType();
  QualType TyB = B.get()->getType();

  if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType())
    return Diag(A.get()->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << TyA << TyB;

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setArg(0, A.get());
  TheCall->setArg(1, B.get());
  TheCall->setType(Res);
  return false;
}

bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  return false;
}

ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
                                            ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 1))
    return ExprError();

  ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
  if (MatrixArg.isInvalid())
    return MatrixArg;
  Expr *Matrix = MatrixArg.get();

  auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
  if (!MType) {
    Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /* matrix ty*/ 1 << Matrix->getType();
    return ExprError();
  }

  // Create returned matrix type by swapping rows and columns of the argument
  // matrix type.
  QualType ResultType = Context.getConstantMatrixType(
      MType->getElementType(), MType->getNumColumns(), MType->getNumRows());

  // Change the return type to the type of the returned matrix.
  TheCall->setType(ResultType);

  // Update call argument to use the possibly converted matrix argument.
  TheCall->setArg(0, Matrix);
  return CallResult;
}

// Get and verify the matrix dimensions.
static llvm::Optional<unsigned>
getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
  SourceLocation ErrorPos;
  Optional<llvm::APSInt> Value =
      Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
  if (!Value) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
        << Name;
    return {};
  }
  uint64_t Dim = Value->getZExtValue();
  if (!ConstantMatrixType::isDimensionValid(Dim)) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
        << Name << ConstantMatrixType::getMaxElementsPerDimension();
    return {};
  }
  return Dim;
}

ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
                                                  ExprResult CallResult) {
  if (!getLangOpts().MatrixTypes) {
    Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
    return ExprError();
  }

  if (checkArgCount(*this, TheCall, 4))
    return ExprError();

  unsigned PtrArgIdx = 0;
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *RowsExpr = TheCall->getArg(1);
  Expr *ColumnsExpr = TheCall->getArg(2);
  Expr *StrideExpr = TheCall->getArg(3);

  bool ArgError = false;

  // Check pointer argument.
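  //
  // Illustrative only (hypothetical user code, not part of this file):
  //   __builtin_matrix_column_major_load(Ptr, 3, 2, Stride)
  // loads a 3x2 matrix whose element type is the pointee type of 'Ptr'.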
  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(0, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  QualType ElementTy;
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    ElementTy = PtrTy->getPointeeType().getUnqualifiedType();

    if (!ConstantMatrixType::isValidElementType(ElementTy)) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << PtrArgIdx + 1 << /* pointer to element ty*/ 2
          << PtrExpr->getType();
      ArgError = true;
    }
  }

  // Apply default lvalue conversions and convert the expression to size_t.
  auto ApplyArgumentConversions = [this](Expr *E) {
    ExprResult Conv = DefaultLvalueConversion(E);
    if (Conv.isInvalid())
      return Conv;

    return tryConvertExprToType(Conv.get(), Context.getSizeType());
  };

  // Apply conversion to row and column expressions.
  ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
  if (!RowsConv.isInvalid()) {
    RowsExpr = RowsConv.get();
    TheCall->setArg(1, RowsExpr);
  } else
    RowsExpr = nullptr;

  ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
  if (!ColumnsConv.isInvalid()) {
    ColumnsExpr = ColumnsConv.get();
    TheCall->setArg(2, ColumnsExpr);
  } else
    ColumnsExpr = nullptr;

  // If any part of the result matrix type is still pending, just use
  // Context.DependentTy, until all parts are resolved.
  if ((RowsExpr && RowsExpr->isTypeDependent()) ||
      (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
    TheCall->setType(Context.DependentTy);
    return CallResult;
  }

  // Check row and column dimensions.
  llvm::Optional<unsigned> MaybeRows;
  if (RowsExpr)
    MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);

  llvm::Optional<unsigned> MaybeColumns;
  if (ColumnsExpr)
    MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);

  // Check stride argument.
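  // The stride is the number of elements between the starts of consecutive
  // columns in memory, so it must be at least the number of rows; e.g.
  // (illustrative only) a constant stride of 2 for a 3-row matrix is rejected
  // below.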
  ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
  if (StrideConv.isInvalid())
    return ExprError();
  StrideExpr = StrideConv.get();
  TheCall->setArg(3, StrideExpr);

  if (MaybeRows) {
    if (Optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < *MaybeRows) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError || !MaybeRows || !MaybeColumns)
    return ExprError();

  TheCall->setType(
      Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
  return CallResult;
}

ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                                   ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 3))
    return ExprError();

  unsigned PtrArgIdx = 1;
  Expr *MatrixExpr = TheCall->getArg(0);
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *StrideExpr = TheCall->getArg(2);

  bool ArgError = false;

  {
    ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
    if (MatrixConv.isInvalid())
      return MatrixConv;
    MatrixExpr = MatrixConv.get();
    TheCall->setArg(0, MatrixExpr);
  }
  if (MatrixExpr->isTypeDependent()) {
    TheCall->setType(Context.DependentTy);
    return TheCall;
  }

  auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
  if (!MatrixTy) {
    Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /*matrix ty */ 1 << MatrixExpr->getType();
    ArgError = true;
  }

  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(1, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  // Check pointer argument.
  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    QualType ElementTy = PtrTy->getPointeeType();
    if (ElementTy.isConstQualified()) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
      ArgError = true;
    }
    ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
    if (MatrixTy &&
        !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
      Diag(PtrExpr->getBeginLoc(),
           diag::err_builtin_matrix_pointer_arg_mismatch)
          << ElementTy << MatrixTy->getElementType();
      ArgError = true;
    }
  }

  // Apply default lvalue conversions and convert the stride expression to
  // size_t.
  {
    ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
    if (StrideConv.isInvalid())
      return StrideConv;

    StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
    if (StrideConv.isInvalid())
      return StrideConv;
    StrideExpr = StrideConv.get();
    TheCall->setArg(2, StrideExpr);
  }

  // Check stride argument.
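  // As for the load builtin above, the stride counts elements between the
  // starts of consecutive columns and must be at least the number of rows of
  // the stored matrix.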
  if (MatrixTy) {
    if (Optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < MatrixTy->getNumRows()) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError)
    return ExprError();

  return CallResult;
}

/// CheckTCBEnforcement - Enforces that every function in a named TCB only
/// directly calls other functions in the same TCB as marked by the enforce_tcb
/// and enforce_tcb_leaf attributes.
void Sema::CheckTCBEnforcement(const CallExpr *TheCall,
                               const FunctionDecl *Callee) {
  const FunctionDecl *Caller = getCurFunctionDecl();

  // Calls to builtins are not enforced.
  if (!Caller || !Caller->hasAttr<EnforceTCBAttr>() ||
      Callee->getBuiltinID() != 0)
    return;

  // Search through the enforce_tcb and enforce_tcb_leaf attributes to find
  // all TCBs the callee is a part of.
  llvm::StringSet<> CalleeTCBs;
  for_each(Callee->specific_attrs<EnforceTCBAttr>(),
           [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
  for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(),
           [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });

  // Go through the TCBs the caller is a part of and emit warnings if Caller
  // is in a TCB that the Callee is not.
  for_each(Caller->specific_attrs<EnforceTCBAttr>(), [&](const auto *A) {
    StringRef CallerTCB = A->getTCBName();
    if (CalleeTCBs.count(CallerTCB) == 0) {
      this->Diag(TheCall->getExprLoc(), diag::warn_tcb_enforcement_violation)
          << Callee << CallerTCB;
    }
  });
}
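// Illustrative only (hypothetical user code, not part of this file):
//   void Step(void) __attribute__((enforce_tcb("core")));
//   void Helper(void);                 // not in the "core" TCB
//   void Step(void) { Helper(); }      // warns: TCB enforcement violation
// Marking Helper with enforce_tcb("core") or enforce_tcb_leaf("core") would
// allow the call without a warning.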