//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
  unsigned argCount = call->getNumArgs();
  if (argCount == desiredArgCount) return false;

  if (argCount < desiredArgCount)
    return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /*function call*/ << desiredArgCount << argCount
           << call->getSourceRange();

  // Highlight all the excess arguments.
  SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
                    call->getArg(argCount - 1)->getEndLoc());

  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << desiredArgCount << argCount
         << call->getArg(1)->getSourceRange();
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment)
/// and __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power of two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
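  // Otherwise the alignment must evaluate to a power of two no larger than
  // 2^(BitWidth - 1); an alignment of 1 is accepted but diagnosed as useless.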
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << MaxValue.toString(10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed ExtIntType args larger than 128 bits for the mul overflow
  // builtin until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_ext_int_max_size)
               << 128;
    }
  }

  return false;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[-]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }

    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
/// __builtin_*_chk function, then use the object size argument specified in the
/// source. Otherwise, infer the object size using __builtin_object_size.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // FIXME: There are some more useful checks we could be doing here:
  //  - Evaluate strlen of strcpy arguments, use as object size.

  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  unsigned DiagID = 0;
  bool IsChkVariant = false;
  Optional<llvm::APSInt> UsedSize;
  unsigned SizeIndex, ObjectIndex;
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isAscii() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        UsedSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                       .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          IsChkVariant = true;
          ObjectIndex = 2;
        } else {
          IsChkVariant = false;
          ObjectIndex = 0;
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = TheCall->getNumArgs() - 2;
    ObjectIndex = TheCall->getNumArgs() - 1;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = 1;
    ObjectIndex = 3;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = 1;
    ObjectIndex = 0;
    break;
  }
  }

  llvm::APSInt ObjectSize;
  // For __builtin___*_chk, the object size is explicitly provided by the caller
  // (usually using __builtin_object_size). Use that value to check this call.
  if (IsChkVariant) {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(ObjectIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    ObjectSize = Result.Val.getInt();

    // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
  } else {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    Expr *ObjArg = TheCall->getArg(ObjectIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return;
    // Get the object size in the target's size_t width.
    ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  }

  // Evaluate the number of bytes of the object that this call will use.
  if (!UsedSize) {
    Expr::EvalResult Result;
    Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
    if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    UsedSize = Result.Val.getInt().extOrTrunc(SizeTypeWidth);
  }

  if (UsedSize.getValue().ule(ObjectSize))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << ObjectSize.toString(/*Radix=*/10)
                          << UsedSize.getValue().toString(/*Radix=*/10));
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
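/// Both builtins take a single block argument, e.g.
/// get_kernel_work_group_size(^(void){ ... }).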
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // Check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // We have a block type, check the prototype.
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // We can have a block plus varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // The last two cases have either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // Check the common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // Check the remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected, give the generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument is not a pipe, or if its access
/// qualifier does not match the builtin being called.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we use int as
  // the return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc);

bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  }
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0; // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
1541 case Builtin::BI__builtin_prefetch: 1542 if (SemaBuiltinPrefetch(TheCall)) 1543 return ExprError(); 1544 break; 1545 case Builtin::BI__builtin_alloca_with_align: 1546 if (SemaBuiltinAllocaWithAlign(TheCall)) 1547 return ExprError(); 1548 LLVM_FALLTHROUGH; 1549 case Builtin::BI__builtin_alloca: 1550 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 1551 << TheCall->getDirectCallee(); 1552 break; 1553 case Builtin::BI__assume: 1554 case Builtin::BI__builtin_assume: 1555 if (SemaBuiltinAssume(TheCall)) 1556 return ExprError(); 1557 break; 1558 case Builtin::BI__builtin_assume_aligned: 1559 if (SemaBuiltinAssumeAligned(TheCall)) 1560 return ExprError(); 1561 break; 1562 case Builtin::BI__builtin_dynamic_object_size: 1563 case Builtin::BI__builtin_object_size: 1564 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 1565 return ExprError(); 1566 break; 1567 case Builtin::BI__builtin_longjmp: 1568 if (SemaBuiltinLongjmp(TheCall)) 1569 return ExprError(); 1570 break; 1571 case Builtin::BI__builtin_setjmp: 1572 if (SemaBuiltinSetjmp(TheCall)) 1573 return ExprError(); 1574 break; 1575 case Builtin::BI__builtin_classify_type: 1576 if (checkArgCount(*this, TheCall, 1)) return true; 1577 TheCall->setType(Context.IntTy); 1578 break; 1579 case Builtin::BI__builtin_complex: 1580 if (SemaBuiltinComplex(TheCall)) 1581 return ExprError(); 1582 break; 1583 case Builtin::BI__builtin_constant_p: { 1584 if (checkArgCount(*this, TheCall, 1)) return true; 1585 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 1586 if (Arg.isInvalid()) return true; 1587 TheCall->setArg(0, Arg.get()); 1588 TheCall->setType(Context.IntTy); 1589 break; 1590 } 1591 case Builtin::BI__builtin_launder: 1592 return SemaBuiltinLaunder(*this, TheCall); 1593 case Builtin::BI__sync_fetch_and_add: 1594 case Builtin::BI__sync_fetch_and_add_1: 1595 case Builtin::BI__sync_fetch_and_add_2: 1596 case Builtin::BI__sync_fetch_and_add_4: 1597 case Builtin::BI__sync_fetch_and_add_8: 1598 case Builtin::BI__sync_fetch_and_add_16: 1599 case Builtin::BI__sync_fetch_and_sub: 1600 case Builtin::BI__sync_fetch_and_sub_1: 1601 case Builtin::BI__sync_fetch_and_sub_2: 1602 case Builtin::BI__sync_fetch_and_sub_4: 1603 case Builtin::BI__sync_fetch_and_sub_8: 1604 case Builtin::BI__sync_fetch_and_sub_16: 1605 case Builtin::BI__sync_fetch_and_or: 1606 case Builtin::BI__sync_fetch_and_or_1: 1607 case Builtin::BI__sync_fetch_and_or_2: 1608 case Builtin::BI__sync_fetch_and_or_4: 1609 case Builtin::BI__sync_fetch_and_or_8: 1610 case Builtin::BI__sync_fetch_and_or_16: 1611 case Builtin::BI__sync_fetch_and_and: 1612 case Builtin::BI__sync_fetch_and_and_1: 1613 case Builtin::BI__sync_fetch_and_and_2: 1614 case Builtin::BI__sync_fetch_and_and_4: 1615 case Builtin::BI__sync_fetch_and_and_8: 1616 case Builtin::BI__sync_fetch_and_and_16: 1617 case Builtin::BI__sync_fetch_and_xor: 1618 case Builtin::BI__sync_fetch_and_xor_1: 1619 case Builtin::BI__sync_fetch_and_xor_2: 1620 case Builtin::BI__sync_fetch_and_xor_4: 1621 case Builtin::BI__sync_fetch_and_xor_8: 1622 case Builtin::BI__sync_fetch_and_xor_16: 1623 case Builtin::BI__sync_fetch_and_nand: 1624 case Builtin::BI__sync_fetch_and_nand_1: 1625 case Builtin::BI__sync_fetch_and_nand_2: 1626 case Builtin::BI__sync_fetch_and_nand_4: 1627 case Builtin::BI__sync_fetch_and_nand_8: 1628 case Builtin::BI__sync_fetch_and_nand_16: 1629 case Builtin::BI__sync_add_and_fetch: 1630 case Builtin::BI__sync_add_and_fetch_1: 1631 case Builtin::BI__sync_add_and_fetch_2: 1632 case Builtin::BI__sync_add_and_fetch_4: 1633 
case Builtin::BI__sync_add_and_fetch_8: 1634 case Builtin::BI__sync_add_and_fetch_16: 1635 case Builtin::BI__sync_sub_and_fetch: 1636 case Builtin::BI__sync_sub_and_fetch_1: 1637 case Builtin::BI__sync_sub_and_fetch_2: 1638 case Builtin::BI__sync_sub_and_fetch_4: 1639 case Builtin::BI__sync_sub_and_fetch_8: 1640 case Builtin::BI__sync_sub_and_fetch_16: 1641 case Builtin::BI__sync_and_and_fetch: 1642 case Builtin::BI__sync_and_and_fetch_1: 1643 case Builtin::BI__sync_and_and_fetch_2: 1644 case Builtin::BI__sync_and_and_fetch_4: 1645 case Builtin::BI__sync_and_and_fetch_8: 1646 case Builtin::BI__sync_and_and_fetch_16: 1647 case Builtin::BI__sync_or_and_fetch: 1648 case Builtin::BI__sync_or_and_fetch_1: 1649 case Builtin::BI__sync_or_and_fetch_2: 1650 case Builtin::BI__sync_or_and_fetch_4: 1651 case Builtin::BI__sync_or_and_fetch_8: 1652 case Builtin::BI__sync_or_and_fetch_16: 1653 case Builtin::BI__sync_xor_and_fetch: 1654 case Builtin::BI__sync_xor_and_fetch_1: 1655 case Builtin::BI__sync_xor_and_fetch_2: 1656 case Builtin::BI__sync_xor_and_fetch_4: 1657 case Builtin::BI__sync_xor_and_fetch_8: 1658 case Builtin::BI__sync_xor_and_fetch_16: 1659 case Builtin::BI__sync_nand_and_fetch: 1660 case Builtin::BI__sync_nand_and_fetch_1: 1661 case Builtin::BI__sync_nand_and_fetch_2: 1662 case Builtin::BI__sync_nand_and_fetch_4: 1663 case Builtin::BI__sync_nand_and_fetch_8: 1664 case Builtin::BI__sync_nand_and_fetch_16: 1665 case Builtin::BI__sync_val_compare_and_swap: 1666 case Builtin::BI__sync_val_compare_and_swap_1: 1667 case Builtin::BI__sync_val_compare_and_swap_2: 1668 case Builtin::BI__sync_val_compare_and_swap_4: 1669 case Builtin::BI__sync_val_compare_and_swap_8: 1670 case Builtin::BI__sync_val_compare_and_swap_16: 1671 case Builtin::BI__sync_bool_compare_and_swap: 1672 case Builtin::BI__sync_bool_compare_and_swap_1: 1673 case Builtin::BI__sync_bool_compare_and_swap_2: 1674 case Builtin::BI__sync_bool_compare_and_swap_4: 1675 case Builtin::BI__sync_bool_compare_and_swap_8: 1676 case Builtin::BI__sync_bool_compare_and_swap_16: 1677 case Builtin::BI__sync_lock_test_and_set: 1678 case Builtin::BI__sync_lock_test_and_set_1: 1679 case Builtin::BI__sync_lock_test_and_set_2: 1680 case Builtin::BI__sync_lock_test_and_set_4: 1681 case Builtin::BI__sync_lock_test_and_set_8: 1682 case Builtin::BI__sync_lock_test_and_set_16: 1683 case Builtin::BI__sync_lock_release: 1684 case Builtin::BI__sync_lock_release_1: 1685 case Builtin::BI__sync_lock_release_2: 1686 case Builtin::BI__sync_lock_release_4: 1687 case Builtin::BI__sync_lock_release_8: 1688 case Builtin::BI__sync_lock_release_16: 1689 case Builtin::BI__sync_swap: 1690 case Builtin::BI__sync_swap_1: 1691 case Builtin::BI__sync_swap_2: 1692 case Builtin::BI__sync_swap_4: 1693 case Builtin::BI__sync_swap_8: 1694 case Builtin::BI__sync_swap_16: 1695 return SemaBuiltinAtomicOverloaded(TheCallResult); 1696 case Builtin::BI__sync_synchronize: 1697 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 1698 << TheCall->getCallee()->getSourceRange(); 1699 break; 1700 case Builtin::BI__builtin_nontemporal_load: 1701 case Builtin::BI__builtin_nontemporal_store: 1702 return SemaBuiltinNontemporalOverloaded(TheCallResult); 1703 case Builtin::BI__builtin_memcpy_inline: { 1704 clang::Expr *SizeOp = TheCall->getArg(2); 1705 // We warn about copying to or from `nullptr` pointers when `size` is 1706 // greater than 0. When `size` is value dependent we cannot evaluate its 1707 // value so we bail out. 
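    // For example (hypothetical user code), a call like
    //   __builtin_memcpy_inline(dst, src, 16);
    // gets the non-null checks below, whereas a zero-sized copy does not.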
1708 if (SizeOp->isValueDependent()) 1709 break; 1710 if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) { 1711 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 1712 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 1713 } 1714 break; 1715 } 1716 #define BUILTIN(ID, TYPE, ATTRS) 1717 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 1718 case Builtin::BI##ID: \ 1719 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 1720 #include "clang/Basic/Builtins.def" 1721 case Builtin::BI__annotation: 1722 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 1723 return ExprError(); 1724 break; 1725 case Builtin::BI__builtin_annotation: 1726 if (SemaBuiltinAnnotation(*this, TheCall)) 1727 return ExprError(); 1728 break; 1729 case Builtin::BI__builtin_addressof: 1730 if (SemaBuiltinAddressof(*this, TheCall)) 1731 return ExprError(); 1732 break; 1733 case Builtin::BI__builtin_is_aligned: 1734 case Builtin::BI__builtin_align_up: 1735 case Builtin::BI__builtin_align_down: 1736 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 1737 return ExprError(); 1738 break; 1739 case Builtin::BI__builtin_add_overflow: 1740 case Builtin::BI__builtin_sub_overflow: 1741 case Builtin::BI__builtin_mul_overflow: 1742 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 1743 return ExprError(); 1744 break; 1745 case Builtin::BI__builtin_operator_new: 1746 case Builtin::BI__builtin_operator_delete: { 1747 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 1748 ExprResult Res = 1749 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 1750 if (Res.isInvalid()) 1751 CorrectDelayedTyposInExpr(TheCallResult.get()); 1752 return Res; 1753 } 1754 case Builtin::BI__builtin_dump_struct: { 1755 // We first want to ensure we are called with 2 arguments 1756 if (checkArgCount(*this, TheCall, 2)) 1757 return ExprError(); 1758 // Ensure that the first argument is of type 'struct XX *' 1759 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts(); 1760 const QualType PtrArgType = PtrArg->getType(); 1761 if (!PtrArgType->isPointerType() || 1762 !PtrArgType->getPointeeType()->isRecordType()) { 1763 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1764 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType 1765 << "structure pointer"; 1766 return ExprError(); 1767 } 1768 1769 // Ensure that the second argument is of type 'FunctionType' 1770 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts(); 1771 const QualType FnPtrArgType = FnPtrArg->getType(); 1772 if (!FnPtrArgType->isPointerType()) { 1773 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1774 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1775 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1776 return ExprError(); 1777 } 1778 1779 const auto *FuncType = 1780 FnPtrArgType->getPointeeType()->getAs<FunctionType>(); 1781 1782 if (!FuncType) { 1783 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1784 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1785 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1786 return ExprError(); 1787 } 1788 1789 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) { 1790 if (!FT->getNumParams()) { 1791 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1792 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1793 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1794 return ExprError(); 
1795 } 1796 QualType PT = FT->getParamType(0); 1797 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy || 1798 !PT->isPointerType() || !PT->getPointeeType()->isCharType() || 1799 !PT->getPointeeType().isConstQualified()) { 1800 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1801 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1802 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1803 return ExprError(); 1804 } 1805 } 1806 1807 TheCall->setType(Context.IntTy); 1808 break; 1809 } 1810 case Builtin::BI__builtin_expect_with_probability: { 1811 // We first want to ensure we are called with 3 arguments 1812 if (checkArgCount(*this, TheCall, 3)) 1813 return ExprError(); 1814 // then check probability is constant float in range [0.0, 1.0] 1815 const Expr *ProbArg = TheCall->getArg(2); 1816 SmallVector<PartialDiagnosticAt, 8> Notes; 1817 Expr::EvalResult Eval; 1818 Eval.Diag = &Notes; 1819 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 1820 !Eval.Val.isFloat()) { 1821 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 1822 << ProbArg->getSourceRange(); 1823 for (const PartialDiagnosticAt &PDiag : Notes) 1824 Diag(PDiag.first, PDiag.second); 1825 return ExprError(); 1826 } 1827 llvm::APFloat Probability = Eval.Val.getFloat(); 1828 bool LoseInfo = false; 1829 Probability.convert(llvm::APFloat::IEEEdouble(), 1830 llvm::RoundingMode::Dynamic, &LoseInfo); 1831 if (!(Probability >= llvm::APFloat(0.0) && 1832 Probability <= llvm::APFloat(1.0))) { 1833 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 1834 << ProbArg->getSourceRange(); 1835 return ExprError(); 1836 } 1837 break; 1838 } 1839 case Builtin::BI__builtin_preserve_access_index: 1840 if (SemaBuiltinPreserveAI(*this, TheCall)) 1841 return ExprError(); 1842 break; 1843 case Builtin::BI__builtin_call_with_static_chain: 1844 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 1845 return ExprError(); 1846 break; 1847 case Builtin::BI__exception_code: 1848 case Builtin::BI_exception_code: 1849 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 1850 diag::err_seh___except_block)) 1851 return ExprError(); 1852 break; 1853 case Builtin::BI__exception_info: 1854 case Builtin::BI_exception_info: 1855 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 1856 diag::err_seh___except_filter)) 1857 return ExprError(); 1858 break; 1859 case Builtin::BI__GetExceptionInfo: 1860 if (checkArgCount(*this, TheCall, 1)) 1861 return ExprError(); 1862 1863 if (CheckCXXThrowOperand( 1864 TheCall->getBeginLoc(), 1865 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 1866 TheCall)) 1867 return ExprError(); 1868 1869 TheCall->setType(Context.VoidPtrTy); 1870 break; 1871 // OpenCL v2.0, s6.13.16 - Pipe functions 1872 case Builtin::BIread_pipe: 1873 case Builtin::BIwrite_pipe: 1874 // Since those two functions are declared with var args, we need a semantic 1875 // check for the argument. 
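  // A minimal sketch of OpenCL user code these checks accept (assuming the
  // two-argument, reserve_id_t-free form): read_pipe(p, &val) and
  // write_pipe(p, &val), where the packet pointer's pointee type matches the
  // pipe's element type.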
1876 if (SemaBuiltinRWPipe(*this, TheCall)) 1877 return ExprError(); 1878 break; 1879 case Builtin::BIreserve_read_pipe: 1880 case Builtin::BIreserve_write_pipe: 1881 case Builtin::BIwork_group_reserve_read_pipe: 1882 case Builtin::BIwork_group_reserve_write_pipe: 1883 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 1884 return ExprError(); 1885 break; 1886 case Builtin::BIsub_group_reserve_read_pipe: 1887 case Builtin::BIsub_group_reserve_write_pipe: 1888 if (checkOpenCLSubgroupExt(*this, TheCall) || 1889 SemaBuiltinReserveRWPipe(*this, TheCall)) 1890 return ExprError(); 1891 break; 1892 case Builtin::BIcommit_read_pipe: 1893 case Builtin::BIcommit_write_pipe: 1894 case Builtin::BIwork_group_commit_read_pipe: 1895 case Builtin::BIwork_group_commit_write_pipe: 1896 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 1897 return ExprError(); 1898 break; 1899 case Builtin::BIsub_group_commit_read_pipe: 1900 case Builtin::BIsub_group_commit_write_pipe: 1901 if (checkOpenCLSubgroupExt(*this, TheCall) || 1902 SemaBuiltinCommitRWPipe(*this, TheCall)) 1903 return ExprError(); 1904 break; 1905 case Builtin::BIget_pipe_num_packets: 1906 case Builtin::BIget_pipe_max_packets: 1907 if (SemaBuiltinPipePackets(*this, TheCall)) 1908 return ExprError(); 1909 break; 1910 case Builtin::BIto_global: 1911 case Builtin::BIto_local: 1912 case Builtin::BIto_private: 1913 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 1914 return ExprError(); 1915 break; 1916 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 1917 case Builtin::BIenqueue_kernel: 1918 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 1919 return ExprError(); 1920 break; 1921 case Builtin::BIget_kernel_work_group_size: 1922 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 1923 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 1924 return ExprError(); 1925 break; 1926 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 1927 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 1928 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 1929 return ExprError(); 1930 break; 1931 case Builtin::BI__builtin_os_log_format: 1932 Cleanup.setExprNeedsCleanups(true); 1933 LLVM_FALLTHROUGH; 1934 case Builtin::BI__builtin_os_log_format_buffer_size: 1935 if (SemaBuiltinOSLogFormat(TheCall)) 1936 return ExprError(); 1937 break; 1938 case Builtin::BI__builtin_frame_address: 1939 case Builtin::BI__builtin_return_address: { 1940 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 1941 return ExprError(); 1942 1943 // -Wframe-address warning if non-zero passed to builtin 1944 // return/frame address. 1945 Expr::EvalResult Result; 1946 if (!TheCall->getArg(0)->isValueDependent() && 1947 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 1948 Result.Val.getInt() != 0) 1949 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 1950 << ((BuiltinID == Builtin::BI__builtin_return_address) 1951 ? "__builtin_return_address" 1952 : "__builtin_frame_address") 1953 << TheCall->getSourceRange(); 1954 break; 1955 } 1956 1957 case Builtin::BI__builtin_matrix_transpose: 1958 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 1959 1960 case Builtin::BI__builtin_matrix_column_major_load: 1961 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 1962 1963 case Builtin::BI__builtin_matrix_column_major_store: 1964 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 1965 } 1966 1967 // Since the target specific builtins for each arch overlap, only check those 1968 // of the arch we are compiling for. 
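  // Note (descriptive): in offloading compilations (e.g. CUDA), a builtin may
  // belong to the auxiliary target; isAuxBuiltinID detects that case below and
  // the call is then checked against the aux TargetInfo instead.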
1969 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 1970 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 1971 assert(Context.getAuxTargetInfo() && 1972 "Aux Target Builtin, but not an aux target?"); 1973 1974 if (CheckTSBuiltinFunctionCall( 1975 *Context.getAuxTargetInfo(), 1976 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 1977 return ExprError(); 1978 } else { 1979 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 1980 TheCall)) 1981 return ExprError(); 1982 } 1983 } 1984 1985 return TheCallResult; 1986 } 1987 1988 // Get the valid immediate range for the specified NEON type code. 1989 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 1990 NeonTypeFlags Type(t); 1991 int IsQuad = ForceQuad ? true : Type.isQuad(); 1992 switch (Type.getEltType()) { 1993 case NeonTypeFlags::Int8: 1994 case NeonTypeFlags::Poly8: 1995 return shift ? 7 : (8 << IsQuad) - 1; 1996 case NeonTypeFlags::Int16: 1997 case NeonTypeFlags::Poly16: 1998 return shift ? 15 : (4 << IsQuad) - 1; 1999 case NeonTypeFlags::Int32: 2000 return shift ? 31 : (2 << IsQuad) - 1; 2001 case NeonTypeFlags::Int64: 2002 case NeonTypeFlags::Poly64: 2003 return shift ? 63 : (1 << IsQuad) - 1; 2004 case NeonTypeFlags::Poly128: 2005 return shift ? 127 : (1 << IsQuad) - 1; 2006 case NeonTypeFlags::Float16: 2007 assert(!shift && "cannot shift float types!"); 2008 return (4 << IsQuad) - 1; 2009 case NeonTypeFlags::Float32: 2010 assert(!shift && "cannot shift float types!"); 2011 return (2 << IsQuad) - 1; 2012 case NeonTypeFlags::Float64: 2013 assert(!shift && "cannot shift float types!"); 2014 return (1 << IsQuad) - 1; 2015 case NeonTypeFlags::BFloat16: 2016 assert(!shift && "cannot shift float types!"); 2017 return (4 << IsQuad) - 1; 2018 } 2019 llvm_unreachable("Invalid NeonTypeFlag!"); 2020 } 2021 2022 /// getNeonEltType - Return the QualType corresponding to the elements of 2023 /// the vector type specified by the NeonTypeFlags. This is used to check 2024 /// the pointer arguments for Neon load/store intrinsics. 2025 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2026 bool IsPolyUnsigned, bool IsInt64Long) { 2027 switch (Flags.getEltType()) { 2028 case NeonTypeFlags::Int8: 2029 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2030 case NeonTypeFlags::Int16: 2031 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2032 case NeonTypeFlags::Int32: 2033 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2034 case NeonTypeFlags::Int64: 2035 if (IsInt64Long) 2036 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2037 else 2038 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2039 : Context.LongLongTy; 2040 case NeonTypeFlags::Poly8: 2041 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2042 case NeonTypeFlags::Poly16: 2043 return IsPolyUnsigned ? 
Context.UnsignedShortTy : Context.ShortTy; 2044 case NeonTypeFlags::Poly64: 2045 if (IsInt64Long) 2046 return Context.UnsignedLongTy; 2047 else 2048 return Context.UnsignedLongLongTy; 2049 case NeonTypeFlags::Poly128: 2050 break; 2051 case NeonTypeFlags::Float16: 2052 return Context.HalfTy; 2053 case NeonTypeFlags::Float32: 2054 return Context.FloatTy; 2055 case NeonTypeFlags::Float64: 2056 return Context.DoubleTy; 2057 case NeonTypeFlags::BFloat16: 2058 return Context.BFloat16Ty; 2059 } 2060 llvm_unreachable("Invalid NeonTypeFlag!"); 2061 } 2062 2063 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2064 // Range check SVE intrinsics that take immediate values. 2065 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2066 2067 switch (BuiltinID) { 2068 default: 2069 return false; 2070 #define GET_SVE_IMMEDIATE_CHECK 2071 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2072 #undef GET_SVE_IMMEDIATE_CHECK 2073 } 2074 2075 // Perform all the immediate checks for this builtin call. 2076 bool HasError = false; 2077 for (auto &I : ImmChecks) { 2078 int ArgNum, CheckTy, ElementSizeInBits; 2079 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2080 2081 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2082 2083 // Function that checks whether the operand (ArgNum) is an immediate 2084 // that is one of the predefined values. 2085 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2086 int ErrDiag) -> bool { 2087 // We can't check the value of a dependent argument. 2088 Expr *Arg = TheCall->getArg(ArgNum); 2089 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2090 return false; 2091 2092 // Check constant-ness first. 2093 llvm::APSInt Imm; 2094 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2095 return true; 2096 2097 if (!CheckImm(Imm.getSExtValue())) 2098 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2099 return false; 2100 }; 2101 2102 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2103 case SVETypeFlags::ImmCheck0_31: 2104 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2105 HasError = true; 2106 break; 2107 case SVETypeFlags::ImmCheck0_13: 2108 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2109 HasError = true; 2110 break; 2111 case SVETypeFlags::ImmCheck1_16: 2112 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2113 HasError = true; 2114 break; 2115 case SVETypeFlags::ImmCheck0_7: 2116 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2117 HasError = true; 2118 break; 2119 case SVETypeFlags::ImmCheckExtract: 2120 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2121 (2048 / ElementSizeInBits) - 1)) 2122 HasError = true; 2123 break; 2124 case SVETypeFlags::ImmCheckShiftRight: 2125 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2126 HasError = true; 2127 break; 2128 case SVETypeFlags::ImmCheckShiftRightNarrow: 2129 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2130 ElementSizeInBits / 2)) 2131 HasError = true; 2132 break; 2133 case SVETypeFlags::ImmCheckShiftLeft: 2134 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2135 ElementSizeInBits - 1)) 2136 HasError = true; 2137 break; 2138 case SVETypeFlags::ImmCheckLaneIndex: 2139 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2140 (128 / (1 * ElementSizeInBits)) - 1)) 2141 HasError = true; 2142 break; 2143 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2144 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2145 (128 / (2 * ElementSizeInBits)) - 1)) 2146 HasError = true; 2147 break; 2148 case 
SVETypeFlags::ImmCheckLaneIndexDot: 2149 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2150 (128 / (4 * ElementSizeInBits)) - 1)) 2151 HasError = true; 2152 break; 2153 case SVETypeFlags::ImmCheckComplexRot90_270: 2154 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2155 diag::err_rotation_argument_to_cadd)) 2156 HasError = true; 2157 break; 2158 case SVETypeFlags::ImmCheckComplexRotAll90: 2159 if (CheckImmediateInSet( 2160 [](int64_t V) { 2161 return V == 0 || V == 90 || V == 180 || V == 270; 2162 }, 2163 diag::err_rotation_argument_to_cmla)) 2164 HasError = true; 2165 break; 2166 case SVETypeFlags::ImmCheck0_1: 2167 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2168 HasError = true; 2169 break; 2170 case SVETypeFlags::ImmCheck0_2: 2171 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2172 HasError = true; 2173 break; 2174 case SVETypeFlags::ImmCheck0_3: 2175 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2176 HasError = true; 2177 break; 2178 } 2179 } 2180 2181 return HasError; 2182 } 2183 2184 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2185 unsigned BuiltinID, CallExpr *TheCall) { 2186 llvm::APSInt Result; 2187 uint64_t mask = 0; 2188 unsigned TV = 0; 2189 int PtrArgNum = -1; 2190 bool HasConstPtr = false; 2191 switch (BuiltinID) { 2192 #define GET_NEON_OVERLOAD_CHECK 2193 #include "clang/Basic/arm_neon.inc" 2194 #include "clang/Basic/arm_fp16.inc" 2195 #undef GET_NEON_OVERLOAD_CHECK 2196 } 2197 2198 // For NEON intrinsics which are overloaded on vector element type, validate 2199 // the immediate which specifies which variant to emit. 2200 unsigned ImmArg = TheCall->getNumArgs()-1; 2201 if (mask) { 2202 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2203 return true; 2204 2205 TV = Result.getLimitedValue(64); 2206 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2207 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2208 << TheCall->getArg(ImmArg)->getSourceRange(); 2209 } 2210 2211 if (PtrArgNum >= 0) { 2212 // Check that pointer arguments have the specified type. 2213 Expr *Arg = TheCall->getArg(PtrArgNum); 2214 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2215 Arg = ICE->getSubExpr(); 2216 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2217 QualType RHSTy = RHS.get()->getType(); 2218 2219 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2220 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2221 Arch == llvm::Triple::aarch64_32 || 2222 Arch == llvm::Triple::aarch64_be; 2223 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2224 QualType EltTy = 2225 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2226 if (HasConstPtr) 2227 EltTy = EltTy.withConst(); 2228 QualType LHSTy = Context.getPointerType(EltTy); 2229 AssignConvertType ConvTy; 2230 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2231 if (RHS.isInvalid()) 2232 return true; 2233 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2234 RHS.get(), AA_Assigning)) 2235 return true; 2236 } 2237 2238 // For NEON intrinsics which take an immediate value as part of the 2239 // instruction, range check them here. 
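  // For instance (assumed intrinsic usage): vshrq_n_s32(v, n) only accepts a
  // constant n with 1 <= n <= 32; the (i, l, u) triple for each builtin comes
  // from the generated GET_NEON_IMMEDIATE_CHECK tables included below.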
2240 unsigned i = 0, l = 0, u = 0; 2241 switch (BuiltinID) { 2242 default: 2243 return false; 2244 #define GET_NEON_IMMEDIATE_CHECK 2245 #include "clang/Basic/arm_neon.inc" 2246 #include "clang/Basic/arm_fp16.inc" 2247 #undef GET_NEON_IMMEDIATE_CHECK 2248 } 2249 2250 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2251 } 2252 2253 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2254 switch (BuiltinID) { 2255 default: 2256 return false; 2257 #include "clang/Basic/arm_mve_builtin_sema.inc" 2258 } 2259 } 2260 2261 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2262 CallExpr *TheCall) { 2263 bool Err = false; 2264 switch (BuiltinID) { 2265 default: 2266 return false; 2267 #include "clang/Basic/arm_cde_builtin_sema.inc" 2268 } 2269 2270 if (Err) 2271 return true; 2272 2273 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2274 } 2275 2276 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2277 const Expr *CoprocArg, bool WantCDE) { 2278 if (isConstantEvaluated()) 2279 return false; 2280 2281 // We can't check the value of a dependent argument. 2282 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2283 return false; 2284 2285 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 2286 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2287 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2288 2289 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2290 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2291 2292 if (IsCDECoproc != WantCDE) 2293 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2294 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2295 2296 return false; 2297 } 2298 2299 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2300 unsigned MaxWidth) { 2301 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2302 BuiltinID == ARM::BI__builtin_arm_ldaex || 2303 BuiltinID == ARM::BI__builtin_arm_strex || 2304 BuiltinID == ARM::BI__builtin_arm_stlex || 2305 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2306 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2307 BuiltinID == AArch64::BI__builtin_arm_strex || 2308 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2309 "unexpected ARM builtin"); 2310 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2311 BuiltinID == ARM::BI__builtin_arm_ldaex || 2312 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2313 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2314 2315 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 2316 2317 // Ensure that we have the proper number of arguments. 2318 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 2319 return true; 2320 2321 // Inspect the pointer argument of the atomic builtin. This should always be 2322 // a pointer type, whose element is an integral scalar or pointer type. 2323 // Because it is a pointer type, we don't have to worry about any implicit 2324 // casts here. 2325 Expr *PointerArg = TheCall->getArg(IsLdrex ? 
0 : 1); 2326 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 2327 if (PointerArgRes.isInvalid()) 2328 return true; 2329 PointerArg = PointerArgRes.get(); 2330 2331 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 2332 if (!pointerType) { 2333 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 2334 << PointerArg->getType() << PointerArg->getSourceRange(); 2335 return true; 2336 } 2337 2338 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 2339 // task is to insert the appropriate casts into the AST. First work out just 2340 // what the appropriate type is. 2341 QualType ValType = pointerType->getPointeeType(); 2342 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 2343 if (IsLdrex) 2344 AddrType.addConst(); 2345 2346 // Issue a warning if the cast is dodgy. 2347 CastKind CastNeeded = CK_NoOp; 2348 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 2349 CastNeeded = CK_BitCast; 2350 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 2351 << PointerArg->getType() << Context.getPointerType(AddrType) 2352 << AA_Passing << PointerArg->getSourceRange(); 2353 } 2354 2355 // Finally, do the cast and replace the argument with the corrected version. 2356 AddrType = Context.getPointerType(AddrType); 2357 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 2358 if (PointerArgRes.isInvalid()) 2359 return true; 2360 PointerArg = PointerArgRes.get(); 2361 2362 TheCall->setArg(IsLdrex ? 0 : 1, PointerArg); 2363 2364 // In general, we allow ints, floats and pointers to be loaded and stored. 2365 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 2366 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 2367 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 2368 << PointerArg->getType() << PointerArg->getSourceRange(); 2369 return true; 2370 } 2371 2372 // But ARM doesn't have instructions to deal with 128-bit versions. 2373 if (Context.getTypeSize(ValType) > MaxWidth) { 2374 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 2375 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 2376 << PointerArg->getType() << PointerArg->getSourceRange(); 2377 return true; 2378 } 2379 2380 switch (ValType.getObjCLifetime()) { 2381 case Qualifiers::OCL_None: 2382 case Qualifiers::OCL_ExplicitNone: 2383 // okay 2384 break; 2385 2386 case Qualifiers::OCL_Weak: 2387 case Qualifiers::OCL_Strong: 2388 case Qualifiers::OCL_Autoreleasing: 2389 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 2390 << ValType << PointerArg->getSourceRange(); 2391 return true; 2392 } 2393 2394 if (IsLdrex) { 2395 TheCall->setType(ValType); 2396 return false; 2397 } 2398 2399 // Initialize the argument to be stored. 2400 ExprResult ValArg = TheCall->getArg(0); 2401 InitializedEntity Entity = InitializedEntity::InitializeParameter( 2402 Context, ValType, /*consume*/ false); 2403 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 2404 if (ValArg.isInvalid()) 2405 return true; 2406 TheCall->setArg(0, ValArg.get()); 2407 2408 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 2409 // but the custom checker bypasses all default analysis. 
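  // Sketch of accepted usage (hypothetical user code, assuming a suitable
  // int *p):
  //   int Old = __builtin_arm_ldrex(p);
  //   int Failed = __builtin_arm_strex(Old + 1, p); // returns 0 on success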
2410 TheCall->setType(Context.IntTy); 2411 return false; 2412 } 2413 2414 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2415 CallExpr *TheCall) { 2416 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 2417 BuiltinID == ARM::BI__builtin_arm_ldaex || 2418 BuiltinID == ARM::BI__builtin_arm_strex || 2419 BuiltinID == ARM::BI__builtin_arm_stlex) { 2420 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 2421 } 2422 2423 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 2424 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2425 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 2426 } 2427 2428 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 2429 BuiltinID == ARM::BI__builtin_arm_wsr64) 2430 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 2431 2432 if (BuiltinID == ARM::BI__builtin_arm_rsr || 2433 BuiltinID == ARM::BI__builtin_arm_rsrp || 2434 BuiltinID == ARM::BI__builtin_arm_wsr || 2435 BuiltinID == ARM::BI__builtin_arm_wsrp) 2436 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2437 2438 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2439 return true; 2440 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 2441 return true; 2442 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2443 return true; 2444 2445 // For intrinsics which take an immediate value as part of the instruction, 2446 // range check them here. 2447 // FIXME: VFP Intrinsics should error if VFP not present. 2448 switch (BuiltinID) { 2449 default: return false; 2450 case ARM::BI__builtin_arm_ssat: 2451 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 2452 case ARM::BI__builtin_arm_usat: 2453 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 2454 case ARM::BI__builtin_arm_ssat16: 2455 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 2456 case ARM::BI__builtin_arm_usat16: 2457 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 2458 case ARM::BI__builtin_arm_vcvtr_f: 2459 case ARM::BI__builtin_arm_vcvtr_d: 2460 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 2461 case ARM::BI__builtin_arm_dmb: 2462 case ARM::BI__builtin_arm_dsb: 2463 case ARM::BI__builtin_arm_isb: 2464 case ARM::BI__builtin_arm_dbg: 2465 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 2466 case ARM::BI__builtin_arm_cdp: 2467 case ARM::BI__builtin_arm_cdp2: 2468 case ARM::BI__builtin_arm_mcr: 2469 case ARM::BI__builtin_arm_mcr2: 2470 case ARM::BI__builtin_arm_mrc: 2471 case ARM::BI__builtin_arm_mrc2: 2472 case ARM::BI__builtin_arm_mcrr: 2473 case ARM::BI__builtin_arm_mcrr2: 2474 case ARM::BI__builtin_arm_mrrc: 2475 case ARM::BI__builtin_arm_mrrc2: 2476 case ARM::BI__builtin_arm_ldc: 2477 case ARM::BI__builtin_arm_ldcl: 2478 case ARM::BI__builtin_arm_ldc2: 2479 case ARM::BI__builtin_arm_ldc2l: 2480 case ARM::BI__builtin_arm_stc: 2481 case ARM::BI__builtin_arm_stcl: 2482 case ARM::BI__builtin_arm_stc2: 2483 case ARM::BI__builtin_arm_stc2l: 2484 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 2485 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 2486 /*WantCDE*/ false); 2487 } 2488 } 2489 2490 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 2491 unsigned BuiltinID, 2492 CallExpr *TheCall) { 2493 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 2494 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2495 BuiltinID == AArch64::BI__builtin_arm_strex || 2496 BuiltinID == AArch64::BI__builtin_arm_stlex) { 2497 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 2498 } 2499 2500 if (BuiltinID == 
AArch64::BI__builtin_arm_prefetch) { 2501 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2502 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 2503 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 2504 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 2505 } 2506 2507 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 2508 BuiltinID == AArch64::BI__builtin_arm_wsr64) 2509 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2510 2511 // Memory Tagging Extensions (MTE) Intrinsics 2512 if (BuiltinID == AArch64::BI__builtin_arm_irg || 2513 BuiltinID == AArch64::BI__builtin_arm_addg || 2514 BuiltinID == AArch64::BI__builtin_arm_gmi || 2515 BuiltinID == AArch64::BI__builtin_arm_ldg || 2516 BuiltinID == AArch64::BI__builtin_arm_stg || 2517 BuiltinID == AArch64::BI__builtin_arm_subp) { 2518 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 2519 } 2520 2521 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 2522 BuiltinID == AArch64::BI__builtin_arm_rsrp || 2523 BuiltinID == AArch64::BI__builtin_arm_wsr || 2524 BuiltinID == AArch64::BI__builtin_arm_wsrp) 2525 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2526 2527 // Only check the valid encoding range. Any constant in this range would be 2528 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 2529 // an exception for incorrect registers. This matches MSVC behavior. 2530 if (BuiltinID == AArch64::BI_ReadStatusReg || 2531 BuiltinID == AArch64::BI_WriteStatusReg) 2532 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 2533 2534 if (BuiltinID == AArch64::BI__getReg) 2535 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 2536 2537 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2538 return true; 2539 2540 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 2541 return true; 2542 2543 // For intrinsics which take an immediate value as part of the instruction, 2544 // range check them here. 2545 unsigned i = 0, l = 0, u = 0; 2546 switch (BuiltinID) { 2547 default: return false; 2548 case AArch64::BI__builtin_arm_dmb: 2549 case AArch64::BI__builtin_arm_dsb: 2550 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 2551 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 2552 } 2553 2554 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2555 } 2556 2557 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 2558 if (Arg->getType()->getAsPlaceholderType()) 2559 return false; 2560 2561 // The first argument needs to be a record field access. 2562 // If it is an array element access, we delay decision 2563 // to BPF backend to check whether the access is a 2564 // field access or not. 2565 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 2566 dyn_cast<MemberExpr>(Arg->IgnoreParens()) || 2567 dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens())); 2568 } 2569 2570 static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S, 2571 QualType VectorTy, QualType EltTy) { 2572 QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType(); 2573 if (!Context.hasSameType(VectorEltTy, EltTy)) { 2574 S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types) 2575 << Call->getSourceRange() << VectorEltTy << EltTy; 2576 return false; 2577 } 2578 return true; 2579 } 2580 2581 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 2582 QualType ArgType = Arg->getType(); 2583 if (ArgType->getAsPlaceholderType()) 2584 return false; 2585 2586 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type 2587 // format: 2588 // 1. 
__builtin_preserve_type_info(*(<type> *)0, flag); 2589 // 2. <type> var; 2590 // __builtin_preserve_type_info(var, flag); 2591 if (!dyn_cast<DeclRefExpr>(Arg->IgnoreParens()) && 2592 !dyn_cast<UnaryOperator>(Arg->IgnoreParens())) 2593 return false; 2594 2595 // Typedef type. 2596 if (ArgType->getAs<TypedefType>()) 2597 return true; 2598 2599 // Record type or Enum type. 2600 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2601 if (const auto *RT = Ty->getAs<RecordType>()) { 2602 if (!RT->getDecl()->getDeclName().isEmpty()) 2603 return true; 2604 } else if (const auto *ET = Ty->getAs<EnumType>()) { 2605 if (!ET->getDecl()->getDeclName().isEmpty()) 2606 return true; 2607 } 2608 2609 return false; 2610 } 2611 2612 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 2613 QualType ArgType = Arg->getType(); 2614 if (ArgType->getAsPlaceholderType()) 2615 return false; 2616 2617 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 2618 // format: 2619 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 2620 // flag); 2621 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 2622 if (!UO) 2623 return false; 2624 2625 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 2626 if (!CE) 2627 return false; 2628 if (CE->getCastKind() != CK_IntegralToPointer && 2629 CE->getCastKind() != CK_NullToPointer) 2630 return false; 2631 2632 // The integer must be from an EnumConstantDecl. 2633 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 2634 if (!DR) 2635 return false; 2636 2637 const EnumConstantDecl *Enumerator = 2638 dyn_cast<EnumConstantDecl>(DR->getDecl()); 2639 if (!Enumerator) 2640 return false; 2641 2642 // The type must be EnumType. 2643 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2644 const auto *ET = Ty->getAs<EnumType>(); 2645 if (!ET) 2646 return false; 2647 2648 // The enum value must be supported. 
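  // e.g. (sketch, following the format comment above):
  //   enum E { E_VAL = 100 };
  //   __builtin_preserve_enum_value(*(enum E *)E_VAL, flag);
  // is accepted only when E_VAL is an enumerator of E, which the loop below
  // verifies.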
2649 for (auto *EDI : ET->getDecl()->enumerators()) { 2650 if (EDI == Enumerator) 2651 return true; 2652 } 2653 2654 return false; 2655 } 2656 2657 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 2658 CallExpr *TheCall) { 2659 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 2660 BuiltinID == BPF::BI__builtin_btf_type_id || 2661 BuiltinID == BPF::BI__builtin_preserve_type_info || 2662 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 2663 "unexpected BPF builtin"); 2664 2665 if (checkArgCount(*this, TheCall, 2)) 2666 return true; 2667 2668 // The second argument needs to be a constant int 2669 Expr *Arg = TheCall->getArg(1); 2670 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 2671 diag::kind kind; 2672 if (!Value) { 2673 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 2674 kind = diag::err_preserve_field_info_not_const; 2675 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 2676 kind = diag::err_btf_type_id_not_const; 2677 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 2678 kind = diag::err_preserve_type_info_not_const; 2679 else 2680 kind = diag::err_preserve_enum_value_not_const; 2681 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 2682 return true; 2683 } 2684 2685 // The first argument 2686 Arg = TheCall->getArg(0); 2687 bool InvalidArg = false; 2688 bool ReturnUnsignedInt = true; 2689 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 2690 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 2691 InvalidArg = true; 2692 kind = diag::err_preserve_field_info_not_field; 2693 } 2694 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 2695 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 2696 InvalidArg = true; 2697 kind = diag::err_preserve_type_info_invalid; 2698 } 2699 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 2700 if (!isValidBPFPreserveEnumValueArg(Arg)) { 2701 InvalidArg = true; 2702 kind = diag::err_preserve_enum_value_invalid; 2703 } 2704 ReturnUnsignedInt = false; 2705 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) { 2706 ReturnUnsignedInt = false; 2707 } 2708 2709 if (InvalidArg) { 2710 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 2711 return true; 2712 } 2713 2714 if (ReturnUnsignedInt) 2715 TheCall->setType(Context.UnsignedIntTy); 2716 else 2717 TheCall->setType(Context.UnsignedLongTy); 2718 return false; 2719 } 2720 2721 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2722 struct ArgInfo { 2723 uint8_t OpNum; 2724 bool IsSigned; 2725 uint8_t BitWidth; 2726 uint8_t Align; 2727 }; 2728 struct BuiltinInfo { 2729 unsigned BuiltinID; 2730 ArgInfo Infos[2]; 2731 }; 2732 2733 static BuiltinInfo Infos[] = { 2734 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2735 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2736 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2737 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 2738 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2739 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2740 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2741 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2742 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2743 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2744 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2745 2746 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2747 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 
2748 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 2749 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2750 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2751 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2752 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2753 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2754 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2755 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2756 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 2757 2758 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2759 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2760 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2761 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2762 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2763 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2764 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2765 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2766 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2767 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2768 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2769 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2770 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2771 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2772 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2773 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2774 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2775 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2776 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 2777 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2778 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 2779 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2780 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2781 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2782 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2783 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 2784 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 2785 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2786 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2787 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2788 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2789 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2790 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2791 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2792 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2793 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2794 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2795 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2796 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2797 { 
Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 2798 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2799 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2800 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2801 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2802 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2803 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2804 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2805 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 2806 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2807 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2808 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2809 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2810 {{ 1, false, 6, 0 }} }, 2811 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2812 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2813 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2814 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2815 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2816 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2817 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2818 {{ 1, false, 5, 0 }} }, 2819 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2820 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2821 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2822 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2823 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2824 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2825 { 2, false, 5, 0 }} }, 2826 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 2827 { 2, false, 6, 0 }} }, 2828 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 2829 { 3, false, 5, 0 }} }, 2830 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 2831 { 3, false, 6, 0 }} }, 2832 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 2833 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 2834 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 2835 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 2836 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 2837 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 2838 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 2839 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 2840 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 2841 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 2842 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 2843 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 2844 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 2845 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 2846 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 2847 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 2848 {{ 2, false, 4, 0 }, 2849 { 3, false, 5, 0 }} }, 2850 { 
Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 2851 {{ 2, false, 4, 0 }, 2852 { 3, false, 5, 0 }} }, 2853 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 2854 {{ 2, false, 4, 0 }, 2855 { 3, false, 5, 0 }} }, 2856 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 2857 {{ 2, false, 4, 0 }, 2858 { 3, false, 5, 0 }} }, 2859 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 2860 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 2861 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 2862 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 2863 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 2864 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 2865 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 2866 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 2867 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 2868 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 2869 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 2870 { 2, false, 5, 0 }} }, 2871 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 2872 { 2, false, 6, 0 }} }, 2873 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 2874 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 2875 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 2876 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 2877 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 2878 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 2879 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 2880 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 2881 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 2882 {{ 1, false, 4, 0 }} }, 2883 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 2884 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 2885 {{ 1, false, 4, 0 }} }, 2886 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 2887 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 2888 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 2889 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 2890 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 2891 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 2892 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 2893 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 2894 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 2895 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 2896 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 2897 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 2898 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 2899 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 2900 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 2901 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 2902 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 2903 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 2904 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, 
false, 1, 0 }} }, 2905 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 2906 {{ 3, false, 1, 0 }} }, 2907 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 2908 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 2909 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 2910 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 2911 {{ 3, false, 1, 0 }} }, 2912 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 2913 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 2914 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 2915 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 2916 {{ 3, false, 1, 0 }} }, 2917 }; 2918 2919 // Use a dynamically initialized static to sort the table exactly once on 2920 // first run. 2921 static const bool SortOnce = 2922 (llvm::sort(Infos, 2923 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 2924 return LHS.BuiltinID < RHS.BuiltinID; 2925 }), 2926 true); 2927 (void)SortOnce; 2928 2929 const BuiltinInfo *F = llvm::partition_point( 2930 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 2931 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 2932 return false; 2933 2934 bool Error = false; 2935 2936 for (const ArgInfo &A : F->Infos) { 2937 // Ignore empty ArgInfo elements. 2938 if (A.BitWidth == 0) 2939 continue; 2940 2941 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 2942 int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1; 2943 if (!A.Align) { 2944 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 2945 } else { 2946 unsigned M = 1 << A.Align; 2947 Min *= M; 2948 Max *= M; 2949 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) | 2950 SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 2951 } 2952 } 2953 return Error; 2954 } 2955 2956 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 2957 CallExpr *TheCall) { 2958 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 2959 } 2960 2961 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 2962 unsigned BuiltinID, CallExpr *TheCall) { 2963 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 2964 CheckMipsBuiltinArgument(BuiltinID, TheCall); 2965 } 2966 2967 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 2968 CallExpr *TheCall) { 2969 2970 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 2971 BuiltinID <= Mips::BI__builtin_mips_lwx) { 2972 if (!TI.hasFeature("dsp")) 2973 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 2974 } 2975 2976 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 2977 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 2978 if (!TI.hasFeature("dspr2")) 2979 return Diag(TheCall->getBeginLoc(), 2980 diag::err_mips_builtin_requires_dspr2); 2981 } 2982 2983 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 2984 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 2985 if (!TI.hasFeature("msa")) 2986 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 2987 } 2988 2989 return false; 2990 } 2991 2992 // CheckMipsBuiltinArgument - Checks the constant value passed to the 2993 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 2994 // ordering for DSP is unspecified. MSA is ordered by the data format used 2995 // by the underlying instruction i.e., df/m, df/n and then by size. 
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics, we should
// be too.
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (to which the intrinsics map) that use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually has an unsigned 5 bit field,
  // not a df/n field.
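  // Illustrative note (added commentary): for the MSA df/m-style intrinsics
  // the immediate width tracks the element size, so the byte variants above
  // accept a 3-bit value, the halfword variants a 4-bit value, and so on.
  // For example, a hypothetical call
  //   __builtin_msa_slli_b(v, 8)
  // is rejected because 8 falls outside the [0, 7] range recorded for the
  // byte-element shifts.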
3043 case Mips::BI__builtin_msa_cfcmsa: 3044 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3045 case Mips::BI__builtin_msa_clei_u_b: 3046 case Mips::BI__builtin_msa_clei_u_h: 3047 case Mips::BI__builtin_msa_clei_u_w: 3048 case Mips::BI__builtin_msa_clei_u_d: 3049 case Mips::BI__builtin_msa_clti_u_b: 3050 case Mips::BI__builtin_msa_clti_u_h: 3051 case Mips::BI__builtin_msa_clti_u_w: 3052 case Mips::BI__builtin_msa_clti_u_d: 3053 case Mips::BI__builtin_msa_maxi_u_b: 3054 case Mips::BI__builtin_msa_maxi_u_h: 3055 case Mips::BI__builtin_msa_maxi_u_w: 3056 case Mips::BI__builtin_msa_maxi_u_d: 3057 case Mips::BI__builtin_msa_mini_u_b: 3058 case Mips::BI__builtin_msa_mini_u_h: 3059 case Mips::BI__builtin_msa_mini_u_w: 3060 case Mips::BI__builtin_msa_mini_u_d: 3061 case Mips::BI__builtin_msa_addvi_b: 3062 case Mips::BI__builtin_msa_addvi_h: 3063 case Mips::BI__builtin_msa_addvi_w: 3064 case Mips::BI__builtin_msa_addvi_d: 3065 case Mips::BI__builtin_msa_bclri_w: 3066 case Mips::BI__builtin_msa_bnegi_w: 3067 case Mips::BI__builtin_msa_bseti_w: 3068 case Mips::BI__builtin_msa_sat_s_w: 3069 case Mips::BI__builtin_msa_sat_u_w: 3070 case Mips::BI__builtin_msa_slli_w: 3071 case Mips::BI__builtin_msa_srai_w: 3072 case Mips::BI__builtin_msa_srari_w: 3073 case Mips::BI__builtin_msa_srli_w: 3074 case Mips::BI__builtin_msa_srlri_w: 3075 case Mips::BI__builtin_msa_subvi_b: 3076 case Mips::BI__builtin_msa_subvi_h: 3077 case Mips::BI__builtin_msa_subvi_w: 3078 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3079 case Mips::BI__builtin_msa_binsli_w: 3080 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3081 // These intrinsics take an unsigned 6 bit immediate. 3082 case Mips::BI__builtin_msa_bclri_d: 3083 case Mips::BI__builtin_msa_bnegi_d: 3084 case Mips::BI__builtin_msa_bseti_d: 3085 case Mips::BI__builtin_msa_sat_s_d: 3086 case Mips::BI__builtin_msa_sat_u_d: 3087 case Mips::BI__builtin_msa_slli_d: 3088 case Mips::BI__builtin_msa_srai_d: 3089 case Mips::BI__builtin_msa_srari_d: 3090 case Mips::BI__builtin_msa_srli_d: 3091 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3092 case Mips::BI__builtin_msa_binsli_d: 3093 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3094 // These intrinsics take a signed 5 bit immediate. 3095 case Mips::BI__builtin_msa_ceqi_b: 3096 case Mips::BI__builtin_msa_ceqi_h: 3097 case Mips::BI__builtin_msa_ceqi_w: 3098 case Mips::BI__builtin_msa_ceqi_d: 3099 case Mips::BI__builtin_msa_clti_s_b: 3100 case Mips::BI__builtin_msa_clti_s_h: 3101 case Mips::BI__builtin_msa_clti_s_w: 3102 case Mips::BI__builtin_msa_clti_s_d: 3103 case Mips::BI__builtin_msa_clei_s_b: 3104 case Mips::BI__builtin_msa_clei_s_h: 3105 case Mips::BI__builtin_msa_clei_s_w: 3106 case Mips::BI__builtin_msa_clei_s_d: 3107 case Mips::BI__builtin_msa_maxi_s_b: 3108 case Mips::BI__builtin_msa_maxi_s_h: 3109 case Mips::BI__builtin_msa_maxi_s_w: 3110 case Mips::BI__builtin_msa_maxi_s_d: 3111 case Mips::BI__builtin_msa_mini_s_b: 3112 case Mips::BI__builtin_msa_mini_s_h: 3113 case Mips::BI__builtin_msa_mini_s_w: 3114 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3115 // These intrinsics take an unsigned 8 bit immediate. 
3116 case Mips::BI__builtin_msa_andi_b: 3117 case Mips::BI__builtin_msa_nori_b: 3118 case Mips::BI__builtin_msa_ori_b: 3119 case Mips::BI__builtin_msa_shf_b: 3120 case Mips::BI__builtin_msa_shf_h: 3121 case Mips::BI__builtin_msa_shf_w: 3122 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3123 case Mips::BI__builtin_msa_bseli_b: 3124 case Mips::BI__builtin_msa_bmnzi_b: 3125 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3126 // df/n format 3127 // These intrinsics take an unsigned 4 bit immediate. 3128 case Mips::BI__builtin_msa_copy_s_b: 3129 case Mips::BI__builtin_msa_copy_u_b: 3130 case Mips::BI__builtin_msa_insve_b: 3131 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3132 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3133 // These intrinsics take an unsigned 3 bit immediate. 3134 case Mips::BI__builtin_msa_copy_s_h: 3135 case Mips::BI__builtin_msa_copy_u_h: 3136 case Mips::BI__builtin_msa_insve_h: 3137 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3138 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3139 // These intrinsics take an unsigned 2 bit immediate. 3140 case Mips::BI__builtin_msa_copy_s_w: 3141 case Mips::BI__builtin_msa_copy_u_w: 3142 case Mips::BI__builtin_msa_insve_w: 3143 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3144 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3145 // These intrinsics take an unsigned 1 bit immediate. 3146 case Mips::BI__builtin_msa_copy_s_d: 3147 case Mips::BI__builtin_msa_copy_u_d: 3148 case Mips::BI__builtin_msa_insve_d: 3149 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3150 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3151 // Memory offsets and immediate loads. 3152 // These intrinsics take a signed 10 bit immediate. 3153 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3154 case Mips::BI__builtin_msa_ldi_h: 3155 case Mips::BI__builtin_msa_ldi_w: 3156 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3157 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3158 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3159 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3160 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3161 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3162 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3163 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3164 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3165 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3166 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3167 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3168 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3169 } 3170 3171 if (!m) 3172 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3173 3174 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3175 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3176 } 3177 3178 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str, 3179 /// advancing the pointer over the consumed characters. The decoded type is 3180 /// returned. 
If the decoded type represents a constant integer with a 3181 /// constraint on its value then Mask is set to that value. The type descriptors 3182 /// used in Str are specific to PPC MMA builtins and are documented in the file 3183 /// defining the PPC builtins. 3184 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 3185 unsigned &Mask) { 3186 bool RequireICE = false; 3187 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 3188 switch (*Str++) { 3189 case 'V': 3190 return Context.getVectorType(Context.UnsignedCharTy, 16, 3191 VectorType::VectorKind::AltiVecVector); 3192 case 'i': { 3193 char *End; 3194 unsigned size = strtoul(Str, &End, 10); 3195 assert(End != Str && "Missing constant parameter constraint"); 3196 Str = End; 3197 Mask = size; 3198 return Context.IntTy; 3199 } 3200 case 'W': { 3201 char *End; 3202 unsigned size = strtoul(Str, &End, 10); 3203 assert(End != Str && "Missing PowerPC MMA type size"); 3204 Str = End; 3205 QualType Type; 3206 switch (size) { 3207 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 3208 case size: Type = Context.Id##Ty; break; 3209 #include "clang/Basic/PPCTypes.def" 3210 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 3211 } 3212 bool CheckVectorArgs = false; 3213 while (!CheckVectorArgs) { 3214 switch (*Str++) { 3215 case '*': 3216 Type = Context.getPointerType(Type); 3217 break; 3218 case 'C': 3219 Type = Type.withConst(); 3220 break; 3221 default: 3222 CheckVectorArgs = true; 3223 --Str; 3224 break; 3225 } 3226 } 3227 return Type; 3228 } 3229 default: 3230 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 3231 } 3232 } 3233 3234 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3235 CallExpr *TheCall) { 3236 unsigned i = 0, l = 0, u = 0; 3237 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || 3238 BuiltinID == PPC::BI__builtin_divdeu || 3239 BuiltinID == PPC::BI__builtin_bpermd; 3240 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3241 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || 3242 BuiltinID == PPC::BI__builtin_divweu || 3243 BuiltinID == PPC::BI__builtin_divde || 3244 BuiltinID == PPC::BI__builtin_divdeu; 3245 3246 if (Is64BitBltin && !IsTarget64Bit) 3247 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3248 << TheCall->getSourceRange(); 3249 3250 if ((IsBltinExtDiv && !TI.hasFeature("extdiv")) || 3251 (BuiltinID == PPC::BI__builtin_bpermd && !TI.hasFeature("bpermd"))) 3252 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3253 << TheCall->getSourceRange(); 3254 3255 auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool { 3256 if (!TI.hasFeature("vsx")) 3257 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3258 << TheCall->getSourceRange(); 3259 return false; 3260 }; 3261 3262 switch (BuiltinID) { 3263 default: return false; 3264 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3265 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3266 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3267 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3268 case PPC::BI__builtin_altivec_dss: 3269 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3270 case PPC::BI__builtin_tbegin: 3271 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3272 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3273 case PPC::BI__builtin_tabortwc: 3274 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3275 case PPC::BI__builtin_tabortwci: 3276 case 
PPC::BI__builtin_tabortdci: 3277 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3278 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3279 case PPC::BI__builtin_altivec_dst: 3280 case PPC::BI__builtin_altivec_dstt: 3281 case PPC::BI__builtin_altivec_dstst: 3282 case PPC::BI__builtin_altivec_dststt: 3283 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 3284 case PPC::BI__builtin_vsx_xxpermdi: 3285 case PPC::BI__builtin_vsx_xxsldwi: 3286 return SemaBuiltinVSX(TheCall); 3287 case PPC::BI__builtin_unpack_vector_int128: 3288 return SemaVSXCheck(TheCall) || 3289 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3290 case PPC::BI__builtin_pack_vector_int128: 3291 return SemaVSXCheck(TheCall); 3292 case PPC::BI__builtin_altivec_vgnb: 3293 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 3294 case PPC::BI__builtin_altivec_vec_replace_elt: 3295 case PPC::BI__builtin_altivec_vec_replace_unaligned: { 3296 QualType VecTy = TheCall->getArg(0)->getType(); 3297 QualType EltTy = TheCall->getArg(1)->getType(); 3298 unsigned Width = Context.getIntWidth(EltTy); 3299 return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) || 3300 !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy); 3301 } 3302 case PPC::BI__builtin_vsx_xxeval: 3303 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 3304 case PPC::BI__builtin_altivec_vsldbi: 3305 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3306 case PPC::BI__builtin_altivec_vsrdbi: 3307 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3308 case PPC::BI__builtin_vsx_xxpermx: 3309 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 3310 #define CUSTOM_BUILTIN(Name, Types, Acc) \ 3311 case PPC::BI__builtin_##Name: \ 3312 return SemaBuiltinPPCMMACall(TheCall, Types); 3313 #include "clang/Basic/BuiltinsPPC.def" 3314 } 3315 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3316 } 3317 3318 // Check if the given type is a non-pointer PPC MMA type. This function is used 3319 // in Sema to prevent invalid uses of restricted PPC MMA types. 3320 bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) { 3321 if (Type->isPointerType() || Type->isArrayType()) 3322 return false; 3323 3324 QualType CoreType = Type.getCanonicalType().getUnqualifiedType(); 3325 #define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty 3326 if (false 3327 #include "clang/Basic/PPCTypes.def" 3328 ) { 3329 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type); 3330 return true; 3331 } 3332 return false; 3333 } 3334 3335 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, 3336 CallExpr *TheCall) { 3337 // position of memory order and scope arguments in the builtin 3338 unsigned OrderIndex, ScopeIndex; 3339 switch (BuiltinID) { 3340 case AMDGPU::BI__builtin_amdgcn_atomic_inc32: 3341 case AMDGPU::BI__builtin_amdgcn_atomic_inc64: 3342 case AMDGPU::BI__builtin_amdgcn_atomic_dec32: 3343 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: 3344 OrderIndex = 2; 3345 ScopeIndex = 3; 3346 break; 3347 case AMDGPU::BI__builtin_amdgcn_fence: 3348 OrderIndex = 0; 3349 ScopeIndex = 1; 3350 break; 3351 default: 3352 return false; 3353 } 3354 3355 ExprResult Arg = TheCall->getArg(OrderIndex); 3356 auto ArgExpr = Arg.get(); 3357 Expr::EvalResult ArgResult; 3358 3359 if (!ArgExpr->EvaluateAsInt(ArgResult, Context)) 3360 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int) 3361 << ArgExpr->getType(); 3362 int ord = ArgResult.Val.getInt().getZExtValue(); 3363 3364 // Check valididty of memory ordering as per C11 / C++11's memody model. 
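  // Illustrative note (added commentary): only the acquire, release, acq_rel
  // and seq_cst C ABI orderings are accepted by the switch below; relaxed (and
  // any other value) falls through to the default case and is diagnosed. For
  // example, a hypothetical call
  //   __builtin_amdgcn_fence(__ATOMIC_RELAXED, "workgroup")
  // would produce warn_atomic_op_has_invalid_memory_order.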
3365 switch (static_cast<llvm::AtomicOrderingCABI>(ord)) { 3366 case llvm::AtomicOrderingCABI::acquire: 3367 case llvm::AtomicOrderingCABI::release: 3368 case llvm::AtomicOrderingCABI::acq_rel: 3369 case llvm::AtomicOrderingCABI::seq_cst: 3370 break; 3371 default: { 3372 return Diag(ArgExpr->getBeginLoc(), 3373 diag::warn_atomic_op_has_invalid_memory_order) 3374 << ArgExpr->getSourceRange(); 3375 } 3376 } 3377 3378 Arg = TheCall->getArg(ScopeIndex); 3379 ArgExpr = Arg.get(); 3380 Expr::EvalResult ArgResult1; 3381 // Check that sync scope is a constant literal 3382 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 3383 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 3384 << ArgExpr->getType(); 3385 3386 return false; 3387 } 3388 3389 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 3390 CallExpr *TheCall) { 3391 if (BuiltinID == SystemZ::BI__builtin_tabort) { 3392 Expr *Arg = TheCall->getArg(0); 3393 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 3394 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 3395 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 3396 << Arg->getSourceRange(); 3397 } 3398 3399 // For intrinsics which take an immediate value as part of the instruction, 3400 // range check them here. 3401 unsigned i = 0, l = 0, u = 0; 3402 switch (BuiltinID) { 3403 default: return false; 3404 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3405 case SystemZ::BI__builtin_s390_verimb: 3406 case SystemZ::BI__builtin_s390_verimh: 3407 case SystemZ::BI__builtin_s390_verimf: 3408 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3409 case SystemZ::BI__builtin_s390_vfaeb: 3410 case SystemZ::BI__builtin_s390_vfaeh: 3411 case SystemZ::BI__builtin_s390_vfaef: 3412 case SystemZ::BI__builtin_s390_vfaebs: 3413 case SystemZ::BI__builtin_s390_vfaehs: 3414 case SystemZ::BI__builtin_s390_vfaefs: 3415 case SystemZ::BI__builtin_s390_vfaezb: 3416 case SystemZ::BI__builtin_s390_vfaezh: 3417 case SystemZ::BI__builtin_s390_vfaezf: 3418 case SystemZ::BI__builtin_s390_vfaezbs: 3419 case SystemZ::BI__builtin_s390_vfaezhs: 3420 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3421 case SystemZ::BI__builtin_s390_vfisb: 3422 case SystemZ::BI__builtin_s390_vfidb: 3423 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3424 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3425 case SystemZ::BI__builtin_s390_vftcisb: 3426 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3427 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3428 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3429 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3430 case SystemZ::BI__builtin_s390_vstrcb: 3431 case SystemZ::BI__builtin_s390_vstrch: 3432 case SystemZ::BI__builtin_s390_vstrcf: 3433 case SystemZ::BI__builtin_s390_vstrczb: 3434 case SystemZ::BI__builtin_s390_vstrczh: 3435 case SystemZ::BI__builtin_s390_vstrczf: 3436 case SystemZ::BI__builtin_s390_vstrcbs: 3437 case SystemZ::BI__builtin_s390_vstrchs: 3438 case SystemZ::BI__builtin_s390_vstrcfs: 3439 case SystemZ::BI__builtin_s390_vstrczbs: 3440 case SystemZ::BI__builtin_s390_vstrczhs: 3441 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3442 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3443 case SystemZ::BI__builtin_s390_vfminsb: 3444 case SystemZ::BI__builtin_s390_vfmaxsb: 3445 case 
SystemZ::BI__builtin_s390_vfmindb: 3446 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3447 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 3448 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 3449 } 3450 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3451 } 3452 3453 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 3454 /// This checks that the target supports __builtin_cpu_supports and 3455 /// that the string argument is constant and valid. 3456 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 3457 CallExpr *TheCall) { 3458 Expr *Arg = TheCall->getArg(0); 3459 3460 // Check if the argument is a string literal. 3461 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3462 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3463 << Arg->getSourceRange(); 3464 3465 // Check the contents of the string. 3466 StringRef Feature = 3467 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3468 if (!TI.validateCpuSupports(Feature)) 3469 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 3470 << Arg->getSourceRange(); 3471 return false; 3472 } 3473 3474 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 3475 /// This checks that the target supports __builtin_cpu_is and 3476 /// that the string argument is constant and valid. 3477 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 3478 Expr *Arg = TheCall->getArg(0); 3479 3480 // Check if the argument is a string literal. 3481 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3482 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3483 << Arg->getSourceRange(); 3484 3485 // Check the contents of the string. 3486 StringRef Feature = 3487 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3488 if (!TI.validateCpuIs(Feature)) 3489 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 3490 << Arg->getSourceRange(); 3491 return false; 3492 } 3493 3494 // Check if the rounding mode is legal. 3495 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 3496 // Indicates if this instruction has rounding control or just SAE. 
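  // Illustrative note (added commentary): ArgNum below is the position of the
  // rounding/SAE immediate for each builtin, and HasRC records whether the
  // instruction takes a full rounding-control field or only
  // suppress-all-exceptions (SAE). Using the usual <immintrin.h> encodings,
  // 4 is _MM_FROUND_CUR_DIRECTION and 8 is _MM_FROUND_NO_EXC; the acceptance
  // logic at the end of this function spells out which combinations are valid.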
3497 bool HasRC = false; 3498 3499 unsigned ArgNum = 0; 3500 switch (BuiltinID) { 3501 default: 3502 return false; 3503 case X86::BI__builtin_ia32_vcvttsd2si32: 3504 case X86::BI__builtin_ia32_vcvttsd2si64: 3505 case X86::BI__builtin_ia32_vcvttsd2usi32: 3506 case X86::BI__builtin_ia32_vcvttsd2usi64: 3507 case X86::BI__builtin_ia32_vcvttss2si32: 3508 case X86::BI__builtin_ia32_vcvttss2si64: 3509 case X86::BI__builtin_ia32_vcvttss2usi32: 3510 case X86::BI__builtin_ia32_vcvttss2usi64: 3511 ArgNum = 1; 3512 break; 3513 case X86::BI__builtin_ia32_maxpd512: 3514 case X86::BI__builtin_ia32_maxps512: 3515 case X86::BI__builtin_ia32_minpd512: 3516 case X86::BI__builtin_ia32_minps512: 3517 ArgNum = 2; 3518 break; 3519 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3520 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3521 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3522 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3523 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3524 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3525 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3526 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3527 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3528 case X86::BI__builtin_ia32_exp2pd_mask: 3529 case X86::BI__builtin_ia32_exp2ps_mask: 3530 case X86::BI__builtin_ia32_getexppd512_mask: 3531 case X86::BI__builtin_ia32_getexpps512_mask: 3532 case X86::BI__builtin_ia32_rcp28pd_mask: 3533 case X86::BI__builtin_ia32_rcp28ps_mask: 3534 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3535 case X86::BI__builtin_ia32_rsqrt28ps_mask: 3536 case X86::BI__builtin_ia32_vcomisd: 3537 case X86::BI__builtin_ia32_vcomiss: 3538 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3539 ArgNum = 3; 3540 break; 3541 case X86::BI__builtin_ia32_cmppd512_mask: 3542 case X86::BI__builtin_ia32_cmpps512_mask: 3543 case X86::BI__builtin_ia32_cmpsd_mask: 3544 case X86::BI__builtin_ia32_cmpss_mask: 3545 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3546 case X86::BI__builtin_ia32_getexpsd128_round_mask: 3547 case X86::BI__builtin_ia32_getexpss128_round_mask: 3548 case X86::BI__builtin_ia32_getmantpd512_mask: 3549 case X86::BI__builtin_ia32_getmantps512_mask: 3550 case X86::BI__builtin_ia32_maxsd_round_mask: 3551 case X86::BI__builtin_ia32_maxss_round_mask: 3552 case X86::BI__builtin_ia32_minsd_round_mask: 3553 case X86::BI__builtin_ia32_minss_round_mask: 3554 case X86::BI__builtin_ia32_rcp28sd_round_mask: 3555 case X86::BI__builtin_ia32_rcp28ss_round_mask: 3556 case X86::BI__builtin_ia32_reducepd512_mask: 3557 case X86::BI__builtin_ia32_reduceps512_mask: 3558 case X86::BI__builtin_ia32_rndscalepd_mask: 3559 case X86::BI__builtin_ia32_rndscaleps_mask: 3560 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 3561 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 3562 ArgNum = 4; 3563 break; 3564 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3565 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3566 case X86::BI__builtin_ia32_fixupimmps512_mask: 3567 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3568 case X86::BI__builtin_ia32_fixupimmsd_mask: 3569 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3570 case X86::BI__builtin_ia32_fixupimmss_mask: 3571 case X86::BI__builtin_ia32_fixupimmss_maskz: 3572 case X86::BI__builtin_ia32_getmantsd_round_mask: 3573 case X86::BI__builtin_ia32_getmantss_round_mask: 3574 case X86::BI__builtin_ia32_rangepd512_mask: 3575 case X86::BI__builtin_ia32_rangeps512_mask: 3576 case X86::BI__builtin_ia32_rangesd128_round_mask: 3577 case X86::BI__builtin_ia32_rangess128_round_mask: 3578 case 
X86::BI__builtin_ia32_reducesd_mask: 3579 case X86::BI__builtin_ia32_reducess_mask: 3580 case X86::BI__builtin_ia32_rndscalesd_round_mask: 3581 case X86::BI__builtin_ia32_rndscaless_round_mask: 3582 ArgNum = 5; 3583 break; 3584 case X86::BI__builtin_ia32_vcvtsd2si64: 3585 case X86::BI__builtin_ia32_vcvtsd2si32: 3586 case X86::BI__builtin_ia32_vcvtsd2usi32: 3587 case X86::BI__builtin_ia32_vcvtsd2usi64: 3588 case X86::BI__builtin_ia32_vcvtss2si32: 3589 case X86::BI__builtin_ia32_vcvtss2si64: 3590 case X86::BI__builtin_ia32_vcvtss2usi32: 3591 case X86::BI__builtin_ia32_vcvtss2usi64: 3592 case X86::BI__builtin_ia32_sqrtpd512: 3593 case X86::BI__builtin_ia32_sqrtps512: 3594 ArgNum = 1; 3595 HasRC = true; 3596 break; 3597 case X86::BI__builtin_ia32_addpd512: 3598 case X86::BI__builtin_ia32_addps512: 3599 case X86::BI__builtin_ia32_divpd512: 3600 case X86::BI__builtin_ia32_divps512: 3601 case X86::BI__builtin_ia32_mulpd512: 3602 case X86::BI__builtin_ia32_mulps512: 3603 case X86::BI__builtin_ia32_subpd512: 3604 case X86::BI__builtin_ia32_subps512: 3605 case X86::BI__builtin_ia32_cvtsi2sd64: 3606 case X86::BI__builtin_ia32_cvtsi2ss32: 3607 case X86::BI__builtin_ia32_cvtsi2ss64: 3608 case X86::BI__builtin_ia32_cvtusi2sd64: 3609 case X86::BI__builtin_ia32_cvtusi2ss32: 3610 case X86::BI__builtin_ia32_cvtusi2ss64: 3611 ArgNum = 2; 3612 HasRC = true; 3613 break; 3614 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 3615 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 3616 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 3617 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 3618 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 3619 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 3620 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 3621 case X86::BI__builtin_ia32_cvtps2dq512_mask: 3622 case X86::BI__builtin_ia32_cvtps2qq512_mask: 3623 case X86::BI__builtin_ia32_cvtps2udq512_mask: 3624 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 3625 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 3626 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 3627 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 3628 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 3629 ArgNum = 3; 3630 HasRC = true; 3631 break; 3632 case X86::BI__builtin_ia32_addss_round_mask: 3633 case X86::BI__builtin_ia32_addsd_round_mask: 3634 case X86::BI__builtin_ia32_divss_round_mask: 3635 case X86::BI__builtin_ia32_divsd_round_mask: 3636 case X86::BI__builtin_ia32_mulss_round_mask: 3637 case X86::BI__builtin_ia32_mulsd_round_mask: 3638 case X86::BI__builtin_ia32_subss_round_mask: 3639 case X86::BI__builtin_ia32_subsd_round_mask: 3640 case X86::BI__builtin_ia32_scalefpd512_mask: 3641 case X86::BI__builtin_ia32_scalefps512_mask: 3642 case X86::BI__builtin_ia32_scalefsd_round_mask: 3643 case X86::BI__builtin_ia32_scalefss_round_mask: 3644 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 3645 case X86::BI__builtin_ia32_sqrtsd_round_mask: 3646 case X86::BI__builtin_ia32_sqrtss_round_mask: 3647 case X86::BI__builtin_ia32_vfmaddsd3_mask: 3648 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 3649 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 3650 case X86::BI__builtin_ia32_vfmaddss3_mask: 3651 case X86::BI__builtin_ia32_vfmaddss3_maskz: 3652 case X86::BI__builtin_ia32_vfmaddss3_mask3: 3653 case X86::BI__builtin_ia32_vfmaddpd512_mask: 3654 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 3655 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 3656 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 3657 case X86::BI__builtin_ia32_vfmaddps512_mask: 3658 case X86::BI__builtin_ia32_vfmaddps512_maskz: 3659 case 
X86::BI__builtin_ia32_vfmaddps512_mask3: 3660 case X86::BI__builtin_ia32_vfmsubps512_mask3: 3661 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 3662 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 3663 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 3664 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 3665 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 3666 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 3667 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 3668 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 3669 ArgNum = 4; 3670 HasRC = true; 3671 break; 3672 } 3673 3674 llvm::APSInt Result; 3675 3676 // We can't check the value of a dependent argument. 3677 Expr *Arg = TheCall->getArg(ArgNum); 3678 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3679 return false; 3680 3681 // Check constant-ness first. 3682 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3683 return true; 3684 3685 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 3686 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 3687 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding 3688 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 3689 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 3690 Result == 8/*ROUND_NO_EXC*/ || 3691 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || 3692 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 3693 return false; 3694 3695 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 3696 << Arg->getSourceRange(); 3697 } 3698 3699 // Check if the gather/scatter scale is legal. 3700 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 3701 CallExpr *TheCall) { 3702 unsigned ArgNum = 0; 3703 switch (BuiltinID) { 3704 default: 3705 return false; 3706 case X86::BI__builtin_ia32_gatherpfdpd: 3707 case X86::BI__builtin_ia32_gatherpfdps: 3708 case X86::BI__builtin_ia32_gatherpfqpd: 3709 case X86::BI__builtin_ia32_gatherpfqps: 3710 case X86::BI__builtin_ia32_scatterpfdpd: 3711 case X86::BI__builtin_ia32_scatterpfdps: 3712 case X86::BI__builtin_ia32_scatterpfqpd: 3713 case X86::BI__builtin_ia32_scatterpfqps: 3714 ArgNum = 3; 3715 break; 3716 case X86::BI__builtin_ia32_gatherd_pd: 3717 case X86::BI__builtin_ia32_gatherd_pd256: 3718 case X86::BI__builtin_ia32_gatherq_pd: 3719 case X86::BI__builtin_ia32_gatherq_pd256: 3720 case X86::BI__builtin_ia32_gatherd_ps: 3721 case X86::BI__builtin_ia32_gatherd_ps256: 3722 case X86::BI__builtin_ia32_gatherq_ps: 3723 case X86::BI__builtin_ia32_gatherq_ps256: 3724 case X86::BI__builtin_ia32_gatherd_q: 3725 case X86::BI__builtin_ia32_gatherd_q256: 3726 case X86::BI__builtin_ia32_gatherq_q: 3727 case X86::BI__builtin_ia32_gatherq_q256: 3728 case X86::BI__builtin_ia32_gatherd_d: 3729 case X86::BI__builtin_ia32_gatherd_d256: 3730 case X86::BI__builtin_ia32_gatherq_d: 3731 case X86::BI__builtin_ia32_gatherq_d256: 3732 case X86::BI__builtin_ia32_gather3div2df: 3733 case X86::BI__builtin_ia32_gather3div2di: 3734 case X86::BI__builtin_ia32_gather3div4df: 3735 case X86::BI__builtin_ia32_gather3div4di: 3736 case X86::BI__builtin_ia32_gather3div4sf: 3737 case X86::BI__builtin_ia32_gather3div4si: 3738 case X86::BI__builtin_ia32_gather3div8sf: 3739 case X86::BI__builtin_ia32_gather3div8si: 3740 case X86::BI__builtin_ia32_gather3siv2df: 3741 case X86::BI__builtin_ia32_gather3siv2di: 3742 case X86::BI__builtin_ia32_gather3siv4df: 3743 case X86::BI__builtin_ia32_gather3siv4di: 3744 case 
X86::BI__builtin_ia32_gather3siv4sf: 3745 case X86::BI__builtin_ia32_gather3siv4si: 3746 case X86::BI__builtin_ia32_gather3siv8sf: 3747 case X86::BI__builtin_ia32_gather3siv8si: 3748 case X86::BI__builtin_ia32_gathersiv8df: 3749 case X86::BI__builtin_ia32_gathersiv16sf: 3750 case X86::BI__builtin_ia32_gatherdiv8df: 3751 case X86::BI__builtin_ia32_gatherdiv16sf: 3752 case X86::BI__builtin_ia32_gathersiv8di: 3753 case X86::BI__builtin_ia32_gathersiv16si: 3754 case X86::BI__builtin_ia32_gatherdiv8di: 3755 case X86::BI__builtin_ia32_gatherdiv16si: 3756 case X86::BI__builtin_ia32_scatterdiv2df: 3757 case X86::BI__builtin_ia32_scatterdiv2di: 3758 case X86::BI__builtin_ia32_scatterdiv4df: 3759 case X86::BI__builtin_ia32_scatterdiv4di: 3760 case X86::BI__builtin_ia32_scatterdiv4sf: 3761 case X86::BI__builtin_ia32_scatterdiv4si: 3762 case X86::BI__builtin_ia32_scatterdiv8sf: 3763 case X86::BI__builtin_ia32_scatterdiv8si: 3764 case X86::BI__builtin_ia32_scattersiv2df: 3765 case X86::BI__builtin_ia32_scattersiv2di: 3766 case X86::BI__builtin_ia32_scattersiv4df: 3767 case X86::BI__builtin_ia32_scattersiv4di: 3768 case X86::BI__builtin_ia32_scattersiv4sf: 3769 case X86::BI__builtin_ia32_scattersiv4si: 3770 case X86::BI__builtin_ia32_scattersiv8sf: 3771 case X86::BI__builtin_ia32_scattersiv8si: 3772 case X86::BI__builtin_ia32_scattersiv8df: 3773 case X86::BI__builtin_ia32_scattersiv16sf: 3774 case X86::BI__builtin_ia32_scatterdiv8df: 3775 case X86::BI__builtin_ia32_scatterdiv16sf: 3776 case X86::BI__builtin_ia32_scattersiv8di: 3777 case X86::BI__builtin_ia32_scattersiv16si: 3778 case X86::BI__builtin_ia32_scatterdiv8di: 3779 case X86::BI__builtin_ia32_scatterdiv16si: 3780 ArgNum = 4; 3781 break; 3782 } 3783 3784 llvm::APSInt Result; 3785 3786 // We can't check the value of a dependent argument. 3787 Expr *Arg = TheCall->getArg(ArgNum); 3788 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3789 return false; 3790 3791 // Check constant-ness first. 3792 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3793 return true; 3794 3795 if (Result == 1 || Result == 2 || Result == 4 || Result == 8) 3796 return false; 3797 3798 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale) 3799 << Arg->getSourceRange(); 3800 } 3801 3802 enum { TileRegLow = 0, TileRegHigh = 7 }; 3803 3804 bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, 3805 ArrayRef<int> ArgNums) { 3806 for (int ArgNum : ArgNums) { 3807 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh)) 3808 return true; 3809 } 3810 return false; 3811 } 3812 3813 bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall, 3814 ArrayRef<int> ArgNums) { 3815 // Because the max number of tile register is TileRegHigh + 1, so here we use 3816 // each bit to represent the usage of them in bitset. 
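// Illustrative note (added commentary): each constant tile operand sets one
// bit in ArgValues, so a repeated register number is caught on its second
// occurrence. For example, a hypothetical call
//   __builtin_ia32_tdpbssd(1, 1, 2)
// reuses tile register 1 and is diagnosed with
// err_x86_builtin_tile_arg_duplicate.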
  std::bitset<TileRegHigh + 1> ArgValues;
  for (int ArgNum : ArgNums) {
    Expr *Arg = TheCall->getArg(ArgNum);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
      return true;
    int ArgExtValue = Result.getExtValue();
    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
           "Incorrect tile register num.");
    if (ArgValues.test(ArgExtValue))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_x86_builtin_tile_arg_duplicate)
             << TheCall->getArg(ArgNum)->getSourceRange();
    ArgValues.set(ArgExtValue);
  }
  return false;
}

bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
                                                ArrayRef<int> ArgNums) {
  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
         CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
}

bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = TI.getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE, make sure it's valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate, make sure it's
  // valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // If the intrinsic has tile arguments, make sure they are valid.
  if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
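  // Illustrative note (added commentary): as with the other targets, each case
  // below records the operand index 'i' and the inclusive bounds [l, u] for
  // the immediate. For example, a hypothetical
  //   __builtin_ia32_vec_ext_v2si(v, 2)
  // is out of range because the element index must be 0 or 1; the check at the
  // bottom of this function issues a warning that defaults to an error rather
  // than a hard error.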
3900 int i = 0, l = 0, u = 0; 3901 switch (BuiltinID) { 3902 default: 3903 return false; 3904 case X86::BI__builtin_ia32_vec_ext_v2si: 3905 case X86::BI__builtin_ia32_vec_ext_v2di: 3906 case X86::BI__builtin_ia32_vextractf128_pd256: 3907 case X86::BI__builtin_ia32_vextractf128_ps256: 3908 case X86::BI__builtin_ia32_vextractf128_si256: 3909 case X86::BI__builtin_ia32_extract128i256: 3910 case X86::BI__builtin_ia32_extractf64x4_mask: 3911 case X86::BI__builtin_ia32_extracti64x4_mask: 3912 case X86::BI__builtin_ia32_extractf32x8_mask: 3913 case X86::BI__builtin_ia32_extracti32x8_mask: 3914 case X86::BI__builtin_ia32_extractf64x2_256_mask: 3915 case X86::BI__builtin_ia32_extracti64x2_256_mask: 3916 case X86::BI__builtin_ia32_extractf32x4_256_mask: 3917 case X86::BI__builtin_ia32_extracti32x4_256_mask: 3918 i = 1; l = 0; u = 1; 3919 break; 3920 case X86::BI__builtin_ia32_vec_set_v2di: 3921 case X86::BI__builtin_ia32_vinsertf128_pd256: 3922 case X86::BI__builtin_ia32_vinsertf128_ps256: 3923 case X86::BI__builtin_ia32_vinsertf128_si256: 3924 case X86::BI__builtin_ia32_insert128i256: 3925 case X86::BI__builtin_ia32_insertf32x8: 3926 case X86::BI__builtin_ia32_inserti32x8: 3927 case X86::BI__builtin_ia32_insertf64x4: 3928 case X86::BI__builtin_ia32_inserti64x4: 3929 case X86::BI__builtin_ia32_insertf64x2_256: 3930 case X86::BI__builtin_ia32_inserti64x2_256: 3931 case X86::BI__builtin_ia32_insertf32x4_256: 3932 case X86::BI__builtin_ia32_inserti32x4_256: 3933 i = 2; l = 0; u = 1; 3934 break; 3935 case X86::BI__builtin_ia32_vpermilpd: 3936 case X86::BI__builtin_ia32_vec_ext_v4hi: 3937 case X86::BI__builtin_ia32_vec_ext_v4si: 3938 case X86::BI__builtin_ia32_vec_ext_v4sf: 3939 case X86::BI__builtin_ia32_vec_ext_v4di: 3940 case X86::BI__builtin_ia32_extractf32x4_mask: 3941 case X86::BI__builtin_ia32_extracti32x4_mask: 3942 case X86::BI__builtin_ia32_extractf64x2_512_mask: 3943 case X86::BI__builtin_ia32_extracti64x2_512_mask: 3944 i = 1; l = 0; u = 3; 3945 break; 3946 case X86::BI_mm_prefetch: 3947 case X86::BI__builtin_ia32_vec_ext_v8hi: 3948 case X86::BI__builtin_ia32_vec_ext_v8si: 3949 i = 1; l = 0; u = 7; 3950 break; 3951 case X86::BI__builtin_ia32_sha1rnds4: 3952 case X86::BI__builtin_ia32_blendpd: 3953 case X86::BI__builtin_ia32_shufpd: 3954 case X86::BI__builtin_ia32_vec_set_v4hi: 3955 case X86::BI__builtin_ia32_vec_set_v4si: 3956 case X86::BI__builtin_ia32_vec_set_v4di: 3957 case X86::BI__builtin_ia32_shuf_f32x4_256: 3958 case X86::BI__builtin_ia32_shuf_f64x2_256: 3959 case X86::BI__builtin_ia32_shuf_i32x4_256: 3960 case X86::BI__builtin_ia32_shuf_i64x2_256: 3961 case X86::BI__builtin_ia32_insertf64x2_512: 3962 case X86::BI__builtin_ia32_inserti64x2_512: 3963 case X86::BI__builtin_ia32_insertf32x4: 3964 case X86::BI__builtin_ia32_inserti32x4: 3965 i = 2; l = 0; u = 3; 3966 break; 3967 case X86::BI__builtin_ia32_vpermil2pd: 3968 case X86::BI__builtin_ia32_vpermil2pd256: 3969 case X86::BI__builtin_ia32_vpermil2ps: 3970 case X86::BI__builtin_ia32_vpermil2ps256: 3971 i = 3; l = 0; u = 3; 3972 break; 3973 case X86::BI__builtin_ia32_cmpb128_mask: 3974 case X86::BI__builtin_ia32_cmpw128_mask: 3975 case X86::BI__builtin_ia32_cmpd128_mask: 3976 case X86::BI__builtin_ia32_cmpq128_mask: 3977 case X86::BI__builtin_ia32_cmpb256_mask: 3978 case X86::BI__builtin_ia32_cmpw256_mask: 3979 case X86::BI__builtin_ia32_cmpd256_mask: 3980 case X86::BI__builtin_ia32_cmpq256_mask: 3981 case X86::BI__builtin_ia32_cmpb512_mask: 3982 case X86::BI__builtin_ia32_cmpw512_mask: 3983 case X86::BI__builtin_ia32_cmpd512_mask: 
3984 case X86::BI__builtin_ia32_cmpq512_mask: 3985 case X86::BI__builtin_ia32_ucmpb128_mask: 3986 case X86::BI__builtin_ia32_ucmpw128_mask: 3987 case X86::BI__builtin_ia32_ucmpd128_mask: 3988 case X86::BI__builtin_ia32_ucmpq128_mask: 3989 case X86::BI__builtin_ia32_ucmpb256_mask: 3990 case X86::BI__builtin_ia32_ucmpw256_mask: 3991 case X86::BI__builtin_ia32_ucmpd256_mask: 3992 case X86::BI__builtin_ia32_ucmpq256_mask: 3993 case X86::BI__builtin_ia32_ucmpb512_mask: 3994 case X86::BI__builtin_ia32_ucmpw512_mask: 3995 case X86::BI__builtin_ia32_ucmpd512_mask: 3996 case X86::BI__builtin_ia32_ucmpq512_mask: 3997 case X86::BI__builtin_ia32_vpcomub: 3998 case X86::BI__builtin_ia32_vpcomuw: 3999 case X86::BI__builtin_ia32_vpcomud: 4000 case X86::BI__builtin_ia32_vpcomuq: 4001 case X86::BI__builtin_ia32_vpcomb: 4002 case X86::BI__builtin_ia32_vpcomw: 4003 case X86::BI__builtin_ia32_vpcomd: 4004 case X86::BI__builtin_ia32_vpcomq: 4005 case X86::BI__builtin_ia32_vec_set_v8hi: 4006 case X86::BI__builtin_ia32_vec_set_v8si: 4007 i = 2; l = 0; u = 7; 4008 break; 4009 case X86::BI__builtin_ia32_vpermilpd256: 4010 case X86::BI__builtin_ia32_roundps: 4011 case X86::BI__builtin_ia32_roundpd: 4012 case X86::BI__builtin_ia32_roundps256: 4013 case X86::BI__builtin_ia32_roundpd256: 4014 case X86::BI__builtin_ia32_getmantpd128_mask: 4015 case X86::BI__builtin_ia32_getmantpd256_mask: 4016 case X86::BI__builtin_ia32_getmantps128_mask: 4017 case X86::BI__builtin_ia32_getmantps256_mask: 4018 case X86::BI__builtin_ia32_getmantpd512_mask: 4019 case X86::BI__builtin_ia32_getmantps512_mask: 4020 case X86::BI__builtin_ia32_vec_ext_v16qi: 4021 case X86::BI__builtin_ia32_vec_ext_v16hi: 4022 i = 1; l = 0; u = 15; 4023 break; 4024 case X86::BI__builtin_ia32_pblendd128: 4025 case X86::BI__builtin_ia32_blendps: 4026 case X86::BI__builtin_ia32_blendpd256: 4027 case X86::BI__builtin_ia32_shufpd256: 4028 case X86::BI__builtin_ia32_roundss: 4029 case X86::BI__builtin_ia32_roundsd: 4030 case X86::BI__builtin_ia32_rangepd128_mask: 4031 case X86::BI__builtin_ia32_rangepd256_mask: 4032 case X86::BI__builtin_ia32_rangepd512_mask: 4033 case X86::BI__builtin_ia32_rangeps128_mask: 4034 case X86::BI__builtin_ia32_rangeps256_mask: 4035 case X86::BI__builtin_ia32_rangeps512_mask: 4036 case X86::BI__builtin_ia32_getmantsd_round_mask: 4037 case X86::BI__builtin_ia32_getmantss_round_mask: 4038 case X86::BI__builtin_ia32_vec_set_v16qi: 4039 case X86::BI__builtin_ia32_vec_set_v16hi: 4040 i = 2; l = 0; u = 15; 4041 break; 4042 case X86::BI__builtin_ia32_vec_ext_v32qi: 4043 i = 1; l = 0; u = 31; 4044 break; 4045 case X86::BI__builtin_ia32_cmpps: 4046 case X86::BI__builtin_ia32_cmpss: 4047 case X86::BI__builtin_ia32_cmppd: 4048 case X86::BI__builtin_ia32_cmpsd: 4049 case X86::BI__builtin_ia32_cmpps256: 4050 case X86::BI__builtin_ia32_cmppd256: 4051 case X86::BI__builtin_ia32_cmpps128_mask: 4052 case X86::BI__builtin_ia32_cmppd128_mask: 4053 case X86::BI__builtin_ia32_cmpps256_mask: 4054 case X86::BI__builtin_ia32_cmppd256_mask: 4055 case X86::BI__builtin_ia32_cmpps512_mask: 4056 case X86::BI__builtin_ia32_cmppd512_mask: 4057 case X86::BI__builtin_ia32_cmpsd_mask: 4058 case X86::BI__builtin_ia32_cmpss_mask: 4059 case X86::BI__builtin_ia32_vec_set_v32qi: 4060 i = 2; l = 0; u = 31; 4061 break; 4062 case X86::BI__builtin_ia32_permdf256: 4063 case X86::BI__builtin_ia32_permdi256: 4064 case X86::BI__builtin_ia32_permdf512: 4065 case X86::BI__builtin_ia32_permdi512: 4066 case X86::BI__builtin_ia32_vpermilps: 4067 case X86::BI__builtin_ia32_vpermilps256: 
4068 case X86::BI__builtin_ia32_vpermilpd512: 4069 case X86::BI__builtin_ia32_vpermilps512: 4070 case X86::BI__builtin_ia32_pshufd: 4071 case X86::BI__builtin_ia32_pshufd256: 4072 case X86::BI__builtin_ia32_pshufd512: 4073 case X86::BI__builtin_ia32_pshufhw: 4074 case X86::BI__builtin_ia32_pshufhw256: 4075 case X86::BI__builtin_ia32_pshufhw512: 4076 case X86::BI__builtin_ia32_pshuflw: 4077 case X86::BI__builtin_ia32_pshuflw256: 4078 case X86::BI__builtin_ia32_pshuflw512: 4079 case X86::BI__builtin_ia32_vcvtps2ph: 4080 case X86::BI__builtin_ia32_vcvtps2ph_mask: 4081 case X86::BI__builtin_ia32_vcvtps2ph256: 4082 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 4083 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 4084 case X86::BI__builtin_ia32_rndscaleps_128_mask: 4085 case X86::BI__builtin_ia32_rndscalepd_128_mask: 4086 case X86::BI__builtin_ia32_rndscaleps_256_mask: 4087 case X86::BI__builtin_ia32_rndscalepd_256_mask: 4088 case X86::BI__builtin_ia32_rndscaleps_mask: 4089 case X86::BI__builtin_ia32_rndscalepd_mask: 4090 case X86::BI__builtin_ia32_reducepd128_mask: 4091 case X86::BI__builtin_ia32_reducepd256_mask: 4092 case X86::BI__builtin_ia32_reducepd512_mask: 4093 case X86::BI__builtin_ia32_reduceps128_mask: 4094 case X86::BI__builtin_ia32_reduceps256_mask: 4095 case X86::BI__builtin_ia32_reduceps512_mask: 4096 case X86::BI__builtin_ia32_prold512: 4097 case X86::BI__builtin_ia32_prolq512: 4098 case X86::BI__builtin_ia32_prold128: 4099 case X86::BI__builtin_ia32_prold256: 4100 case X86::BI__builtin_ia32_prolq128: 4101 case X86::BI__builtin_ia32_prolq256: 4102 case X86::BI__builtin_ia32_prord512: 4103 case X86::BI__builtin_ia32_prorq512: 4104 case X86::BI__builtin_ia32_prord128: 4105 case X86::BI__builtin_ia32_prord256: 4106 case X86::BI__builtin_ia32_prorq128: 4107 case X86::BI__builtin_ia32_prorq256: 4108 case X86::BI__builtin_ia32_fpclasspd128_mask: 4109 case X86::BI__builtin_ia32_fpclasspd256_mask: 4110 case X86::BI__builtin_ia32_fpclassps128_mask: 4111 case X86::BI__builtin_ia32_fpclassps256_mask: 4112 case X86::BI__builtin_ia32_fpclassps512_mask: 4113 case X86::BI__builtin_ia32_fpclasspd512_mask: 4114 case X86::BI__builtin_ia32_fpclasssd_mask: 4115 case X86::BI__builtin_ia32_fpclassss_mask: 4116 case X86::BI__builtin_ia32_pslldqi128_byteshift: 4117 case X86::BI__builtin_ia32_pslldqi256_byteshift: 4118 case X86::BI__builtin_ia32_pslldqi512_byteshift: 4119 case X86::BI__builtin_ia32_psrldqi128_byteshift: 4120 case X86::BI__builtin_ia32_psrldqi256_byteshift: 4121 case X86::BI__builtin_ia32_psrldqi512_byteshift: 4122 case X86::BI__builtin_ia32_kshiftliqi: 4123 case X86::BI__builtin_ia32_kshiftlihi: 4124 case X86::BI__builtin_ia32_kshiftlisi: 4125 case X86::BI__builtin_ia32_kshiftlidi: 4126 case X86::BI__builtin_ia32_kshiftriqi: 4127 case X86::BI__builtin_ia32_kshiftrihi: 4128 case X86::BI__builtin_ia32_kshiftrisi: 4129 case X86::BI__builtin_ia32_kshiftridi: 4130 i = 1; l = 0; u = 255; 4131 break; 4132 case X86::BI__builtin_ia32_vperm2f128_pd256: 4133 case X86::BI__builtin_ia32_vperm2f128_ps256: 4134 case X86::BI__builtin_ia32_vperm2f128_si256: 4135 case X86::BI__builtin_ia32_permti256: 4136 case X86::BI__builtin_ia32_pblendw128: 4137 case X86::BI__builtin_ia32_pblendw256: 4138 case X86::BI__builtin_ia32_blendps256: 4139 case X86::BI__builtin_ia32_pblendd256: 4140 case X86::BI__builtin_ia32_palignr128: 4141 case X86::BI__builtin_ia32_palignr256: 4142 case X86::BI__builtin_ia32_palignr512: 4143 case X86::BI__builtin_ia32_alignq512: 4144 case X86::BI__builtin_ia32_alignd512: 4145 case 
X86::BI__builtin_ia32_alignd128: 4146 case X86::BI__builtin_ia32_alignd256: 4147 case X86::BI__builtin_ia32_alignq128: 4148 case X86::BI__builtin_ia32_alignq256: 4149 case X86::BI__builtin_ia32_vcomisd: 4150 case X86::BI__builtin_ia32_vcomiss: 4151 case X86::BI__builtin_ia32_shuf_f32x4: 4152 case X86::BI__builtin_ia32_shuf_f64x2: 4153 case X86::BI__builtin_ia32_shuf_i32x4: 4154 case X86::BI__builtin_ia32_shuf_i64x2: 4155 case X86::BI__builtin_ia32_shufpd512: 4156 case X86::BI__builtin_ia32_shufps: 4157 case X86::BI__builtin_ia32_shufps256: 4158 case X86::BI__builtin_ia32_shufps512: 4159 case X86::BI__builtin_ia32_dbpsadbw128: 4160 case X86::BI__builtin_ia32_dbpsadbw256: 4161 case X86::BI__builtin_ia32_dbpsadbw512: 4162 case X86::BI__builtin_ia32_vpshldd128: 4163 case X86::BI__builtin_ia32_vpshldd256: 4164 case X86::BI__builtin_ia32_vpshldd512: 4165 case X86::BI__builtin_ia32_vpshldq128: 4166 case X86::BI__builtin_ia32_vpshldq256: 4167 case X86::BI__builtin_ia32_vpshldq512: 4168 case X86::BI__builtin_ia32_vpshldw128: 4169 case X86::BI__builtin_ia32_vpshldw256: 4170 case X86::BI__builtin_ia32_vpshldw512: 4171 case X86::BI__builtin_ia32_vpshrdd128: 4172 case X86::BI__builtin_ia32_vpshrdd256: 4173 case X86::BI__builtin_ia32_vpshrdd512: 4174 case X86::BI__builtin_ia32_vpshrdq128: 4175 case X86::BI__builtin_ia32_vpshrdq256: 4176 case X86::BI__builtin_ia32_vpshrdq512: 4177 case X86::BI__builtin_ia32_vpshrdw128: 4178 case X86::BI__builtin_ia32_vpshrdw256: 4179 case X86::BI__builtin_ia32_vpshrdw512: 4180 i = 2; l = 0; u = 255; 4181 break; 4182 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4183 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4184 case X86::BI__builtin_ia32_fixupimmps512_mask: 4185 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4186 case X86::BI__builtin_ia32_fixupimmsd_mask: 4187 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4188 case X86::BI__builtin_ia32_fixupimmss_mask: 4189 case X86::BI__builtin_ia32_fixupimmss_maskz: 4190 case X86::BI__builtin_ia32_fixupimmpd128_mask: 4191 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 4192 case X86::BI__builtin_ia32_fixupimmpd256_mask: 4193 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 4194 case X86::BI__builtin_ia32_fixupimmps128_mask: 4195 case X86::BI__builtin_ia32_fixupimmps128_maskz: 4196 case X86::BI__builtin_ia32_fixupimmps256_mask: 4197 case X86::BI__builtin_ia32_fixupimmps256_maskz: 4198 case X86::BI__builtin_ia32_pternlogd512_mask: 4199 case X86::BI__builtin_ia32_pternlogd512_maskz: 4200 case X86::BI__builtin_ia32_pternlogq512_mask: 4201 case X86::BI__builtin_ia32_pternlogq512_maskz: 4202 case X86::BI__builtin_ia32_pternlogd128_mask: 4203 case X86::BI__builtin_ia32_pternlogd128_maskz: 4204 case X86::BI__builtin_ia32_pternlogd256_mask: 4205 case X86::BI__builtin_ia32_pternlogd256_maskz: 4206 case X86::BI__builtin_ia32_pternlogq128_mask: 4207 case X86::BI__builtin_ia32_pternlogq128_maskz: 4208 case X86::BI__builtin_ia32_pternlogq256_mask: 4209 case X86::BI__builtin_ia32_pternlogq256_maskz: 4210 i = 3; l = 0; u = 255; 4211 break; 4212 case X86::BI__builtin_ia32_gatherpfdpd: 4213 case X86::BI__builtin_ia32_gatherpfdps: 4214 case X86::BI__builtin_ia32_gatherpfqpd: 4215 case X86::BI__builtin_ia32_gatherpfqps: 4216 case X86::BI__builtin_ia32_scatterpfdpd: 4217 case X86::BI__builtin_ia32_scatterpfdps: 4218 case X86::BI__builtin_ia32_scatterpfqpd: 4219 case X86::BI__builtin_ia32_scatterpfqps: 4220 i = 4; l = 2; u = 3; 4221 break; 4222 case X86::BI__builtin_ia32_reducesd_mask: 4223 case X86::BI__builtin_ia32_reducess_mask: 4224 case 
X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
    i = 4; l = 0; u = 255;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have
  // out-of-range values. These still need to code-generate, but they don't
  // necessarily need to make any sense. We use a warning that defaults to an
  // error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the
/// FormatStringInfo parameter with the FormatAttr's correct format_idx and
/// firstDataArg. Returns true when the format fits the function and the
/// FormatStringInfo has been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               FormatStringInfo *FSI) {
  FSI->HasVAListArg = Format->getFirstArg() == 0;
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    if (FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability =
          Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
              dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
                dyn_cast<InitListExpr>(CLE->getInitializer()))
          Expr = ILE->getInit(0);
  }

  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}

static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                              << ArgExpr->getSourceRange());
}

bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, &FSI)) {
    Idx = FSI.FormatIdx;
    return true;
  }
  return false;
}

/// Diagnose use of a %s directive in an NSString which is being passed
/// as a format string to a formatting method.
4309 static void 4310 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 4311 const NamedDecl *FDecl, 4312 Expr **Args, 4313 unsigned NumArgs) { 4314 unsigned Idx = 0; 4315 bool Format = false; 4316 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 4317 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 4318 Idx = 2; 4319 Format = true; 4320 } 4321 else 4322 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4323 if (S.GetFormatNSStringIdx(I, Idx)) { 4324 Format = true; 4325 break; 4326 } 4327 } 4328 if (!Format || NumArgs <= Idx) 4329 return; 4330 const Expr *FormatExpr = Args[Idx]; 4331 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 4332 FormatExpr = CSCE->getSubExpr(); 4333 const StringLiteral *FormatString; 4334 if (const ObjCStringLiteral *OSL = 4335 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 4336 FormatString = OSL->getString(); 4337 else 4338 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 4339 if (!FormatString) 4340 return; 4341 if (S.FormatStringHasSArg(FormatString)) { 4342 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 4343 << "%s" << 1 << 1; 4344 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 4345 << FDecl->getDeclName(); 4346 } 4347 } 4348 4349 /// Determine whether the given type has a non-null nullability annotation. 4350 static bool isNonNullType(ASTContext &ctx, QualType type) { 4351 if (auto nullability = type->getNullability(ctx)) 4352 return *nullability == NullabilityKind::NonNull; 4353 4354 return false; 4355 } 4356 4357 static void CheckNonNullArguments(Sema &S, 4358 const NamedDecl *FDecl, 4359 const FunctionProtoType *Proto, 4360 ArrayRef<const Expr *> Args, 4361 SourceLocation CallSiteLoc) { 4362 assert((FDecl || Proto) && "Need a function declaration or prototype"); 4363 4364 // Already checked by by constant evaluator. 4365 if (S.isConstantEvaluated()) 4366 return; 4367 // Check the attributes attached to the method/function itself. 4368 llvm::SmallBitVector NonNullArgs; 4369 if (FDecl) { 4370 // Handle the nonnull attribute on the function/method declaration itself. 4371 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 4372 if (!NonNull->args_size()) { 4373 // Easy case: all pointer arguments are nonnull. 4374 for (const auto *Arg : Args) 4375 if (S.isValidPointerAttrType(Arg->getType())) 4376 CheckNonNullArgument(S, Arg, CallSiteLoc); 4377 return; 4378 } 4379 4380 for (const ParamIdx &Idx : NonNull->args()) { 4381 unsigned IdxAST = Idx.getASTIndex(); 4382 if (IdxAST >= Args.size()) 4383 continue; 4384 if (NonNullArgs.empty()) 4385 NonNullArgs.resize(Args.size()); 4386 NonNullArgs.set(IdxAST); 4387 } 4388 } 4389 } 4390 4391 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 4392 // Handle the nonnull attribute on the parameters of the 4393 // function/method. 
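    // Illustrative note (added commentary): a parameter can be marked either
    // with __attribute__((nonnull)) on the parameter itself or with a
    // _Nonnull type qualifier; both paths set the corresponding bit in
    // NonNullArgs. For example, given a hypothetical declaration
    //   void use(int *_Nonnull p);
    // a call use(NULL) is flagged below via CheckNonNullArgument, which emits
    // warn_null_arg.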
4394 ArrayRef<ParmVarDecl*> parms; 4395 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 4396 parms = FD->parameters(); 4397 else 4398 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 4399 4400 unsigned ParamIndex = 0; 4401 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 4402 I != E; ++I, ++ParamIndex) { 4403 const ParmVarDecl *PVD = *I; 4404 if (PVD->hasAttr<NonNullAttr>() || 4405 isNonNullType(S.Context, PVD->getType())) { 4406 if (NonNullArgs.empty()) 4407 NonNullArgs.resize(Args.size()); 4408 4409 NonNullArgs.set(ParamIndex); 4410 } 4411 } 4412 } else { 4413 // If we have a non-function, non-method declaration but no 4414 // function prototype, try to dig out the function prototype. 4415 if (!Proto) { 4416 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 4417 QualType type = VD->getType().getNonReferenceType(); 4418 if (auto pointerType = type->getAs<PointerType>()) 4419 type = pointerType->getPointeeType(); 4420 else if (auto blockType = type->getAs<BlockPointerType>()) 4421 type = blockType->getPointeeType(); 4422 // FIXME: data member pointers? 4423 4424 // Dig out the function prototype, if there is one. 4425 Proto = type->getAs<FunctionProtoType>(); 4426 } 4427 } 4428 4429 // Fill in non-null argument information from the nullability 4430 // information on the parameter types (if we have them). 4431 if (Proto) { 4432 unsigned Index = 0; 4433 for (auto paramType : Proto->getParamTypes()) { 4434 if (isNonNullType(S.Context, paramType)) { 4435 if (NonNullArgs.empty()) 4436 NonNullArgs.resize(Args.size()); 4437 4438 NonNullArgs.set(Index); 4439 } 4440 4441 ++Index; 4442 } 4443 } 4444 } 4445 4446 // Check for non-null arguments. 4447 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 4448 ArgIndex != ArgIndexEnd; ++ArgIndex) { 4449 if (NonNullArgs[ArgIndex]) 4450 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 4451 } 4452 } 4453 4454 /// Handles the checks for format strings, non-POD arguments to vararg 4455 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 4456 /// attributes. 4457 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 4458 const Expr *ThisArg, ArrayRef<const Expr *> Args, 4459 bool IsMemberFunction, SourceLocation Loc, 4460 SourceRange Range, VariadicCallType CallType) { 4461 // FIXME: We should check as much as we can in the template definition. 4462 if (CurContext->isDependentContext()) 4463 return; 4464 4465 // Printf and scanf checking. 4466 llvm::SmallBitVector CheckedVarArgs; 4467 if (FDecl) { 4468 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4469 // Only create vector if there are format attributes. 4470 CheckedVarArgs.resize(Args.size()); 4471 4472 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 4473 CheckedVarArgs); 4474 } 4475 } 4476 4477 // Refuse POD arguments that weren't caught by the format string 4478 // checks above. 4479 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 4480 if (CallType != VariadicDoesNotApply && 4481 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 4482 unsigned NumParams = Proto ? Proto->getNumParams() 4483 : FDecl && isa<FunctionDecl>(FDecl) 4484 ? cast<FunctionDecl>(FDecl)->getNumParams() 4485 : FDecl && isa<ObjCMethodDecl>(FDecl) 4486 ? cast<ObjCMethodDecl>(FDecl)->param_size() 4487 : 0; 4488 4489 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 4490 // Args[ArgIdx] can be null in malformed code. 
4491 if (const Expr *Arg = Args[ArgIdx]) { 4492 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 4493 checkVariadicArgument(Arg, CallType); 4494 } 4495 } 4496 } 4497 4498 if (FDecl || Proto) { 4499 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 4500 4501 // Type safety checking. 4502 if (FDecl) { 4503 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 4504 CheckArgumentWithTypeTag(I, Args, Loc); 4505 } 4506 } 4507 4508 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 4509 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 4510 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 4511 if (!Arg->isValueDependent()) { 4512 Expr::EvalResult Align; 4513 if (Arg->EvaluateAsInt(Align, Context)) { 4514 const llvm::APSInt &I = Align.Val.getInt(); 4515 if (!I.isPowerOf2()) 4516 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 4517 << Arg->getSourceRange(); 4518 4519 if (I > Sema::MaximumAlignment) 4520 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 4521 << Arg->getSourceRange() << Sema::MaximumAlignment; 4522 } 4523 } 4524 } 4525 4526 if (FD) 4527 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 4528 } 4529 4530 /// CheckConstructorCall - Check a constructor call for correctness and safety 4531 /// properties not enforced by the C type system. 4532 void Sema::CheckConstructorCall(FunctionDecl *FDecl, 4533 ArrayRef<const Expr *> Args, 4534 const FunctionProtoType *Proto, 4535 SourceLocation Loc) { 4536 VariadicCallType CallType = 4537 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 4538 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 4539 Loc, SourceRange(), CallType); 4540 } 4541 4542 /// CheckFunctionCall - Check a direct function call for various correctness 4543 /// and safety properties not strictly enforced by the C type system. 4544 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 4545 const FunctionProtoType *Proto) { 4546 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 4547 isa<CXXMethodDecl>(FDecl); 4548 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 4549 IsMemberOperatorCall; 4550 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 4551 TheCall->getCallee()); 4552 Expr** Args = TheCall->getArgs(); 4553 unsigned NumArgs = TheCall->getNumArgs(); 4554 4555 Expr *ImplicitThis = nullptr; 4556 if (IsMemberOperatorCall) { 4557 // If this is a call to a member operator, hide the first argument 4558 // from checkCall. 4559 // FIXME: Our choice of AST representation here is less than ideal. 4560 ImplicitThis = Args[0]; 4561 ++Args; 4562 --NumArgs; 4563 } else if (IsMemberFunction) 4564 ImplicitThis = 4565 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 4566 4567 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 4568 IsMemberFunction, TheCall->getRParenLoc(), 4569 TheCall->getCallee()->getSourceRange(), CallType); 4570 4571 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 4572 // None of the checks below are needed for functions that don't have 4573 // simple names (e.g., C++ conversion functions). 
4574 if (!FnInfo) 4575 return false; 4576 4577 CheckTCBEnforcement(TheCall, FDecl); 4578 4579 CheckAbsoluteValueFunction(TheCall, FDecl); 4580 CheckMaxUnsignedZero(TheCall, FDecl); 4581 4582 if (getLangOpts().ObjC) 4583 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 4584 4585 unsigned CMId = FDecl->getMemoryFunctionKind(); 4586 4587 // Handle memory setting and copying functions. 4588 switch (CMId) { 4589 case 0: 4590 return false; 4591 case Builtin::BIstrlcpy: // fallthrough 4592 case Builtin::BIstrlcat: 4593 CheckStrlcpycatArguments(TheCall, FnInfo); 4594 break; 4595 case Builtin::BIstrncat: 4596 CheckStrncatArguments(TheCall, FnInfo); 4597 break; 4598 case Builtin::BIfree: 4599 CheckFreeArguments(TheCall); 4600 break; 4601 default: 4602 CheckMemaccessArguments(TheCall, CMId, FnInfo); 4603 } 4604 4605 return false; 4606 } 4607 4608 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 4609 ArrayRef<const Expr *> Args) { 4610 VariadicCallType CallType = 4611 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 4612 4613 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 4614 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 4615 CallType); 4616 4617 return false; 4618 } 4619 4620 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 4621 const FunctionProtoType *Proto) { 4622 QualType Ty; 4623 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 4624 Ty = V->getType().getNonReferenceType(); 4625 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 4626 Ty = F->getType().getNonReferenceType(); 4627 else 4628 return false; 4629 4630 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 4631 !Ty->isFunctionProtoType()) 4632 return false; 4633 4634 VariadicCallType CallType; 4635 if (!Proto || !Proto->isVariadic()) { 4636 CallType = VariadicDoesNotApply; 4637 } else if (Ty->isBlockPointerType()) { 4638 CallType = VariadicBlock; 4639 } else { // Ty->isFunctionPointerType() 4640 CallType = VariadicFunction; 4641 } 4642 4643 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 4644 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4645 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4646 TheCall->getCallee()->getSourceRange(), CallType); 4647 4648 return false; 4649 } 4650 4651 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 4652 /// such as function pointers returned from functions. 
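///
/// For instance (illustrative), a call written as 'getHandler()(buf, n)',
/// where getHandler() returns a function pointer, is checked through this
/// path because there is no named callee declaration to consult.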
4653 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 4654 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 4655 TheCall->getCallee()); 4656 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 4657 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4658 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4659 TheCall->getCallee()->getSourceRange(), CallType); 4660 4661 return false; 4662 } 4663 4664 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 4665 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 4666 return false; 4667 4668 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 4669 switch (Op) { 4670 case AtomicExpr::AO__c11_atomic_init: 4671 case AtomicExpr::AO__opencl_atomic_init: 4672 llvm_unreachable("There is no ordering argument for an init"); 4673 4674 case AtomicExpr::AO__c11_atomic_load: 4675 case AtomicExpr::AO__opencl_atomic_load: 4676 case AtomicExpr::AO__atomic_load_n: 4677 case AtomicExpr::AO__atomic_load: 4678 return OrderingCABI != llvm::AtomicOrderingCABI::release && 4679 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4680 4681 case AtomicExpr::AO__c11_atomic_store: 4682 case AtomicExpr::AO__opencl_atomic_store: 4683 case AtomicExpr::AO__atomic_store: 4684 case AtomicExpr::AO__atomic_store_n: 4685 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 4686 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 4687 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4688 4689 default: 4690 return true; 4691 } 4692 } 4693 4694 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 4695 AtomicExpr::AtomicOp Op) { 4696 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 4697 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 4698 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 4699 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 4700 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 4701 Op); 4702 } 4703 4704 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 4705 SourceLocation RParenLoc, MultiExprArg Args, 4706 AtomicExpr::AtomicOp Op, 4707 AtomicArgumentOrder ArgOrder) { 4708 // All the non-OpenCL operations take one of the following forms. 4709 // The OpenCL operations take the __c11 forms with one extra argument for 4710 // synchronization scope. 
4711 enum { 4712 // C __c11_atomic_init(A *, C) 4713 Init, 4714 4715 // C __c11_atomic_load(A *, int) 4716 Load, 4717 4718 // void __atomic_load(A *, CP, int) 4719 LoadCopy, 4720 4721 // void __atomic_store(A *, CP, int) 4722 Copy, 4723 4724 // C __c11_atomic_add(A *, M, int) 4725 Arithmetic, 4726 4727 // C __atomic_exchange_n(A *, CP, int) 4728 Xchg, 4729 4730 // void __atomic_exchange(A *, C *, CP, int) 4731 GNUXchg, 4732 4733 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 4734 C11CmpXchg, 4735 4736 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 4737 GNUCmpXchg 4738 } Form = Init; 4739 4740 const unsigned NumForm = GNUCmpXchg + 1; 4741 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 4742 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 4743 // where: 4744 // C is an appropriate type, 4745 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 4746 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 4747 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 4748 // the int parameters are for orderings. 4749 4750 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 4751 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 4752 "need to update code for modified forms"); 4753 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 4754 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 4755 AtomicExpr::AO__atomic_load, 4756 "need to update code for modified C11 atomics"); 4757 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 4758 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 4759 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 4760 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 4761 IsOpenCL; 4762 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 4763 Op == AtomicExpr::AO__atomic_store_n || 4764 Op == AtomicExpr::AO__atomic_exchange_n || 4765 Op == AtomicExpr::AO__atomic_compare_exchange_n; 4766 bool IsAddSub = false; 4767 4768 switch (Op) { 4769 case AtomicExpr::AO__c11_atomic_init: 4770 case AtomicExpr::AO__opencl_atomic_init: 4771 Form = Init; 4772 break; 4773 4774 case AtomicExpr::AO__c11_atomic_load: 4775 case AtomicExpr::AO__opencl_atomic_load: 4776 case AtomicExpr::AO__atomic_load_n: 4777 Form = Load; 4778 break; 4779 4780 case AtomicExpr::AO__atomic_load: 4781 Form = LoadCopy; 4782 break; 4783 4784 case AtomicExpr::AO__c11_atomic_store: 4785 case AtomicExpr::AO__opencl_atomic_store: 4786 case AtomicExpr::AO__atomic_store: 4787 case AtomicExpr::AO__atomic_store_n: 4788 Form = Copy; 4789 break; 4790 4791 case AtomicExpr::AO__c11_atomic_fetch_add: 4792 case AtomicExpr::AO__c11_atomic_fetch_sub: 4793 case AtomicExpr::AO__opencl_atomic_fetch_add: 4794 case AtomicExpr::AO__opencl_atomic_fetch_sub: 4795 case AtomicExpr::AO__atomic_fetch_add: 4796 case AtomicExpr::AO__atomic_fetch_sub: 4797 case AtomicExpr::AO__atomic_add_fetch: 4798 case AtomicExpr::AO__atomic_sub_fetch: 4799 IsAddSub = true; 4800 LLVM_FALLTHROUGH; 4801 case AtomicExpr::AO__c11_atomic_fetch_and: 4802 case AtomicExpr::AO__c11_atomic_fetch_or: 4803 case AtomicExpr::AO__c11_atomic_fetch_xor: 4804 case AtomicExpr::AO__opencl_atomic_fetch_and: 4805 case AtomicExpr::AO__opencl_atomic_fetch_or: 4806 case AtomicExpr::AO__opencl_atomic_fetch_xor: 4807 case AtomicExpr::AO__atomic_fetch_and: 4808 case AtomicExpr::AO__atomic_fetch_or: 4809 case AtomicExpr::AO__atomic_fetch_xor: 4810 case AtomicExpr::AO__atomic_fetch_nand: 4811 case AtomicExpr::AO__atomic_and_fetch: 4812 case AtomicExpr::AO__atomic_or_fetch: 4813 
case AtomicExpr::AO__atomic_xor_fetch: 4814 case AtomicExpr::AO__atomic_nand_fetch: 4815 case AtomicExpr::AO__c11_atomic_fetch_min: 4816 case AtomicExpr::AO__c11_atomic_fetch_max: 4817 case AtomicExpr::AO__opencl_atomic_fetch_min: 4818 case AtomicExpr::AO__opencl_atomic_fetch_max: 4819 case AtomicExpr::AO__atomic_min_fetch: 4820 case AtomicExpr::AO__atomic_max_fetch: 4821 case AtomicExpr::AO__atomic_fetch_min: 4822 case AtomicExpr::AO__atomic_fetch_max: 4823 Form = Arithmetic; 4824 break; 4825 4826 case AtomicExpr::AO__c11_atomic_exchange: 4827 case AtomicExpr::AO__opencl_atomic_exchange: 4828 case AtomicExpr::AO__atomic_exchange_n: 4829 Form = Xchg; 4830 break; 4831 4832 case AtomicExpr::AO__atomic_exchange: 4833 Form = GNUXchg; 4834 break; 4835 4836 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 4837 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 4838 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 4839 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 4840 Form = C11CmpXchg; 4841 break; 4842 4843 case AtomicExpr::AO__atomic_compare_exchange: 4844 case AtomicExpr::AO__atomic_compare_exchange_n: 4845 Form = GNUCmpXchg; 4846 break; 4847 } 4848 4849 unsigned AdjustedNumArgs = NumArgs[Form]; 4850 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init) 4851 ++AdjustedNumArgs; 4852 // Check we have the right number of arguments. 4853 if (Args.size() < AdjustedNumArgs) { 4854 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 4855 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 4856 << ExprRange; 4857 return ExprError(); 4858 } else if (Args.size() > AdjustedNumArgs) { 4859 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 4860 diag::err_typecheck_call_too_many_args) 4861 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 4862 << ExprRange; 4863 return ExprError(); 4864 } 4865 4866 // Inspect the first argument of the atomic operation. 4867 Expr *Ptr = Args[0]; 4868 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 4869 if (ConvertedPtr.isInvalid()) 4870 return ExprError(); 4871 4872 Ptr = ConvertedPtr.get(); 4873 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 4874 if (!pointerType) { 4875 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 4876 << Ptr->getType() << Ptr->getSourceRange(); 4877 return ExprError(); 4878 } 4879 4880 // For a __c11 builtin, this should be a pointer to an _Atomic type. 4881 QualType AtomTy = pointerType->getPointeeType(); // 'A' 4882 QualType ValType = AtomTy; // 'C' 4883 if (IsC11) { 4884 if (!AtomTy->isAtomicType()) { 4885 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 4886 << Ptr->getType() << Ptr->getSourceRange(); 4887 return ExprError(); 4888 } 4889 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 4890 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 4891 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 4892 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 4893 << Ptr->getSourceRange(); 4894 return ExprError(); 4895 } 4896 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 4897 } else if (Form != Load && Form != LoadCopy) { 4898 if (ValType.isConstQualified()) { 4899 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 4900 << Ptr->getType() << Ptr->getSourceRange(); 4901 return ExprError(); 4902 } 4903 } 4904 4905 // For an arithmetic operation, the implied arithmetic must be well-formed. 
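  // For instance (illustrative), __c11_atomic_fetch_add is rejected on an
  // _Atomic(struct S) object because there is no struct addition, while
  // _Atomic(int) and _Atomic(int *) objects are accepted for add/sub.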
4906 if (Form == Arithmetic) { 4907 // gcc does not enforce these rules for GNU atomics, but we do so for sanity. 4908 if (IsAddSub && !ValType->isIntegerType() 4909 && !ValType->isPointerType()) { 4910 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4911 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4912 return ExprError(); 4913 } 4914 if (!IsAddSub && !ValType->isIntegerType()) { 4915 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 4916 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4917 return ExprError(); 4918 } 4919 if (IsC11 && ValType->isPointerType() && 4920 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 4921 diag::err_incomplete_type)) { 4922 return ExprError(); 4923 } 4924 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 4925 // For __atomic_*_n operations, the value type must be a scalar integral or 4926 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 4927 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4928 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4929 return ExprError(); 4930 } 4931 4932 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 4933 !AtomTy->isScalarType()) { 4934 // For GNU atomics, require a trivially-copyable type. This is not part of 4935 // the GNU atomics specification, but we enforce it for sanity. 4936 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 4937 << Ptr->getType() << Ptr->getSourceRange(); 4938 return ExprError(); 4939 } 4940 4941 switch (ValType.getObjCLifetime()) { 4942 case Qualifiers::OCL_None: 4943 case Qualifiers::OCL_ExplicitNone: 4944 // okay 4945 break; 4946 4947 case Qualifiers::OCL_Weak: 4948 case Qualifiers::OCL_Strong: 4949 case Qualifiers::OCL_Autoreleasing: 4950 // FIXME: Can this happen? By this point, ValType should be known 4951 // to be trivially copyable. 4952 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 4953 << ValType << Ptr->getSourceRange(); 4954 return ExprError(); 4955 } 4956 4957 // All atomic operations have an overload which takes a pointer to a volatile 4958 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 4959 // into the result or the other operands. Similarly atomic_load takes a 4960 // pointer to a const 'A'. 4961 ValType.removeLocalVolatile(); 4962 ValType.removeLocalConst(); 4963 QualType ResultType = ValType; 4964 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 4965 Form == Init) 4966 ResultType = Context.VoidTy; 4967 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 4968 ResultType = Context.BoolTy; 4969 4970 // The type of a parameter passed 'by value'. In the GNU atomics, such 4971 // arguments are actually passed as pointers. 
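  // As a small illustration: in __atomic_exchange_n(p, val, order) the new
  // value is passed directly ('CP' is C), whereas in
  // __atomic_exchange(p, &val, &ret, order) it is passed through a pointer
  // ('CP' is C *).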
4972 QualType ByValType = ValType; // 'CP' 4973 bool IsPassedByAddress = false; 4974 if (!IsC11 && !IsN) { 4975 ByValType = Ptr->getType(); 4976 IsPassedByAddress = true; 4977 } 4978 4979 SmallVector<Expr *, 5> APIOrderedArgs; 4980 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 4981 APIOrderedArgs.push_back(Args[0]); 4982 switch (Form) { 4983 case Init: 4984 case Load: 4985 APIOrderedArgs.push_back(Args[1]); // Val1/Order 4986 break; 4987 case LoadCopy: 4988 case Copy: 4989 case Arithmetic: 4990 case Xchg: 4991 APIOrderedArgs.push_back(Args[2]); // Val1 4992 APIOrderedArgs.push_back(Args[1]); // Order 4993 break; 4994 case GNUXchg: 4995 APIOrderedArgs.push_back(Args[2]); // Val1 4996 APIOrderedArgs.push_back(Args[3]); // Val2 4997 APIOrderedArgs.push_back(Args[1]); // Order 4998 break; 4999 case C11CmpXchg: 5000 APIOrderedArgs.push_back(Args[2]); // Val1 5001 APIOrderedArgs.push_back(Args[4]); // Val2 5002 APIOrderedArgs.push_back(Args[1]); // Order 5003 APIOrderedArgs.push_back(Args[3]); // OrderFail 5004 break; 5005 case GNUCmpXchg: 5006 APIOrderedArgs.push_back(Args[2]); // Val1 5007 APIOrderedArgs.push_back(Args[4]); // Val2 5008 APIOrderedArgs.push_back(Args[5]); // Weak 5009 APIOrderedArgs.push_back(Args[1]); // Order 5010 APIOrderedArgs.push_back(Args[3]); // OrderFail 5011 break; 5012 } 5013 } else 5014 APIOrderedArgs.append(Args.begin(), Args.end()); 5015 5016 // The first argument's non-CV pointer type is used to deduce the type of 5017 // subsequent arguments, except for: 5018 // - weak flag (always converted to bool) 5019 // - memory order (always converted to int) 5020 // - scope (always converted to int) 5021 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 5022 QualType Ty; 5023 if (i < NumVals[Form] + 1) { 5024 switch (i) { 5025 case 0: 5026 // The first argument is always a pointer. It has a fixed type. 5027 // It is always dereferenced, a nullptr is undefined. 5028 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5029 // Nothing else to do: we already know all we want about this pointer. 5030 continue; 5031 case 1: 5032 // The second argument is the non-atomic operand. For arithmetic, this 5033 // is always passed by value, and for a compare_exchange it is always 5034 // passed by address. For the rest, GNU uses by-address and C11 uses 5035 // by-value. 5036 assert(Form != Load); 5037 if (Form == Init || (Form == Arithmetic && ValType->isIntegerType())) 5038 Ty = ValType; 5039 else if (Form == Copy || Form == Xchg) { 5040 if (IsPassedByAddress) { 5041 // The value pointer is always dereferenced, a nullptr is undefined. 5042 CheckNonNullArgument(*this, APIOrderedArgs[i], 5043 ExprRange.getBegin()); 5044 } 5045 Ty = ByValType; 5046 } else if (Form == Arithmetic) 5047 Ty = Context.getPointerDiffType(); 5048 else { 5049 Expr *ValArg = APIOrderedArgs[i]; 5050 // The value pointer is always dereferenced, a nullptr is undefined. 5051 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 5052 LangAS AS = LangAS::Default; 5053 // Keep address space of non-atomic pointer type. 5054 if (const PointerType *PtrTy = 5055 ValArg->getType()->getAs<PointerType>()) { 5056 AS = PtrTy->getPointeeType().getAddressSpace(); 5057 } 5058 Ty = Context.getPointerType( 5059 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 5060 } 5061 break; 5062 case 2: 5063 // The third argument to compare_exchange / GNU exchange is the desired 5064 // value, either by-value (for the C11 and *_n variant) or as a pointer. 
5065 if (IsPassedByAddress) 5066 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5067 Ty = ByValType; 5068 break; 5069 case 3: 5070 // The fourth argument to GNU compare_exchange is a 'weak' flag. 5071 Ty = Context.BoolTy; 5072 break; 5073 } 5074 } else { 5075 // The order(s) and scope are always converted to int. 5076 Ty = Context.IntTy; 5077 } 5078 5079 InitializedEntity Entity = 5080 InitializedEntity::InitializeParameter(Context, Ty, false); 5081 ExprResult Arg = APIOrderedArgs[i]; 5082 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5083 if (Arg.isInvalid()) 5084 return true; 5085 APIOrderedArgs[i] = Arg.get(); 5086 } 5087 5088 // Permute the arguments into a 'consistent' order. 5089 SmallVector<Expr*, 5> SubExprs; 5090 SubExprs.push_back(Ptr); 5091 switch (Form) { 5092 case Init: 5093 // Note, AtomicExpr::getVal1() has a special case for this atomic. 5094 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5095 break; 5096 case Load: 5097 SubExprs.push_back(APIOrderedArgs[1]); // Order 5098 break; 5099 case LoadCopy: 5100 case Copy: 5101 case Arithmetic: 5102 case Xchg: 5103 SubExprs.push_back(APIOrderedArgs[2]); // Order 5104 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5105 break; 5106 case GNUXchg: 5107 // Note, AtomicExpr::getVal2() has a special case for this atomic. 5108 SubExprs.push_back(APIOrderedArgs[3]); // Order 5109 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5110 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5111 break; 5112 case C11CmpXchg: 5113 SubExprs.push_back(APIOrderedArgs[3]); // Order 5114 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5115 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 5116 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5117 break; 5118 case GNUCmpXchg: 5119 SubExprs.push_back(APIOrderedArgs[4]); // Order 5120 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5121 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 5122 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5123 SubExprs.push_back(APIOrderedArgs[3]); // Weak 5124 break; 5125 } 5126 5127 if (SubExprs.size() >= 2 && Form != Init) { 5128 if (Optional<llvm::APSInt> Result = 5129 SubExprs[1]->getIntegerConstantExpr(Context)) 5130 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 5131 Diag(SubExprs[1]->getBeginLoc(), 5132 diag::warn_atomic_op_has_invalid_memory_order) 5133 << SubExprs[1]->getSourceRange(); 5134 } 5135 5136 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 5137 auto *Scope = Args[Args.size() - 1]; 5138 if (Optional<llvm::APSInt> Result = 5139 Scope->getIntegerConstantExpr(Context)) { 5140 if (!ScopeModel->isValid(Result->getZExtValue())) 5141 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 5142 << Scope->getSourceRange(); 5143 } 5144 SubExprs.push_back(Scope); 5145 } 5146 5147 AtomicExpr *AE = new (Context) 5148 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 5149 5150 if ((Op == AtomicExpr::AO__c11_atomic_load || 5151 Op == AtomicExpr::AO__c11_atomic_store || 5152 Op == AtomicExpr::AO__opencl_atomic_load || 5153 Op == AtomicExpr::AO__opencl_atomic_store ) && 5154 Context.AtomicUsesUnsupportedLibcall(AE)) 5155 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 5156 << ((Op == AtomicExpr::AO__c11_atomic_load || 5157 Op == AtomicExpr::AO__opencl_atomic_load) 5158 ? 
0 5159 : 1); 5160 5161 if (ValType->isExtIntType()) { 5162 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit); 5163 return ExprError(); 5164 } 5165 5166 return AE; 5167 } 5168 5169 /// checkBuiltinArgument - Given a call to a builtin function, perform 5170 /// normal type-checking on the given argument, updating the call in 5171 /// place. This is useful when a builtin function requires custom 5172 /// type-checking for some of its arguments but not necessarily all of 5173 /// them. 5174 /// 5175 /// Returns true on error. 5176 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 5177 FunctionDecl *Fn = E->getDirectCallee(); 5178 assert(Fn && "builtin call without direct callee!"); 5179 5180 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 5181 InitializedEntity Entity = 5182 InitializedEntity::InitializeParameter(S.Context, Param); 5183 5184 ExprResult Arg = E->getArg(0); 5185 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 5186 if (Arg.isInvalid()) 5187 return true; 5188 5189 E->setArg(ArgIndex, Arg.get()); 5190 return false; 5191 } 5192 5193 /// We have a call to a function like __sync_fetch_and_add, which is an 5194 /// overloaded function based on the pointer type of its first argument. 5195 /// The main BuildCallExpr routines have already promoted the types of 5196 /// arguments because all of these calls are prototyped as void(...). 5197 /// 5198 /// This function goes through and does final semantic checking for these 5199 /// builtins, as well as generating any warnings. 5200 ExprResult 5201 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 5202 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 5203 Expr *Callee = TheCall->getCallee(); 5204 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 5205 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5206 5207 // Ensure that we have at least one argument to do type inference from. 5208 if (TheCall->getNumArgs() < 1) { 5209 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5210 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 5211 return ExprError(); 5212 } 5213 5214 // Inspect the first argument of the atomic builtin. This should always be 5215 // a pointer type, whose element is an integral scalar or pointer type. 5216 // Because it is a pointer type, we don't have to worry about any implicit 5217 // casts here. 5218 // FIXME: We don't allow floating point scalars as input. 
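  // E.g. (illustrative): given 'int x;', __sync_fetch_and_add(&x, 1) deduces
  // the operation type 'int' from the 'int *' first argument and, on a target
  // with 4-byte int, is later rewritten to call __sync_fetch_and_add_4.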
5219 Expr *FirstArg = TheCall->getArg(0); 5220 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 5221 if (FirstArgResult.isInvalid()) 5222 return ExprError(); 5223 FirstArg = FirstArgResult.get(); 5224 TheCall->setArg(0, FirstArg); 5225 5226 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 5227 if (!pointerType) { 5228 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 5229 << FirstArg->getType() << FirstArg->getSourceRange(); 5230 return ExprError(); 5231 } 5232 5233 QualType ValType = pointerType->getPointeeType(); 5234 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5235 !ValType->isBlockPointerType()) { 5236 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 5237 << FirstArg->getType() << FirstArg->getSourceRange(); 5238 return ExprError(); 5239 } 5240 5241 if (ValType.isConstQualified()) { 5242 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 5243 << FirstArg->getType() << FirstArg->getSourceRange(); 5244 return ExprError(); 5245 } 5246 5247 switch (ValType.getObjCLifetime()) { 5248 case Qualifiers::OCL_None: 5249 case Qualifiers::OCL_ExplicitNone: 5250 // okay 5251 break; 5252 5253 case Qualifiers::OCL_Weak: 5254 case Qualifiers::OCL_Strong: 5255 case Qualifiers::OCL_Autoreleasing: 5256 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 5257 << ValType << FirstArg->getSourceRange(); 5258 return ExprError(); 5259 } 5260 5261 // Strip any qualifiers off ValType. 5262 ValType = ValType.getUnqualifiedType(); 5263 5264 // The majority of builtins return a value, but a few have special return 5265 // types, so allow them to override appropriately below. 5266 QualType ResultType = ValType; 5267 5268 // We need to figure out which concrete builtin this maps onto. For example, 5269 // __sync_fetch_and_add with a 2 byte object turns into 5270 // __sync_fetch_and_add_2. 5271 #define BUILTIN_ROW(x) \ 5272 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 5273 Builtin::BI##x##_8, Builtin::BI##x##_16 } 5274 5275 static const unsigned BuiltinIndices[][5] = { 5276 BUILTIN_ROW(__sync_fetch_and_add), 5277 BUILTIN_ROW(__sync_fetch_and_sub), 5278 BUILTIN_ROW(__sync_fetch_and_or), 5279 BUILTIN_ROW(__sync_fetch_and_and), 5280 BUILTIN_ROW(__sync_fetch_and_xor), 5281 BUILTIN_ROW(__sync_fetch_and_nand), 5282 5283 BUILTIN_ROW(__sync_add_and_fetch), 5284 BUILTIN_ROW(__sync_sub_and_fetch), 5285 BUILTIN_ROW(__sync_and_and_fetch), 5286 BUILTIN_ROW(__sync_or_and_fetch), 5287 BUILTIN_ROW(__sync_xor_and_fetch), 5288 BUILTIN_ROW(__sync_nand_and_fetch), 5289 5290 BUILTIN_ROW(__sync_val_compare_and_swap), 5291 BUILTIN_ROW(__sync_bool_compare_and_swap), 5292 BUILTIN_ROW(__sync_lock_test_and_set), 5293 BUILTIN_ROW(__sync_lock_release), 5294 BUILTIN_ROW(__sync_swap) 5295 }; 5296 #undef BUILTIN_ROW 5297 5298 // Determine the index of the size. 5299 unsigned SizeIndex; 5300 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 5301 case 1: SizeIndex = 0; break; 5302 case 2: SizeIndex = 1; break; 5303 case 4: SizeIndex = 2; break; 5304 case 8: SizeIndex = 3; break; 5305 case 16: SizeIndex = 4; break; 5306 default: 5307 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 5308 << FirstArg->getType() << FirstArg->getSourceRange(); 5309 return ExprError(); 5310 } 5311 5312 // Each of these builtins has one pointer argument, followed by some number of 5313 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 5314 // that we ignore. 
Find out which row of BuiltinIndices to read from as well 5315 // as the number of fixed args. 5316 unsigned BuiltinID = FDecl->getBuiltinID(); 5317 unsigned BuiltinIndex, NumFixed = 1; 5318 bool WarnAboutSemanticsChange = false; 5319 switch (BuiltinID) { 5320 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 5321 case Builtin::BI__sync_fetch_and_add: 5322 case Builtin::BI__sync_fetch_and_add_1: 5323 case Builtin::BI__sync_fetch_and_add_2: 5324 case Builtin::BI__sync_fetch_and_add_4: 5325 case Builtin::BI__sync_fetch_and_add_8: 5326 case Builtin::BI__sync_fetch_and_add_16: 5327 BuiltinIndex = 0; 5328 break; 5329 5330 case Builtin::BI__sync_fetch_and_sub: 5331 case Builtin::BI__sync_fetch_and_sub_1: 5332 case Builtin::BI__sync_fetch_and_sub_2: 5333 case Builtin::BI__sync_fetch_and_sub_4: 5334 case Builtin::BI__sync_fetch_and_sub_8: 5335 case Builtin::BI__sync_fetch_and_sub_16: 5336 BuiltinIndex = 1; 5337 break; 5338 5339 case Builtin::BI__sync_fetch_and_or: 5340 case Builtin::BI__sync_fetch_and_or_1: 5341 case Builtin::BI__sync_fetch_and_or_2: 5342 case Builtin::BI__sync_fetch_and_or_4: 5343 case Builtin::BI__sync_fetch_and_or_8: 5344 case Builtin::BI__sync_fetch_and_or_16: 5345 BuiltinIndex = 2; 5346 break; 5347 5348 case Builtin::BI__sync_fetch_and_and: 5349 case Builtin::BI__sync_fetch_and_and_1: 5350 case Builtin::BI__sync_fetch_and_and_2: 5351 case Builtin::BI__sync_fetch_and_and_4: 5352 case Builtin::BI__sync_fetch_and_and_8: 5353 case Builtin::BI__sync_fetch_and_and_16: 5354 BuiltinIndex = 3; 5355 break; 5356 5357 case Builtin::BI__sync_fetch_and_xor: 5358 case Builtin::BI__sync_fetch_and_xor_1: 5359 case Builtin::BI__sync_fetch_and_xor_2: 5360 case Builtin::BI__sync_fetch_and_xor_4: 5361 case Builtin::BI__sync_fetch_and_xor_8: 5362 case Builtin::BI__sync_fetch_and_xor_16: 5363 BuiltinIndex = 4; 5364 break; 5365 5366 case Builtin::BI__sync_fetch_and_nand: 5367 case Builtin::BI__sync_fetch_and_nand_1: 5368 case Builtin::BI__sync_fetch_and_nand_2: 5369 case Builtin::BI__sync_fetch_and_nand_4: 5370 case Builtin::BI__sync_fetch_and_nand_8: 5371 case Builtin::BI__sync_fetch_and_nand_16: 5372 BuiltinIndex = 5; 5373 WarnAboutSemanticsChange = true; 5374 break; 5375 5376 case Builtin::BI__sync_add_and_fetch: 5377 case Builtin::BI__sync_add_and_fetch_1: 5378 case Builtin::BI__sync_add_and_fetch_2: 5379 case Builtin::BI__sync_add_and_fetch_4: 5380 case Builtin::BI__sync_add_and_fetch_8: 5381 case Builtin::BI__sync_add_and_fetch_16: 5382 BuiltinIndex = 6; 5383 break; 5384 5385 case Builtin::BI__sync_sub_and_fetch: 5386 case Builtin::BI__sync_sub_and_fetch_1: 5387 case Builtin::BI__sync_sub_and_fetch_2: 5388 case Builtin::BI__sync_sub_and_fetch_4: 5389 case Builtin::BI__sync_sub_and_fetch_8: 5390 case Builtin::BI__sync_sub_and_fetch_16: 5391 BuiltinIndex = 7; 5392 break; 5393 5394 case Builtin::BI__sync_and_and_fetch: 5395 case Builtin::BI__sync_and_and_fetch_1: 5396 case Builtin::BI__sync_and_and_fetch_2: 5397 case Builtin::BI__sync_and_and_fetch_4: 5398 case Builtin::BI__sync_and_and_fetch_8: 5399 case Builtin::BI__sync_and_and_fetch_16: 5400 BuiltinIndex = 8; 5401 break; 5402 5403 case Builtin::BI__sync_or_and_fetch: 5404 case Builtin::BI__sync_or_and_fetch_1: 5405 case Builtin::BI__sync_or_and_fetch_2: 5406 case Builtin::BI__sync_or_and_fetch_4: 5407 case Builtin::BI__sync_or_and_fetch_8: 5408 case Builtin::BI__sync_or_and_fetch_16: 5409 BuiltinIndex = 9; 5410 break; 5411 5412 case Builtin::BI__sync_xor_and_fetch: 5413 case Builtin::BI__sync_xor_and_fetch_1: 5414 case 
Builtin::BI__sync_xor_and_fetch_2: 5415 case Builtin::BI__sync_xor_and_fetch_4: 5416 case Builtin::BI__sync_xor_and_fetch_8: 5417 case Builtin::BI__sync_xor_and_fetch_16: 5418 BuiltinIndex = 10; 5419 break; 5420 5421 case Builtin::BI__sync_nand_and_fetch: 5422 case Builtin::BI__sync_nand_and_fetch_1: 5423 case Builtin::BI__sync_nand_and_fetch_2: 5424 case Builtin::BI__sync_nand_and_fetch_4: 5425 case Builtin::BI__sync_nand_and_fetch_8: 5426 case Builtin::BI__sync_nand_and_fetch_16: 5427 BuiltinIndex = 11; 5428 WarnAboutSemanticsChange = true; 5429 break; 5430 5431 case Builtin::BI__sync_val_compare_and_swap: 5432 case Builtin::BI__sync_val_compare_and_swap_1: 5433 case Builtin::BI__sync_val_compare_and_swap_2: 5434 case Builtin::BI__sync_val_compare_and_swap_4: 5435 case Builtin::BI__sync_val_compare_and_swap_8: 5436 case Builtin::BI__sync_val_compare_and_swap_16: 5437 BuiltinIndex = 12; 5438 NumFixed = 2; 5439 break; 5440 5441 case Builtin::BI__sync_bool_compare_and_swap: 5442 case Builtin::BI__sync_bool_compare_and_swap_1: 5443 case Builtin::BI__sync_bool_compare_and_swap_2: 5444 case Builtin::BI__sync_bool_compare_and_swap_4: 5445 case Builtin::BI__sync_bool_compare_and_swap_8: 5446 case Builtin::BI__sync_bool_compare_and_swap_16: 5447 BuiltinIndex = 13; 5448 NumFixed = 2; 5449 ResultType = Context.BoolTy; 5450 break; 5451 5452 case Builtin::BI__sync_lock_test_and_set: 5453 case Builtin::BI__sync_lock_test_and_set_1: 5454 case Builtin::BI__sync_lock_test_and_set_2: 5455 case Builtin::BI__sync_lock_test_and_set_4: 5456 case Builtin::BI__sync_lock_test_and_set_8: 5457 case Builtin::BI__sync_lock_test_and_set_16: 5458 BuiltinIndex = 14; 5459 break; 5460 5461 case Builtin::BI__sync_lock_release: 5462 case Builtin::BI__sync_lock_release_1: 5463 case Builtin::BI__sync_lock_release_2: 5464 case Builtin::BI__sync_lock_release_4: 5465 case Builtin::BI__sync_lock_release_8: 5466 case Builtin::BI__sync_lock_release_16: 5467 BuiltinIndex = 15; 5468 NumFixed = 0; 5469 ResultType = Context.VoidTy; 5470 break; 5471 5472 case Builtin::BI__sync_swap: 5473 case Builtin::BI__sync_swap_1: 5474 case Builtin::BI__sync_swap_2: 5475 case Builtin::BI__sync_swap_4: 5476 case Builtin::BI__sync_swap_8: 5477 case Builtin::BI__sync_swap_16: 5478 BuiltinIndex = 16; 5479 break; 5480 } 5481 5482 // Now that we know how many fixed arguments we expect, first check that we 5483 // have at least that many. 5484 if (TheCall->getNumArgs() < 1+NumFixed) { 5485 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5486 << 0 << 1 + NumFixed << TheCall->getNumArgs() 5487 << Callee->getSourceRange(); 5488 return ExprError(); 5489 } 5490 5491 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 5492 << Callee->getSourceRange(); 5493 5494 if (WarnAboutSemanticsChange) { 5495 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 5496 << Callee->getSourceRange(); 5497 } 5498 5499 // Get the decl for the concrete builtin from this, we can tell what the 5500 // concrete integer type we should convert to is. 5501 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 5502 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 5503 FunctionDecl *NewBuiltinDecl; 5504 if (NewBuiltinID == BuiltinID) 5505 NewBuiltinDecl = FDecl; 5506 else { 5507 // Perform builtin lookup to avoid redeclaring it. 
    DeclarationName DN(&Context.Idents.get(NewBuiltinName));
    LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
    LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
    assert(Res.getFoundDecl());
    NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
    if (!NewBuiltinDecl)
      return ExprError();
  }

  // The first argument --- the pointer --- has a fixed type; we
  // deduce the types of the rest of the arguments accordingly.  Walk
  // the remaining arguments, converting them to the deduced value type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i+1);

    // GCC does an implicit conversion to the pointer or integer ValType. This
    // can fail in some cases (1i -> int**), check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type.  Check
    // to see if there is a potentially weird extension going on here.  This can
    // happen when you do an atomic operation on something like a char* and
    // pass in 42.  The 42 gets converted to char.  This is even more strange
    // for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i+1, Arg.get());
  }

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr *NewDRE = DeclRefExpr::Create(
      Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
      /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
      DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  // Prohibit use of _ExtInt with atomic builtins.
  // The arguments would have already been converted to the first argument's
  // type, so we only need to check the first argument.
  const auto *ExtIntValType = ValType->getAs<ExtIntType>();
  if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) {
    Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
    return ExprError();
  }

  return TheCallResult;
}

/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins.
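///
/// A minimal illustration (buf and out are assumed to be float arrays):
/// \code
///   float v = __builtin_nontemporal_load(&buf[i]);  // type deduced as float
///   __builtin_nontemporal_store(v, &out[i]);        // call typed as void
/// \endcode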
5577 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 5578 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 5579 DeclRefExpr *DRE = 5580 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5581 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5582 unsigned BuiltinID = FDecl->getBuiltinID(); 5583 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 5584 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 5585 "Unexpected nontemporal load/store builtin!"); 5586 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 5587 unsigned numArgs = isStore ? 2 : 1; 5588 5589 // Ensure that we have the proper number of arguments. 5590 if (checkArgCount(*this, TheCall, numArgs)) 5591 return ExprError(); 5592 5593 // Inspect the last argument of the nontemporal builtin. This should always 5594 // be a pointer type, from which we imply the type of the memory access. 5595 // Because it is a pointer type, we don't have to worry about any implicit 5596 // casts here. 5597 Expr *PointerArg = TheCall->getArg(numArgs - 1); 5598 ExprResult PointerArgResult = 5599 DefaultFunctionArrayLvalueConversion(PointerArg); 5600 5601 if (PointerArgResult.isInvalid()) 5602 return ExprError(); 5603 PointerArg = PointerArgResult.get(); 5604 TheCall->setArg(numArgs - 1, PointerArg); 5605 5606 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 5607 if (!pointerType) { 5608 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 5609 << PointerArg->getType() << PointerArg->getSourceRange(); 5610 return ExprError(); 5611 } 5612 5613 QualType ValType = pointerType->getPointeeType(); 5614 5615 // Strip any qualifiers off ValType. 5616 ValType = ValType.getUnqualifiedType(); 5617 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5618 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 5619 !ValType->isVectorType()) { 5620 Diag(DRE->getBeginLoc(), 5621 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 5622 << PointerArg->getType() << PointerArg->getSourceRange(); 5623 return ExprError(); 5624 } 5625 5626 if (!isStore) { 5627 TheCall->setType(ValType); 5628 return TheCallResult; 5629 } 5630 5631 ExprResult ValArg = TheCall->getArg(0); 5632 InitializedEntity Entity = InitializedEntity::InitializeParameter( 5633 Context, ValType, /*consume*/ false); 5634 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 5635 if (ValArg.isInvalid()) 5636 return ExprError(); 5637 5638 TheCall->setArg(0, ValArg.get()); 5639 TheCall->setType(Context.VoidTy); 5640 return TheCallResult; 5641 } 5642 5643 /// CheckObjCString - Checks that the argument to the builtin 5644 /// CFString constructor is correct 5645 /// Note: It might also make sense to do the UTF-16 conversion here (would 5646 /// simplify the backend). 
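///
/// For example (illustrative), this accepts
///   __builtin___CFStringMakeConstantString("hello")
/// but rejects a non-literal or wide/UTF string argument, and warns when the
/// literal cannot be converted to UTF-16 cleanly.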
5647 bool Sema::CheckObjCString(Expr *Arg) { 5648 Arg = Arg->IgnoreParenCasts(); 5649 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 5650 5651 if (!Literal || !Literal->isAscii()) { 5652 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 5653 << Arg->getSourceRange(); 5654 return true; 5655 } 5656 5657 if (Literal->containsNonAsciiOrNull()) { 5658 StringRef String = Literal->getString(); 5659 unsigned NumBytes = String.size(); 5660 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 5661 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 5662 llvm::UTF16 *ToPtr = &ToBuf[0]; 5663 5664 llvm::ConversionResult Result = 5665 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 5666 ToPtr + NumBytes, llvm::strictConversion); 5667 // Check for conversion failure. 5668 if (Result != llvm::conversionOK) 5669 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 5670 << Arg->getSourceRange(); 5671 } 5672 return false; 5673 } 5674 5675 /// CheckObjCString - Checks that the format string argument to the os_log() 5676 /// and os_trace() functions is correct, and converts it to const char *. 5677 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 5678 Arg = Arg->IgnoreParenCasts(); 5679 auto *Literal = dyn_cast<StringLiteral>(Arg); 5680 if (!Literal) { 5681 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 5682 Literal = ObjcLiteral->getString(); 5683 } 5684 } 5685 5686 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 5687 return ExprError( 5688 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 5689 << Arg->getSourceRange()); 5690 } 5691 5692 ExprResult Result(Literal); 5693 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 5694 InitializedEntity Entity = 5695 InitializedEntity::InitializeParameter(Context, ResultTy, false); 5696 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 5697 return Result; 5698 } 5699 5700 /// Check that the user is calling the appropriate va_start builtin for the 5701 /// target and calling convention. 5702 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 5703 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 5704 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 5705 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 5706 TT.getArch() == llvm::Triple::aarch64_32); 5707 bool IsWindows = TT.isOSWindows(); 5708 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 5709 if (IsX64 || IsAArch64) { 5710 CallingConv CC = CC_C; 5711 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 5712 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 5713 if (IsMSVAStart) { 5714 // Don't allow this in System V ABI functions. 5715 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 5716 return S.Diag(Fn->getBeginLoc(), 5717 diag::err_ms_va_start_used_in_sysv_function); 5718 } else { 5719 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 5720 // On x64 Windows, don't allow this in System V ABI functions. 5721 // (Yes, that means there's no corresponding way to support variadic 5722 // System V ABI functions on Windows.) 
5723 if ((IsWindows && CC == CC_X86_64SysV) || 5724 (!IsWindows && CC == CC_Win64)) 5725 return S.Diag(Fn->getBeginLoc(), 5726 diag::err_va_start_used_in_wrong_abi_function) 5727 << !IsWindows; 5728 } 5729 return false; 5730 } 5731 5732 if (IsMSVAStart) 5733 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 5734 return false; 5735 } 5736 5737 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 5738 ParmVarDecl **LastParam = nullptr) { 5739 // Determine whether the current function, block, or obj-c method is variadic 5740 // and get its parameter list. 5741 bool IsVariadic = false; 5742 ArrayRef<ParmVarDecl *> Params; 5743 DeclContext *Caller = S.CurContext; 5744 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 5745 IsVariadic = Block->isVariadic(); 5746 Params = Block->parameters(); 5747 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 5748 IsVariadic = FD->isVariadic(); 5749 Params = FD->parameters(); 5750 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 5751 IsVariadic = MD->isVariadic(); 5752 // FIXME: This isn't correct for methods (results in bogus warning). 5753 Params = MD->parameters(); 5754 } else if (isa<CapturedDecl>(Caller)) { 5755 // We don't support va_start in a CapturedDecl. 5756 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 5757 return true; 5758 } else { 5759 // This must be some other declcontext that parses exprs. 5760 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 5761 return true; 5762 } 5763 5764 if (!IsVariadic) { 5765 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 5766 return true; 5767 } 5768 5769 if (LastParam) 5770 *LastParam = Params.empty() ? nullptr : Params.back(); 5771 5772 return false; 5773 } 5774 5775 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 5776 /// for validity. Emit an error and return true on failure; return false 5777 /// on success. 5778 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 5779 Expr *Fn = TheCall->getCallee(); 5780 5781 if (checkVAStartABI(*this, BuiltinID, Fn)) 5782 return true; 5783 5784 if (checkArgCount(*this, TheCall, 2)) 5785 return true; 5786 5787 // Type-check the first argument normally. 5788 if (checkBuiltinArgument(*this, TheCall, 0)) 5789 return true; 5790 5791 // Check that the current function is variadic, and get its last parameter. 5792 ParmVarDecl *LastParam; 5793 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 5794 return true; 5795 5796 // Verify that the second argument to the builtin is the last argument of the 5797 // current function or method. 5798 bool SecondArgIsLastNamedArgument = false; 5799 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 5800 5801 // These are valid if SecondArgIsLastNamedArgument is false after the next 5802 // block. 
5803 QualType Type; 5804 SourceLocation ParamLoc; 5805 bool IsCRegister = false; 5806 5807 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 5808 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 5809 SecondArgIsLastNamedArgument = PV == LastParam; 5810 5811 Type = PV->getType(); 5812 ParamLoc = PV->getLocation(); 5813 IsCRegister = 5814 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 5815 } 5816 } 5817 5818 if (!SecondArgIsLastNamedArgument) 5819 Diag(TheCall->getArg(1)->getBeginLoc(), 5820 diag::warn_second_arg_of_va_start_not_last_named_param); 5821 else if (IsCRegister || Type->isReferenceType() || 5822 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 5823 // Promotable integers are UB, but enumerations need a bit of 5824 // extra checking to see what their promotable type actually is. 5825 if (!Type->isPromotableIntegerType()) 5826 return false; 5827 if (!Type->isEnumeralType()) 5828 return true; 5829 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 5830 return !(ED && 5831 Context.typesAreCompatible(ED->getPromotionType(), Type)); 5832 }()) { 5833 unsigned Reason = 0; 5834 if (Type->isReferenceType()) Reason = 1; 5835 else if (IsCRegister) Reason = 2; 5836 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 5837 Diag(ParamLoc, diag::note_parameter_type) << Type; 5838 } 5839 5840 TheCall->setType(Context.VoidTy); 5841 return false; 5842 } 5843 5844 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 5845 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 5846 // const char *named_addr); 5847 5848 Expr *Func = Call->getCallee(); 5849 5850 if (Call->getNumArgs() < 3) 5851 return Diag(Call->getEndLoc(), 5852 diag::err_typecheck_call_too_few_args_at_least) 5853 << 0 /*function call*/ << 3 << Call->getNumArgs(); 5854 5855 // Type-check the first argument normally. 5856 if (checkBuiltinArgument(*this, Call, 0)) 5857 return true; 5858 5859 // Check that the current function is variadic. 5860 if (checkVAStartIsInVariadicFunction(*this, Func)) 5861 return true; 5862 5863 // __va_start on Windows does not validate the parameter qualifiers 5864 5865 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 5866 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 5867 5868 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 5869 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 5870 5871 const QualType &ConstCharPtrTy = 5872 Context.getPointerType(Context.CharTy.withConst()); 5873 if (!Arg1Ty->isPointerType() || 5874 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy) 5875 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5876 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 5877 << 0 /* qualifier difference */ 5878 << 3 /* parameter mismatch */ 5879 << 2 << Arg1->getType() << ConstCharPtrTy; 5880 5881 const QualType SizeTy = Context.getSizeType(); 5882 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 5883 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5884 << Arg2->getType() << SizeTy << 1 /* different class */ 5885 << 0 /* qualifier difference */ 5886 << 3 /* parameter mismatch */ 5887 << 3 << Arg2->getType() << SizeTy; 5888 5889 return false; 5890 } 5891 5892 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 5893 /// friends. This is declared to take (...), so we have to check everything. 
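///
/// Roughly speaking, after the usual arithmetic conversions both operands
/// must share a real floating-point type, e.g.:
/// \code
///   __builtin_isgreater(1.0f, 2.0);  // OK, compared as double
///   __builtin_isgreater(1, 2);       // error: no floating-point operand
/// \endcode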
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(
      OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}

/// SemaBuiltinFPClassification - Handle functions like __builtin_isnan and
/// friends. This is declared to take (...), so we have to check everything.
/// We expect the last argument to be a floating point value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (checkArgCount(*this, TheCall, NumArgs))
    return true;

  // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
  // on all preceding parameters just being int.  Try all of those.
  for (unsigned i = 0; i < NumArgs - 1; ++i) {
    Expr *Arg = TheCall->getArg(i);

    if (Arg->isTypeDependent())
      return false;

    ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);

    if (Res.isInvalid())
      return true;
    TheCall->setArg(i, Res.get());
  }

  Expr *OrigArg = TheCall->getArg(NumArgs-1);

  if (OrigArg->isTypeDependent())
    return false;

  // Usual Unary Conversions will convert half to float, which we want for
  // machines that use fp16 conversion intrinsics. Else, we want to leave the
  // type how it is, but do normal L->Rvalue conversions.
  if (Context.getTargetInfo().useFP16ConversionIntrinsics())
    OrigArg = UsualUnaryConversions(OrigArg).get();
  else
    OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
  TheCall->setArg(NumArgs - 1, OrigArg);

  // This operation requires a non-_Complex floating-point number.
  if (!OrigArg->getType()->isRealFloatingType())
    return Diag(OrigArg->getBeginLoc(),
                diag::err_typecheck_call_invalid_unary_fp)
           << OrigArg->getType() << OrigArg->getSourceRange();

  return false;
}

/// Perform semantic analysis for a call to __builtin_complex.
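///
/// A small illustration: __builtin_complex(1.0, 2.0) yields a value of type
/// _Complex double, while mixing operand types, as in
/// __builtin_complex(1.0f, 2.0), is diagnosed.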
5976 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 5977 if (checkArgCount(*this, TheCall, 2)) 5978 return true; 5979 5980 bool Dependent = false; 5981 for (unsigned I = 0; I != 2; ++I) { 5982 Expr *Arg = TheCall->getArg(I); 5983 QualType T = Arg->getType(); 5984 if (T->isDependentType()) { 5985 Dependent = true; 5986 continue; 5987 } 5988 5989 // Despite supporting _Complex int, GCC requires a real floating point type 5990 // for the operands of __builtin_complex. 5991 if (!T->isRealFloatingType()) { 5992 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 5993 << Arg->getType() << Arg->getSourceRange(); 5994 } 5995 5996 ExprResult Converted = DefaultLvalueConversion(Arg); 5997 if (Converted.isInvalid()) 5998 return true; 5999 TheCall->setArg(I, Converted.get()); 6000 } 6001 6002 if (Dependent) { 6003 TheCall->setType(Context.DependentTy); 6004 return false; 6005 } 6006 6007 Expr *Real = TheCall->getArg(0); 6008 Expr *Imag = TheCall->getArg(1); 6009 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 6010 return Diag(Real->getBeginLoc(), 6011 diag::err_typecheck_call_different_arg_types) 6012 << Real->getType() << Imag->getType() 6013 << Real->getSourceRange() << Imag->getSourceRange(); 6014 } 6015 6016 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 6017 // don't allow this builtin to form those types either. 6018 // FIXME: Should we allow these types? 6019 if (Real->getType()->isFloat16Type()) 6020 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6021 << "_Float16"; 6022 if (Real->getType()->isHalfType()) 6023 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6024 << "half"; 6025 6026 TheCall->setType(Context.getComplexType(Real->getType())); 6027 return false; 6028 } 6029 6030 // Customized Sema Checking for VSX builtins that have the following signature: 6031 // vector [...] builtinName(vector [...], vector [...], const int); 6032 // Which takes the same type of vectors (any legal vector type) for the first 6033 // two arguments and takes compile time constant for the third argument. 6034 // Example builtins are : 6035 // vector double vec_xxpermdi(vector double, vector double, int); 6036 // vector short vec_xxsldwi(vector short, vector short, int); 6037 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 6038 unsigned ExpectedNumArgs = 3; 6039 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 6040 return true; 6041 6042 // Check the third argument is a compile time constant 6043 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 6044 return Diag(TheCall->getBeginLoc(), 6045 diag::err_vsx_builtin_nonconstant_argument) 6046 << 3 /* argument index */ << TheCall->getDirectCallee() 6047 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 6048 TheCall->getArg(2)->getEndLoc()); 6049 6050 QualType Arg1Ty = TheCall->getArg(0)->getType(); 6051 QualType Arg2Ty = TheCall->getArg(1)->getType(); 6052 6053 // Check the type of argument 1 and argument 2 are vectors. 6054 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 6055 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 6056 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 6057 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 6058 << TheCall->getDirectCallee() 6059 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6060 TheCall->getArg(1)->getEndLoc()); 6061 } 6062 6063 // Check the first two arguments are the same type. 
6064 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 6065 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 6066 << TheCall->getDirectCallee() 6067 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6068 TheCall->getArg(1)->getEndLoc()); 6069 } 6070 6071 // When default clang type checking is turned off and the customized type 6072 // checking is used, the returning type of the function must be explicitly 6073 // set. Otherwise it is _Bool by default. 6074 TheCall->setType(Arg1Ty); 6075 6076 return false; 6077 } 6078 6079 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 6080 // This is declared to take (...), so we have to check everything. 6081 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 6082 if (TheCall->getNumArgs() < 2) 6083 return ExprError(Diag(TheCall->getEndLoc(), 6084 diag::err_typecheck_call_too_few_args_at_least) 6085 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 6086 << TheCall->getSourceRange()); 6087 6088 // Determine which of the following types of shufflevector we're checking: 6089 // 1) unary, vector mask: (lhs, mask) 6090 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 6091 QualType resType = TheCall->getArg(0)->getType(); 6092 unsigned numElements = 0; 6093 6094 if (!TheCall->getArg(0)->isTypeDependent() && 6095 !TheCall->getArg(1)->isTypeDependent()) { 6096 QualType LHSType = TheCall->getArg(0)->getType(); 6097 QualType RHSType = TheCall->getArg(1)->getType(); 6098 6099 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 6100 return ExprError( 6101 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 6102 << TheCall->getDirectCallee() 6103 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6104 TheCall->getArg(1)->getEndLoc())); 6105 6106 numElements = LHSType->castAs<VectorType>()->getNumElements(); 6107 unsigned numResElements = TheCall->getNumArgs() - 2; 6108 6109 // Check to see if we have a call with 2 vector arguments, the unary shuffle 6110 // with mask. If so, verify that RHS is an integer vector type with the 6111 // same number of elts as lhs. 6112 if (TheCall->getNumArgs() == 2) { 6113 if (!RHSType->hasIntegerRepresentation() || 6114 RHSType->castAs<VectorType>()->getNumElements() != numElements) 6115 return ExprError(Diag(TheCall->getBeginLoc(), 6116 diag::err_vec_builtin_incompatible_vector) 6117 << TheCall->getDirectCallee() 6118 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 6119 TheCall->getArg(1)->getEndLoc())); 6120 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 6121 return ExprError(Diag(TheCall->getBeginLoc(), 6122 diag::err_vec_builtin_incompatible_vector) 6123 << TheCall->getDirectCallee() 6124 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6125 TheCall->getArg(1)->getEndLoc())); 6126 } else if (numElements != numResElements) { 6127 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 6128 resType = Context.getVectorType(eltType, numResElements, 6129 VectorType::GenericVector); 6130 } 6131 } 6132 6133 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 6134 if (TheCall->getArg(i)->isTypeDependent() || 6135 TheCall->getArg(i)->isValueDependent()) 6136 continue; 6137 6138 Optional<llvm::APSInt> Result; 6139 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 6140 return ExprError(Diag(TheCall->getBeginLoc(), 6141 diag::err_shufflevector_nonconstant_argument) 6142 << TheCall->getArg(i)->getSourceRange()); 6143 6144 // Allow -1 which will be translated to undef in the IR. 
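    // For instance, __builtin_shufflevector(a, b, 0, -1, 5, -1) leaves the
    // second and fourth result elements undefined.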
6145 if (Result->isSigned() && Result->isAllOnesValue()) 6146 continue; 6147 6148 if (Result->getActiveBits() > 64 || 6149 Result->getZExtValue() >= numElements * 2) 6150 return ExprError(Diag(TheCall->getBeginLoc(), 6151 diag::err_shufflevector_argument_too_large) 6152 << TheCall->getArg(i)->getSourceRange()); 6153 } 6154 6155 SmallVector<Expr*, 32> exprs; 6156 6157 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 6158 exprs.push_back(TheCall->getArg(i)); 6159 TheCall->setArg(i, nullptr); 6160 } 6161 6162 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 6163 TheCall->getCallee()->getBeginLoc(), 6164 TheCall->getRParenLoc()); 6165 } 6166 6167 /// SemaConvertVectorExpr - Handle __builtin_convertvector 6168 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 6169 SourceLocation BuiltinLoc, 6170 SourceLocation RParenLoc) { 6171 ExprValueKind VK = VK_RValue; 6172 ExprObjectKind OK = OK_Ordinary; 6173 QualType DstTy = TInfo->getType(); 6174 QualType SrcTy = E->getType(); 6175 6176 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 6177 return ExprError(Diag(BuiltinLoc, 6178 diag::err_convertvector_non_vector) 6179 << E->getSourceRange()); 6180 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 6181 return ExprError(Diag(BuiltinLoc, 6182 diag::err_convertvector_non_vector_type)); 6183 6184 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 6185 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 6186 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 6187 if (SrcElts != DstElts) 6188 return ExprError(Diag(BuiltinLoc, 6189 diag::err_convertvector_incompatible_vector) 6190 << E->getSourceRange()); 6191 } 6192 6193 return new (Context) 6194 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 6195 } 6196 6197 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 6198 // This is declared to take (const void*, ...) and can take two 6199 // optional constant int args. 6200 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 6201 unsigned NumArgs = TheCall->getNumArgs(); 6202 6203 if (NumArgs > 3) 6204 return Diag(TheCall->getEndLoc(), 6205 diag::err_typecheck_call_too_many_args_at_most) 6206 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6207 6208 // Argument 0 is checked for us and the remaining arguments must be 6209 // constant integers. 6210 for (unsigned i = 1; i != NumArgs; ++i) 6211 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 6212 return true; 6213 6214 return false; 6215 } 6216 6217 /// SemaBuiltinAssume - Handle __assume (MS Extension). 6218 // __assume does not evaluate its arguments, and should warn if its argument 6219 // has side effects. 6220 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 6221 Expr *Arg = TheCall->getArg(0); 6222 if (Arg->isInstantiationDependent()) return false; 6223 6224 if (Arg->HasSideEffects(Context)) 6225 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 6226 << Arg->getSourceRange() 6227 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 6228 6229 return false; 6230 } 6231 6232 /// Handle __builtin_alloca_with_align. This is declared 6233 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 6234 /// than 8. 6235 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 6236 // The alignment must be a constant integer. 6237 Expr *Arg = TheCall->getArg(1); 6238 6239 // We can't check the value of a dependent argument. 
6240 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6241 if (const auto *UE = 6242 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 6243 if (UE->getKind() == UETT_AlignOf || 6244 UE->getKind() == UETT_PreferredAlignOf) 6245 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 6246 << Arg->getSourceRange(); 6247 6248 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 6249 6250 if (!Result.isPowerOf2()) 6251 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6252 << Arg->getSourceRange(); 6253 6254 if (Result < Context.getCharWidth()) 6255 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 6256 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 6257 6258 if (Result > std::numeric_limits<int32_t>::max()) 6259 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 6260 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 6261 } 6262 6263 return false; 6264 } 6265 6266 /// Handle __builtin_assume_aligned. This is declared 6267 /// as (const void*, size_t, ...) and can take one optional constant int arg. 6268 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 6269 unsigned NumArgs = TheCall->getNumArgs(); 6270 6271 if (NumArgs > 3) 6272 return Diag(TheCall->getEndLoc(), 6273 diag::err_typecheck_call_too_many_args_at_most) 6274 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6275 6276 // The alignment must be a constant integer. 6277 Expr *Arg = TheCall->getArg(1); 6278 6279 // We can't check the value of a dependent argument. 6280 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6281 llvm::APSInt Result; 6282 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6283 return true; 6284 6285 if (!Result.isPowerOf2()) 6286 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6287 << Arg->getSourceRange(); 6288 6289 if (Result > Sema::MaximumAlignment) 6290 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 6291 << Arg->getSourceRange() << Sema::MaximumAlignment; 6292 } 6293 6294 if (NumArgs > 2) { 6295 ExprResult Arg(TheCall->getArg(2)); 6296 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6297 Context.getSizeType(), false); 6298 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6299 if (Arg.isInvalid()) return true; 6300 TheCall->setArg(2, Arg.get()); 6301 } 6302 6303 return false; 6304 } 6305 6306 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 6307 unsigned BuiltinID = 6308 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 6309 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 6310 6311 unsigned NumArgs = TheCall->getNumArgs(); 6312 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 6313 if (NumArgs < NumRequiredArgs) { 6314 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 6315 << 0 /* function call */ << NumRequiredArgs << NumArgs 6316 << TheCall->getSourceRange(); 6317 } 6318 if (NumArgs >= NumRequiredArgs + 0x100) { 6319 return Diag(TheCall->getEndLoc(), 6320 diag::err_typecheck_call_too_many_args_at_most) 6321 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 6322 << TheCall->getSourceRange(); 6323 } 6324 unsigned i = 0; 6325 6326 // For formatting call, check buffer arg. 
6327 if (!IsSizeCall) { 6328 ExprResult Arg(TheCall->getArg(i)); 6329 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6330 Context, Context.VoidPtrTy, false); 6331 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6332 if (Arg.isInvalid()) 6333 return true; 6334 TheCall->setArg(i, Arg.get()); 6335 i++; 6336 } 6337 6338 // Check string literal arg. 6339 unsigned FormatIdx = i; 6340 { 6341 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 6342 if (Arg.isInvalid()) 6343 return true; 6344 TheCall->setArg(i, Arg.get()); 6345 i++; 6346 } 6347 6348 // Make sure variadic args are scalar. 6349 unsigned FirstDataArg = i; 6350 while (i < NumArgs) { 6351 ExprResult Arg = DefaultVariadicArgumentPromotion( 6352 TheCall->getArg(i), VariadicFunction, nullptr); 6353 if (Arg.isInvalid()) 6354 return true; 6355 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 6356 if (ArgSize.getQuantity() >= 0x100) { 6357 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 6358 << i << (int)ArgSize.getQuantity() << 0xff 6359 << TheCall->getSourceRange(); 6360 } 6361 TheCall->setArg(i, Arg.get()); 6362 i++; 6363 } 6364 6365 // Check formatting specifiers. NOTE: We're only doing this for the non-size 6366 // call to avoid duplicate diagnostics. 6367 if (!IsSizeCall) { 6368 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 6369 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 6370 bool Success = CheckFormatArguments( 6371 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 6372 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 6373 CheckedVarArgs); 6374 if (!Success) 6375 return true; 6376 } 6377 6378 if (IsSizeCall) { 6379 TheCall->setType(Context.getSizeType()); 6380 } else { 6381 TheCall->setType(Context.VoidPtrTy); 6382 } 6383 return false; 6384 } 6385 6386 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 6387 /// TheCall is a constant expression. 6388 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 6389 llvm::APSInt &Result) { 6390 Expr *Arg = TheCall->getArg(ArgNum); 6391 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6392 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6393 6394 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 6395 6396 Optional<llvm::APSInt> R; 6397 if (!(R = Arg->getIntegerConstantExpr(Context))) 6398 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 6399 << FDecl->getDeclName() << Arg->getSourceRange(); 6400 Result = *R; 6401 return false; 6402 } 6403 6404 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 6405 /// TheCall is a constant expression in the range [Low, High]. 6406 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 6407 int Low, int High, bool RangeIsError) { 6408 if (isConstantEvaluated()) 6409 return false; 6410 llvm::APSInt Result; 6411 6412 // We can't check the value of a dependent argument. 6413 Expr *Arg = TheCall->getArg(ArgNum); 6414 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6415 return false; 6416 6417 // Check constant-ness first. 
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
    if (RangeIsError)
      return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
             << Result.toString(10) << Low << High << Arg->getSourceRange();
    else
      // Defer the warning until we know if the code will be emitted so that
      // dead code can ignore this.
      DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                          PDiag(diag::warn_argument_invalid_range)
                              << Result.toString(10) << Low << High
                              << Arg->getSourceRange());
  }

  return false;
}

/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of
/// CallExpr TheCall is a constant expression that is a multiple of Num.
bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                          unsigned Num) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() % Num != 0)
    return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
           << Num << Arg->getSourceRange();

  return false;
}

/// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
/// constant expression representing a power of 2.
bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
  // and only if x is a power of 2.
  if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
         << Arg->getSourceRange();
}

static bool IsShiftedByte(llvm::APSInt Value) {
  if (Value.isNegative())
    return false;

  // Check if it's a shifted byte, by shifting it down
  while (true) {
    // If the value fits in the bottom byte, the check passes.
    if (Value < 0x100)
      return true;

    // Otherwise, if the value has _any_ bits in the bottom byte, the check
    // fails.
    if ((Value & 0xFF) != 0)
      return false;

    // If the bottom 8 bits are all 0, but something above that is nonzero,
    // then shifting the value right by 8 bits won't affect whether it's a
    // shifted byte or not. So do that, and go round again.
    Value >>= 8;
  }
}

/// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
/// a constant expression representing an arbitrary byte value shifted left by
/// a multiple of 8 bits.
bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
                                             unsigned ArgBits) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
6512 Expr *Arg = TheCall->getArg(ArgNum); 6513 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6514 return false; 6515 6516 // Check constant-ness first. 6517 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6518 return true; 6519 6520 // Truncate to the given size. 6521 Result = Result.getLoBits(ArgBits); 6522 Result.setIsUnsigned(true); 6523 6524 if (IsShiftedByte(Result)) 6525 return false; 6526 6527 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 6528 << Arg->getSourceRange(); 6529 } 6530 6531 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 6532 /// TheCall is a constant expression representing either a shifted byte value, 6533 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 6534 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 6535 /// Arm MVE intrinsics. 6536 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 6537 int ArgNum, 6538 unsigned ArgBits) { 6539 llvm::APSInt Result; 6540 6541 // We can't check the value of a dependent argument. 6542 Expr *Arg = TheCall->getArg(ArgNum); 6543 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6544 return false; 6545 6546 // Check constant-ness first. 6547 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6548 return true; 6549 6550 // Truncate to the given size. 6551 Result = Result.getLoBits(ArgBits); 6552 Result.setIsUnsigned(true); 6553 6554 // Check to see if it's in either of the required forms. 6555 if (IsShiftedByte(Result) || 6556 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 6557 return false; 6558 6559 return Diag(TheCall->getBeginLoc(), 6560 diag::err_argument_not_shifted_byte_or_xxff) 6561 << Arg->getSourceRange(); 6562 } 6563 6564 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 6565 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 6566 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 6567 if (checkArgCount(*this, TheCall, 2)) 6568 return true; 6569 Expr *Arg0 = TheCall->getArg(0); 6570 Expr *Arg1 = TheCall->getArg(1); 6571 6572 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6573 if (FirstArg.isInvalid()) 6574 return true; 6575 QualType FirstArgType = FirstArg.get()->getType(); 6576 if (!FirstArgType->isAnyPointerType()) 6577 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6578 << "first" << FirstArgType << Arg0->getSourceRange(); 6579 TheCall->setArg(0, FirstArg.get()); 6580 6581 ExprResult SecArg = DefaultLvalueConversion(Arg1); 6582 if (SecArg.isInvalid()) 6583 return true; 6584 QualType SecArgType = SecArg.get()->getType(); 6585 if (!SecArgType->isIntegerType()) 6586 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 6587 << "second" << SecArgType << Arg1->getSourceRange(); 6588 6589 // Derive the return type from the pointer argument. 
    TheCall->setType(FirstArgType);
    return false;
  }

  if (BuiltinID == AArch64::BI__builtin_arm_addg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;

    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);

    // Second arg must be a constant in range [0,15].
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();

    QualType SecArgType = Arg1->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();
    TheCall->setType(Context.IntTy);
    return false;
  }

  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg) {
    if (checkArgCount(*this, TheCall, 1))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;

    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
6653 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 6654 TheCall->setType(FirstArgType); 6655 return false; 6656 } 6657 6658 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 6659 Expr *ArgA = TheCall->getArg(0); 6660 Expr *ArgB = TheCall->getArg(1); 6661 6662 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 6663 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 6664 6665 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 6666 return true; 6667 6668 QualType ArgTypeA = ArgExprA.get()->getType(); 6669 QualType ArgTypeB = ArgExprB.get()->getType(); 6670 6671 auto isNull = [&] (Expr *E) -> bool { 6672 return E->isNullPointerConstant( 6673 Context, Expr::NPC_ValueDependentIsNotNull); }; 6674 6675 // argument should be either a pointer or null 6676 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 6677 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6678 << "first" << ArgTypeA << ArgA->getSourceRange(); 6679 6680 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 6681 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6682 << "second" << ArgTypeB << ArgB->getSourceRange(); 6683 6684 // Ensure Pointee types are compatible 6685 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 6686 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 6687 QualType pointeeA = ArgTypeA->getPointeeType(); 6688 QualType pointeeB = ArgTypeB->getPointeeType(); 6689 if (!Context.typesAreCompatible( 6690 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 6691 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 6692 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 6693 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 6694 << ArgB->getSourceRange(); 6695 } 6696 } 6697 6698 // at least one argument should be pointer type 6699 if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 6700 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 6701 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 6702 6703 if (isNull(ArgA)) // adopt type of the other pointer 6704 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 6705 6706 if (isNull(ArgB)) 6707 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 6708 6709 TheCall->setArg(0, ArgExprA.get()); 6710 TheCall->setArg(1, ArgExprB.get()); 6711 TheCall->setType(Context.LongLongTy); 6712 return false; 6713 } 6714 assert(false && "Unhandled ARM MTE intrinsic"); 6715 return true; 6716 } 6717 6718 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 6719 /// TheCall is an ARM/AArch64 special register string literal. 
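/// For instance, __builtin_arm_rsr64("TTBR0_EL1") names a register directly,
/// while the AArch32 form __builtin_arm_rsr("cp15:0:c13:c0:3") spells out an
/// encoding whose fields are validated below.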
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                    int ArgNum, unsigned ExpectedFieldNum,
                                    bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here, but if the string is of one of the forms described in ACLE
  // then we can check that the supplied fields are integers and within the
  // valid ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      ValidString &= Fields[0].startswith_lower("cp") ||
                     Fields[0].startswith_lower("p");
      if (ValidString)
        Fields[0] =
            Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1);

      ValidString &= Fields[2].startswith_lower("c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(1);

      if (FiveFields) {
        ValidString &= Fields[3].startswith_lower("c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(1);
      }
    }

    SmallVector<int, 5> Ranges;
    if (FiveFields)
      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
    else
      Ranges.append({15, 7, 15});

    for (unsigned i = 0; i < Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(10, IntField);
      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
    }

    if (!ValidString)
      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // If the register name is one of those that appear in the condition below
    // and the special register builtin being used is one of the write
    // builtins, then we require that the argument provided for writing to the
    // register is an integer constant expression. This is because it will be
    // lowered to an MSR (immediate) instruction, so we need to know the
    // immediate at compile time.
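    // For example, __builtin_arm_wsr("spsel", 1) is fine, whereas passing a
    // runtime value as the second argument is diagnosed below.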
6804 if (TheCall->getNumArgs() != 2) 6805 return false; 6806 6807 std::string RegLower = Reg.lower(); 6808 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 6809 RegLower != "pan" && RegLower != "uao") 6810 return false; 6811 6812 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6813 } 6814 6815 return false; 6816 } 6817 6818 /// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity. 6819 /// Emit an error and return true on failure; return false on success. 6820 /// TypeStr is a string containing the type descriptor of the value returned by 6821 /// the builtin and the descriptors of the expected type of the arguments. 6822 bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeStr) { 6823 6824 assert((TypeStr[0] != '\0') && 6825 "Invalid types in PPC MMA builtin declaration"); 6826 6827 unsigned Mask = 0; 6828 unsigned ArgNum = 0; 6829 6830 // The first type in TypeStr is the type of the value returned by the 6831 // builtin. So we first read that type and change the type of TheCall. 6832 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 6833 TheCall->setType(type); 6834 6835 while (*TypeStr != '\0') { 6836 Mask = 0; 6837 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 6838 if (ArgNum >= TheCall->getNumArgs()) { 6839 ArgNum++; 6840 break; 6841 } 6842 6843 Expr *Arg = TheCall->getArg(ArgNum); 6844 QualType ArgType = Arg->getType(); 6845 6846 if ((ExpectedType->isVoidPointerType() && !ArgType->isPointerType()) || 6847 (!ExpectedType->isVoidPointerType() && 6848 ArgType.getCanonicalType() != ExpectedType)) 6849 return Diag(Arg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 6850 << ArgType << ExpectedType << 1 << 0 << 0; 6851 6852 // If the value of the Mask is not 0, we have a constraint in the size of 6853 // the integer argument so here we ensure the argument is a constant that 6854 // is in the valid range. 6855 if (Mask != 0 && 6856 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true)) 6857 return true; 6858 6859 ArgNum++; 6860 } 6861 6862 // In case we exited early from the previous loop, there are other types to 6863 // read from TypeStr. So we need to read them all to ensure we have the right 6864 // number of arguments in TheCall and if it is not the case, to display a 6865 // better error message. 6866 while (*TypeStr != '\0') { 6867 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 6868 ArgNum++; 6869 } 6870 if (checkArgCount(*this, TheCall, ArgNum)) 6871 return true; 6872 6873 return false; 6874 } 6875 6876 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 6877 /// This checks that the target supports __builtin_longjmp and 6878 /// that val is a constant 1. 6879 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 6880 if (!Context.getTargetInfo().hasSjLjLowering()) 6881 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 6882 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6883 6884 Expr *Arg = TheCall->getArg(1); 6885 llvm::APSInt Result; 6886 6887 // TODO: This is less than ideal. Overload this to take a value. 6888 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6889 return true; 6890 6891 if (Result != 1) 6892 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 6893 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 6894 6895 return false; 6896 } 6897 6898 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 
6899 /// This checks that the target supports __builtin_setjmp. 6900 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 6901 if (!Context.getTargetInfo().hasSjLjLowering()) 6902 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 6903 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6904 return false; 6905 } 6906 6907 namespace { 6908 6909 class UncoveredArgHandler { 6910 enum { Unknown = -1, AllCovered = -2 }; 6911 6912 signed FirstUncoveredArg = Unknown; 6913 SmallVector<const Expr *, 4> DiagnosticExprs; 6914 6915 public: 6916 UncoveredArgHandler() = default; 6917 6918 bool hasUncoveredArg() const { 6919 return (FirstUncoveredArg >= 0); 6920 } 6921 6922 unsigned getUncoveredArg() const { 6923 assert(hasUncoveredArg() && "no uncovered argument"); 6924 return FirstUncoveredArg; 6925 } 6926 6927 void setAllCovered() { 6928 // A string has been found with all arguments covered, so clear out 6929 // the diagnostics. 6930 DiagnosticExprs.clear(); 6931 FirstUncoveredArg = AllCovered; 6932 } 6933 6934 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 6935 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 6936 6937 // Don't update if a previous string covers all arguments. 6938 if (FirstUncoveredArg == AllCovered) 6939 return; 6940 6941 // UncoveredArgHandler tracks the highest uncovered argument index 6942 // and with it all the strings that match this index. 6943 if (NewFirstUncoveredArg == FirstUncoveredArg) 6944 DiagnosticExprs.push_back(StrExpr); 6945 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 6946 DiagnosticExprs.clear(); 6947 DiagnosticExprs.push_back(StrExpr); 6948 FirstUncoveredArg = NewFirstUncoveredArg; 6949 } 6950 } 6951 6952 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 6953 }; 6954 6955 enum StringLiteralCheckType { 6956 SLCT_NotALiteral, 6957 SLCT_UncheckedLiteral, 6958 SLCT_CheckedLiteral 6959 }; 6960 6961 } // namespace 6962 6963 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 6964 BinaryOperatorKind BinOpKind, 6965 bool AddendIsRight) { 6966 unsigned BitWidth = Offset.getBitWidth(); 6967 unsigned AddendBitWidth = Addend.getBitWidth(); 6968 // There might be negative interim results. 6969 if (Addend.isUnsigned()) { 6970 Addend = Addend.zext(++AddendBitWidth); 6971 Addend.setIsSigned(true); 6972 } 6973 // Adjust the bit width of the APSInts. 6974 if (AddendBitWidth > BitWidth) { 6975 Offset = Offset.sext(AddendBitWidth); 6976 BitWidth = AddendBitWidth; 6977 } else if (BitWidth > AddendBitWidth) { 6978 Addend = Addend.sext(BitWidth); 6979 } 6980 6981 bool Ov = false; 6982 llvm::APSInt ResOffset = Offset; 6983 if (BinOpKind == BO_Add) 6984 ResOffset = Offset.sadd_ov(Addend, Ov); 6985 else { 6986 assert(AddendIsRight && BinOpKind == BO_Sub && 6987 "operator must be add or sub with addend on the right"); 6988 ResOffset = Offset.ssub_ov(Addend, Ov); 6989 } 6990 6991 // We add an offset to a pointer here so we should support an offset as big as 6992 // possible. 6993 if (Ov) { 6994 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 6995 "index (intermediate) result too big"); 6996 Offset = Offset.sext(2 * BitWidth); 6997 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 6998 return; 6999 } 7000 7001 Offset = ResOffset; 7002 } 7003 7004 namespace { 7005 7006 // This is a wrapper class around StringLiteral to support offsetted string 7007 // literals as format strings. 
It takes the offset into account when returning 7008 // the string and its length or the source locations to display notes correctly. 7009 class FormatStringLiteral { 7010 const StringLiteral *FExpr; 7011 int64_t Offset; 7012 7013 public: 7014 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 7015 : FExpr(fexpr), Offset(Offset) {} 7016 7017 StringRef getString() const { 7018 return FExpr->getString().drop_front(Offset); 7019 } 7020 7021 unsigned getByteLength() const { 7022 return FExpr->getByteLength() - getCharByteWidth() * Offset; 7023 } 7024 7025 unsigned getLength() const { return FExpr->getLength() - Offset; } 7026 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 7027 7028 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 7029 7030 QualType getType() const { return FExpr->getType(); } 7031 7032 bool isAscii() const { return FExpr->isAscii(); } 7033 bool isWide() const { return FExpr->isWide(); } 7034 bool isUTF8() const { return FExpr->isUTF8(); } 7035 bool isUTF16() const { return FExpr->isUTF16(); } 7036 bool isUTF32() const { return FExpr->isUTF32(); } 7037 bool isPascal() const { return FExpr->isPascal(); } 7038 7039 SourceLocation getLocationOfByte( 7040 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 7041 const TargetInfo &Target, unsigned *StartToken = nullptr, 7042 unsigned *StartTokenByteOffset = nullptr) const { 7043 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 7044 StartToken, StartTokenByteOffset); 7045 } 7046 7047 SourceLocation getBeginLoc() const LLVM_READONLY { 7048 return FExpr->getBeginLoc().getLocWithOffset(Offset); 7049 } 7050 7051 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 7052 }; 7053 7054 } // namespace 7055 7056 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 7057 const Expr *OrigFormatExpr, 7058 ArrayRef<const Expr *> Args, 7059 bool HasVAListArg, unsigned format_idx, 7060 unsigned firstDataArg, 7061 Sema::FormatStringType Type, 7062 bool inFunctionCall, 7063 Sema::VariadicCallType CallType, 7064 llvm::SmallBitVector &CheckedVarArgs, 7065 UncoveredArgHandler &UncoveredArg, 7066 bool IgnoreStringsWithoutSpecifiers); 7067 7068 // Determine if an expression is a string literal or constant string. 7069 // If this function returns false on the arguments to a function expecting a 7070 // format string, we will usually need to emit a warning. 7071 // True string literals are then checked by CheckFormatString. 7072 static StringLiteralCheckType 7073 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 7074 bool HasVAListArg, unsigned format_idx, 7075 unsigned firstDataArg, Sema::FormatStringType Type, 7076 Sema::VariadicCallType CallType, bool InFunctionCall, 7077 llvm::SmallBitVector &CheckedVarArgs, 7078 UncoveredArgHandler &UncoveredArg, 7079 llvm::APSInt Offset, 7080 bool IgnoreStringsWithoutSpecifiers = false) { 7081 if (S.isConstantEvaluated()) 7082 return SLCT_NotALiteral; 7083 tryAgain: 7084 assert(Offset.isSigned() && "invalid offset"); 7085 7086 if (E->isTypeDependent() || E->isValueDependent()) 7087 return SLCT_NotALiteral; 7088 7089 E = E->IgnoreParenCasts(); 7090 7091 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 7092 // Technically -Wformat-nonliteral does not warn about this case. 7093 // The behavior of printf and friends in this case is implementation 7094 // dependent. 
Ideally if the format string cannot be null then 7095 // it should have a 'nonnull' attribute in the function prototype. 7096 return SLCT_UncheckedLiteral; 7097 7098 switch (E->getStmtClass()) { 7099 case Stmt::BinaryConditionalOperatorClass: 7100 case Stmt::ConditionalOperatorClass: { 7101 // The expression is a literal if both sub-expressions were, and it was 7102 // completely checked only if both sub-expressions were checked. 7103 const AbstractConditionalOperator *C = 7104 cast<AbstractConditionalOperator>(E); 7105 7106 // Determine whether it is necessary to check both sub-expressions, for 7107 // example, because the condition expression is a constant that can be 7108 // evaluated at compile time. 7109 bool CheckLeft = true, CheckRight = true; 7110 7111 bool Cond; 7112 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 7113 S.isConstantEvaluated())) { 7114 if (Cond) 7115 CheckRight = false; 7116 else 7117 CheckLeft = false; 7118 } 7119 7120 // We need to maintain the offsets for the right and the left hand side 7121 // separately to check if every possible indexed expression is a valid 7122 // string literal. They might have different offsets for different string 7123 // literals in the end. 7124 StringLiteralCheckType Left; 7125 if (!CheckLeft) 7126 Left = SLCT_UncheckedLiteral; 7127 else { 7128 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 7129 HasVAListArg, format_idx, firstDataArg, 7130 Type, CallType, InFunctionCall, 7131 CheckedVarArgs, UncoveredArg, Offset, 7132 IgnoreStringsWithoutSpecifiers); 7133 if (Left == SLCT_NotALiteral || !CheckRight) { 7134 return Left; 7135 } 7136 } 7137 7138 StringLiteralCheckType Right = checkFormatStringExpr( 7139 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 7140 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 7141 IgnoreStringsWithoutSpecifiers); 7142 7143 return (CheckLeft && Left < Right) ? Left : Right; 7144 } 7145 7146 case Stmt::ImplicitCastExprClass: 7147 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 7148 goto tryAgain; 7149 7150 case Stmt::OpaqueValueExprClass: 7151 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 7152 E = src; 7153 goto tryAgain; 7154 } 7155 return SLCT_NotALiteral; 7156 7157 case Stmt::PredefinedExprClass: 7158 // While __func__, etc., are technically not string literals, they 7159 // cannot contain format specifiers and thus are not a security 7160 // liability. 7161 return SLCT_UncheckedLiteral; 7162 7163 case Stmt::DeclRefExprClass: { 7164 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 7165 7166 // As an exception, do not flag errors for variables binding to 7167 // const string literals. 7168 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 7169 bool isConstant = false; 7170 QualType T = DR->getType(); 7171 7172 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 7173 isConstant = AT->getElementType().isConstant(S.Context); 7174 } else if (const PointerType *PT = T->getAs<PointerType>()) { 7175 isConstant = T.isConstant(S.Context) && 7176 PT->getPointeeType().isConstant(S.Context); 7177 } else if (T->isObjCObjectPointerType()) { 7178 // In ObjC, there is usually no "const ObjectPointer" type, 7179 // so don't check if the pointee type is constant. 
7180 isConstant = T.isConstant(S.Context); 7181 } 7182 7183 if (isConstant) { 7184 if (const Expr *Init = VD->getAnyInitializer()) { 7185 // Look through initializers like const char c[] = { "foo" } 7186 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 7187 if (InitList->isStringLiteralInit()) 7188 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 7189 } 7190 return checkFormatStringExpr(S, Init, Args, 7191 HasVAListArg, format_idx, 7192 firstDataArg, Type, CallType, 7193 /*InFunctionCall*/ false, CheckedVarArgs, 7194 UncoveredArg, Offset); 7195 } 7196 } 7197 7198 // For vprintf* functions (i.e., HasVAListArg==true), we add a 7199 // special check to see if the format string is a function parameter 7200 // of the function calling the printf function. If the function 7201 // has an attribute indicating it is a printf-like function, then we 7202 // should suppress warnings concerning non-literals being used in a call 7203 // to a vprintf function. For example: 7204 // 7205 // void 7206 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 7207 // va_list ap; 7208 // va_start(ap, fmt); 7209 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 7210 // ... 7211 // } 7212 if (HasVAListArg) { 7213 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 7214 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { 7215 int PVIndex = PV->getFunctionScopeIndex() + 1; 7216 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { 7217 // adjust for implicit parameter 7218 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) 7219 if (MD->isInstance()) 7220 ++PVIndex; 7221 // We also check if the formats are compatible. 7222 // We can't pass a 'scanf' string to a 'printf' function. 7223 if (PVIndex == PVFormat->getFormatIdx() && 7224 Type == S.GetFormatStringType(PVFormat)) 7225 return SLCT_UncheckedLiteral; 7226 } 7227 } 7228 } 7229 } 7230 } 7231 7232 return SLCT_NotALiteral; 7233 } 7234 7235 case Stmt::CallExprClass: 7236 case Stmt::CXXMemberCallExprClass: { 7237 const CallExpr *CE = cast<CallExpr>(E); 7238 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 7239 bool IsFirst = true; 7240 StringLiteralCheckType CommonResult; 7241 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 7242 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 7243 StringLiteralCheckType Result = checkFormatStringExpr( 7244 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 7245 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 7246 IgnoreStringsWithoutSpecifiers); 7247 if (IsFirst) { 7248 CommonResult = Result; 7249 IsFirst = false; 7250 } 7251 } 7252 if (!IsFirst) 7253 return CommonResult; 7254 7255 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 7256 unsigned BuiltinID = FD->getBuiltinID(); 7257 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 7258 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 7259 const Expr *Arg = CE->getArg(0); 7260 return checkFormatStringExpr(S, Arg, Args, 7261 HasVAListArg, format_idx, 7262 firstDataArg, Type, CallType, 7263 InFunctionCall, CheckedVarArgs, 7264 UncoveredArg, Offset, 7265 IgnoreStringsWithoutSpecifiers); 7266 } 7267 } 7268 } 7269 7270 return SLCT_NotALiteral; 7271 } 7272 case Stmt::ObjCMessageExprClass: { 7273 const auto *ME = cast<ObjCMessageExpr>(E); 7274 if (const auto *MD = ME->getMethodDecl()) { 7275 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 7276 // As a special case heuristic, if 
        // we're using the method -[NSBundle
        // localizedStringForKey:value:table:], ignore any key strings that
        // lack format specifiers. The idea is that if the key doesn't have
        // any format specifiers then it's probably just a key to map to the
        // localized strings. If it does have format specifiers though, then
        // it's likely that the text of the key is the format string in the
        // programmer's language, and should be checked.
        const ObjCInterfaceDecl *IFace;
        if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
            IFace->getIdentifier()->isStr("NSBundle") &&
            MD->getSelector().isKeywordSelector(
                {"localizedStringForKey", "value", "table"})) {
          IgnoreStringsWithoutSpecifiers = true;
        }

        const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
        return checkFormatStringExpr(
            S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
            CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCStringLiteralClass:
  case Stmt::StringLiteralClass: {
    const StringLiteral *StrE = nullptr;

    if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
      StrE = ObjCFExpr->getString();
    else
      StrE = cast<StringLiteral>(E);

    if (StrE) {
      if (Offset.isNegative() || Offset > StrE->getLength()) {
        // TODO: It would be better to have an explicit warning for out of
        // bounds literals.
        return SLCT_NotALiteral;
      }
      FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
      CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
                        firstDataArg, Type, InFunctionCall, CallType,
                        CheckedVarArgs, UncoveredArg,
                        IgnoreStringsWithoutSpecifiers);
      return SLCT_CheckedLiteral;
    }

    return SLCT_NotALiteral;
  }
  case Stmt::BinaryOperatorClass: {
    const BinaryOperator *BinOp = cast<BinaryOperator>(E);

    // A string literal + an int offset is still a string literal.
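    // For example, in printf("%s: %d" + 4, n) the format string that actually
    // gets checked is "%d".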
7330 if (BinOp->isAdditiveOp()) { 7331 Expr::EvalResult LResult, RResult; 7332 7333 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 7334 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7335 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 7336 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7337 7338 if (LIsInt != RIsInt) { 7339 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 7340 7341 if (LIsInt) { 7342 if (BinOpKind == BO_Add) { 7343 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 7344 E = BinOp->getRHS(); 7345 goto tryAgain; 7346 } 7347 } else { 7348 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 7349 E = BinOp->getLHS(); 7350 goto tryAgain; 7351 } 7352 } 7353 } 7354 7355 return SLCT_NotALiteral; 7356 } 7357 case Stmt::UnaryOperatorClass: { 7358 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 7359 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 7360 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 7361 Expr::EvalResult IndexResult; 7362 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 7363 Expr::SE_NoSideEffects, 7364 S.isConstantEvaluated())) { 7365 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 7366 /*RHS is int*/ true); 7367 E = ASE->getBase(); 7368 goto tryAgain; 7369 } 7370 } 7371 7372 return SLCT_NotALiteral; 7373 } 7374 7375 default: 7376 return SLCT_NotALiteral; 7377 } 7378 } 7379 7380 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 7381 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 7382 .Case("scanf", FST_Scanf) 7383 .Cases("printf", "printf0", FST_Printf) 7384 .Cases("NSString", "CFString", FST_NSString) 7385 .Case("strftime", FST_Strftime) 7386 .Case("strfmon", FST_Strfmon) 7387 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 7388 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 7389 .Case("os_trace", FST_OSLog) 7390 .Case("os_log", FST_OSLog) 7391 .Default(FST_Unknown); 7392 } 7393 7394 /// CheckFormatArguments - Check calls to printf and scanf (and similar 7395 /// functions) for correct use of format strings. 7396 /// Returns true if a format string has been fully checked. 7397 bool Sema::CheckFormatArguments(const FormatAttr *Format, 7398 ArrayRef<const Expr *> Args, 7399 bool IsCXXMember, 7400 VariadicCallType CallType, 7401 SourceLocation Loc, SourceRange Range, 7402 llvm::SmallBitVector &CheckedVarArgs) { 7403 FormatStringInfo FSI; 7404 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 7405 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 7406 FSI.FirstDataArg, GetFormatStringType(Format), 7407 CallType, Loc, Range, CheckedVarArgs); 7408 return false; 7409 } 7410 7411 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 7412 bool HasVAListArg, unsigned format_idx, 7413 unsigned firstDataArg, FormatStringType Type, 7414 VariadicCallType CallType, 7415 SourceLocation Loc, SourceRange Range, 7416 llvm::SmallBitVector &CheckedVarArgs) { 7417 // CHECK: printf/scanf-like function is called with no format string. 7418 if (format_idx >= Args.size()) { 7419 Diag(Loc, diag::warn_missing_format_string) << Range; 7420 return false; 7421 } 7422 7423 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 7424 7425 // CHECK: format string is not a string literal. 7426 // 7427 // Dynamically generated format strings are difficult to 7428 // automatically vet at compile time. 
Requiring that format strings 7429 // are string literals: (1) permits the checking of format strings by 7430 // the compiler and thereby (2) can practically remove the source of 7431 // many format string exploits. 7432 7433 // Format string can be either ObjC string (e.g. @"%d") or 7434 // C string (e.g. "%d") 7435 // ObjC string uses the same format specifiers as C string, so we can use 7436 // the same format string checking logic for both ObjC and C strings. 7437 UncoveredArgHandler UncoveredArg; 7438 StringLiteralCheckType CT = 7439 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 7440 format_idx, firstDataArg, Type, CallType, 7441 /*IsFunctionCall*/ true, CheckedVarArgs, 7442 UncoveredArg, 7443 /*no string offset*/ llvm::APSInt(64, false) = 0); 7444 7445 // Generate a diagnostic where an uncovered argument is detected. 7446 if (UncoveredArg.hasUncoveredArg()) { 7447 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 7448 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 7449 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 7450 } 7451 7452 if (CT != SLCT_NotALiteral) 7453 // Literal format string found, check done! 7454 return CT == SLCT_CheckedLiteral; 7455 7456 // Strftime is particular as it always uses a single 'time' argument, 7457 // so it is safe to pass a non-literal string. 7458 if (Type == FST_Strftime) 7459 return false; 7460 7461 // Do not emit diag when the string param is a macro expansion and the 7462 // format is either NSString or CFString. This is a hack to prevent 7463 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 7464 // which are usually used in place of NS and CF string literals. 7465 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 7466 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 7467 return false; 7468 7469 // If there are no arguments specified, warn with -Wformat-security, otherwise 7470 // warn only with -Wformat-nonliteral. 7471 if (Args.size() == firstDataArg) { 7472 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 7473 << OrigFormatExpr->getSourceRange(); 7474 switch (Type) { 7475 default: 7476 break; 7477 case FST_Kprintf: 7478 case FST_FreeBSDKPrintf: 7479 case FST_Printf: 7480 Diag(FormatLoc, diag::note_format_security_fixit) 7481 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 7482 break; 7483 case FST_NSString: 7484 Diag(FormatLoc, diag::note_format_security_fixit) 7485 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 7486 break; 7487 } 7488 } else { 7489 Diag(FormatLoc, diag::warn_format_nonliteral) 7490 << OrigFormatExpr->getSourceRange(); 7491 } 7492 return false; 7493 } 7494 7495 namespace { 7496 7497 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 7498 protected: 7499 Sema &S; 7500 const FormatStringLiteral *FExpr; 7501 const Expr *OrigFormatExpr; 7502 const Sema::FormatStringType FSType; 7503 const unsigned FirstDataArg; 7504 const unsigned NumDataArgs; 7505 const char *Beg; // Start of format string. 
7506 const bool HasVAListArg; 7507 ArrayRef<const Expr *> Args; 7508 unsigned FormatIdx; 7509 llvm::SmallBitVector CoveredArgs; 7510 bool usesPositionalArgs = false; 7511 bool atFirstArg = true; 7512 bool inFunctionCall; 7513 Sema::VariadicCallType CallType; 7514 llvm::SmallBitVector &CheckedVarArgs; 7515 UncoveredArgHandler &UncoveredArg; 7516 7517 public: 7518 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 7519 const Expr *origFormatExpr, 7520 const Sema::FormatStringType type, unsigned firstDataArg, 7521 unsigned numDataArgs, const char *beg, bool hasVAListArg, 7522 ArrayRef<const Expr *> Args, unsigned formatIdx, 7523 bool inFunctionCall, Sema::VariadicCallType callType, 7524 llvm::SmallBitVector &CheckedVarArgs, 7525 UncoveredArgHandler &UncoveredArg) 7526 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 7527 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 7528 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 7529 inFunctionCall(inFunctionCall), CallType(callType), 7530 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 7531 CoveredArgs.resize(numDataArgs); 7532 CoveredArgs.reset(); 7533 } 7534 7535 void DoneProcessing(); 7536 7537 void HandleIncompleteSpecifier(const char *startSpecifier, 7538 unsigned specifierLen) override; 7539 7540 void HandleInvalidLengthModifier( 7541 const analyze_format_string::FormatSpecifier &FS, 7542 const analyze_format_string::ConversionSpecifier &CS, 7543 const char *startSpecifier, unsigned specifierLen, 7544 unsigned DiagID); 7545 7546 void HandleNonStandardLengthModifier( 7547 const analyze_format_string::FormatSpecifier &FS, 7548 const char *startSpecifier, unsigned specifierLen); 7549 7550 void HandleNonStandardConversionSpecifier( 7551 const analyze_format_string::ConversionSpecifier &CS, 7552 const char *startSpecifier, unsigned specifierLen); 7553 7554 void HandlePosition(const char *startPos, unsigned posLen) override; 7555 7556 void HandleInvalidPosition(const char *startSpecifier, 7557 unsigned specifierLen, 7558 analyze_format_string::PositionContext p) override; 7559 7560 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 7561 7562 void HandleNullChar(const char *nullCharacter) override; 7563 7564 template <typename Range> 7565 static void 7566 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 7567 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 7568 bool IsStringLocation, Range StringRange, 7569 ArrayRef<FixItHint> Fixit = None); 7570 7571 protected: 7572 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 7573 const char *startSpec, 7574 unsigned specifierLen, 7575 const char *csStart, unsigned csLen); 7576 7577 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 7578 const char *startSpec, 7579 unsigned specifierLen); 7580 7581 SourceRange getFormatStringRange(); 7582 CharSourceRange getSpecifierRange(const char *startSpecifier, 7583 unsigned specifierLen); 7584 SourceLocation getLocationOfByte(const char *x); 7585 7586 const Expr *getDataArg(unsigned i) const; 7587 7588 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 7589 const analyze_format_string::ConversionSpecifier &CS, 7590 const char *startSpecifier, unsigned specifierLen, 7591 unsigned argIndex); 7592 7593 template <typename Range> 7594 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 7595 bool IsStringLocation, Range StringRange, 7596 ArrayRef<FixItHint> Fixit = None); 
7597 }; 7598 7599 } // namespace 7600 7601 SourceRange CheckFormatHandler::getFormatStringRange() { 7602 return OrigFormatExpr->getSourceRange(); 7603 } 7604 7605 CharSourceRange CheckFormatHandler:: 7606 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 7607 SourceLocation Start = getLocationOfByte(startSpecifier); 7608 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 7609 7610 // Advance the end SourceLocation by one due to half-open ranges. 7611 End = End.getLocWithOffset(1); 7612 7613 return CharSourceRange::getCharRange(Start, End); 7614 } 7615 7616 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 7617 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 7618 S.getLangOpts(), S.Context.getTargetInfo()); 7619 } 7620 7621 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 7622 unsigned specifierLen){ 7623 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 7624 getLocationOfByte(startSpecifier), 7625 /*IsStringLocation*/true, 7626 getSpecifierRange(startSpecifier, specifierLen)); 7627 } 7628 7629 void CheckFormatHandler::HandleInvalidLengthModifier( 7630 const analyze_format_string::FormatSpecifier &FS, 7631 const analyze_format_string::ConversionSpecifier &CS, 7632 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 7633 using namespace analyze_format_string; 7634 7635 const LengthModifier &LM = FS.getLengthModifier(); 7636 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7637 7638 // See if we know how to fix this length modifier. 7639 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7640 if (FixedLM) { 7641 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7642 getLocationOfByte(LM.getStart()), 7643 /*IsStringLocation*/true, 7644 getSpecifierRange(startSpecifier, specifierLen)); 7645 7646 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7647 << FixedLM->toString() 7648 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7649 7650 } else { 7651 FixItHint Hint; 7652 if (DiagID == diag::warn_format_nonsensical_length) 7653 Hint = FixItHint::CreateRemoval(LMRange); 7654 7655 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7656 getLocationOfByte(LM.getStart()), 7657 /*IsStringLocation*/true, 7658 getSpecifierRange(startSpecifier, specifierLen), 7659 Hint); 7660 } 7661 } 7662 7663 void CheckFormatHandler::HandleNonStandardLengthModifier( 7664 const analyze_format_string::FormatSpecifier &FS, 7665 const char *startSpecifier, unsigned specifierLen) { 7666 using namespace analyze_format_string; 7667 7668 const LengthModifier &LM = FS.getLengthModifier(); 7669 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7670 7671 // See if we know how to fix this length modifier. 
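  // For example, the BSD 'q' length modifier in "%qd" is non-standard; when a
  // standard replacement is known (typically 'll' here), the fix-it note below
  // suggests it.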
7672 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7673 if (FixedLM) { 7674 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7675 << LM.toString() << 0, 7676 getLocationOfByte(LM.getStart()), 7677 /*IsStringLocation*/true, 7678 getSpecifierRange(startSpecifier, specifierLen)); 7679 7680 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7681 << FixedLM->toString() 7682 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7683 7684 } else { 7685 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7686 << LM.toString() << 0, 7687 getLocationOfByte(LM.getStart()), 7688 /*IsStringLocation*/true, 7689 getSpecifierRange(startSpecifier, specifierLen)); 7690 } 7691 } 7692 7693 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 7694 const analyze_format_string::ConversionSpecifier &CS, 7695 const char *startSpecifier, unsigned specifierLen) { 7696 using namespace analyze_format_string; 7697 7698 // See if we know how to fix this conversion specifier. 7699 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 7700 if (FixedCS) { 7701 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7702 << CS.toString() << /*conversion specifier*/1, 7703 getLocationOfByte(CS.getStart()), 7704 /*IsStringLocation*/true, 7705 getSpecifierRange(startSpecifier, specifierLen)); 7706 7707 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 7708 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 7709 << FixedCS->toString() 7710 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 7711 } else { 7712 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7713 << CS.toString() << /*conversion specifier*/1, 7714 getLocationOfByte(CS.getStart()), 7715 /*IsStringLocation*/true, 7716 getSpecifierRange(startSpecifier, specifierLen)); 7717 } 7718 } 7719 7720 void CheckFormatHandler::HandlePosition(const char *startPos, 7721 unsigned posLen) { 7722 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 7723 getLocationOfByte(startPos), 7724 /*IsStringLocation*/true, 7725 getSpecifierRange(startPos, posLen)); 7726 } 7727 7728 void 7729 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 7730 analyze_format_string::PositionContext p) { 7731 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 7732 << (unsigned) p, 7733 getLocationOfByte(startPos), /*IsStringLocation*/true, 7734 getSpecifierRange(startPos, posLen)); 7735 } 7736 7737 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 7738 unsigned posLen) { 7739 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 7740 getLocationOfByte(startPos), 7741 /*IsStringLocation*/true, 7742 getSpecifierRange(startPos, posLen)); 7743 } 7744 7745 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 7746 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 7747 // The presence of a null character is likely an error. 7748 EmitFormatDiagnostic( 7749 S.PDiag(diag::warn_printf_format_string_contains_null_char), 7750 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 7751 getFormatStringRange()); 7752 } 7753 } 7754 7755 // Note that this may return NULL if there was an error parsing or building 7756 // one of the argument expressions. 
const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
  return Args[FirstDataArg + i];
}

void CheckFormatHandler::DoneProcessing() {
  // Does the number of data arguments exceed the number of
  // format conversions in the format string?
  if (!HasVAListArg) {
    // Find any arguments that weren't covered.
    CoveredArgs.flip();
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
    } else {
      UncoveredArg.setAllCovered();
    }
  }
}

void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
                                   const Expr *ArgExpr) {
  assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
         "Invalid state");

  if (!ArgExpr)
    return;

  SourceLocation Loc = ArgExpr->getBeginLoc();

  if (S.getSourceManager().isInSystemMacro(Loc))
    return;

  PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
  for (auto E : DiagnosticExprs)
    PDiag << E->getSourceRange();

  CheckFormatHandler::EmitFormatDiagnostic(
      S, IsFunctionCall, DiagnosticExprs[0],
      PDiag, Loc, /*IsStringLocation*/false,
      DiagnosticExprs[0]->getSourceRange());
}

bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  }
  else {
    // If argIndex exceeds the number of data arguments we don't issue a
    // warning, because that would just be a cascade of warnings (and they
    // may have intended '%%' anyway). We don't want to continue processing
    // the format string after this point, however, as we will likely just
    // get gibberish when trying to match arguments.
    keepGoing = false;
  }

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  // hex value.
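  // For example, a stray 0x01 byte after '%' is rendered as "\x01", while a
  // byte sequence that decodes to U+20AC is rendered as "\u20ac".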
7827 std::string CodePointStr; 7828 if (!llvm::sys::locale::isPrint(*csStart)) { 7829 llvm::UTF32 CodePoint; 7830 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 7831 const llvm::UTF8 *E = 7832 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 7833 llvm::ConversionResult Result = 7834 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 7835 7836 if (Result != llvm::conversionOK) { 7837 unsigned char FirstChar = *csStart; 7838 CodePoint = (llvm::UTF32)FirstChar; 7839 } 7840 7841 llvm::raw_string_ostream OS(CodePointStr); 7842 if (CodePoint < 256) 7843 OS << "\\x" << llvm::format("%02x", CodePoint); 7844 else if (CodePoint <= 0xFFFF) 7845 OS << "\\u" << llvm::format("%04x", CodePoint); 7846 else 7847 OS << "\\U" << llvm::format("%08x", CodePoint); 7848 OS.flush(); 7849 Specifier = CodePointStr; 7850 } 7851 7852 EmitFormatDiagnostic( 7853 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 7854 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 7855 7856 return keepGoing; 7857 } 7858 7859 void 7860 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 7861 const char *startSpec, 7862 unsigned specifierLen) { 7863 EmitFormatDiagnostic( 7864 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 7865 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 7866 } 7867 7868 bool 7869 CheckFormatHandler::CheckNumArgs( 7870 const analyze_format_string::FormatSpecifier &FS, 7871 const analyze_format_string::ConversionSpecifier &CS, 7872 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 7873 7874 if (argIndex >= NumDataArgs) { 7875 PartialDiagnostic PDiag = FS.usesPositionalArg() 7876 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 7877 << (argIndex+1) << NumDataArgs) 7878 : S.PDiag(diag::warn_printf_insufficient_data_args); 7879 EmitFormatDiagnostic( 7880 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 7881 getSpecifierRange(startSpecifier, specifierLen)); 7882 7883 // Since more arguments than conversion tokens are given, by extension 7884 // all arguments are covered, so mark this as so. 7885 UncoveredArg.setAllCovered(); 7886 return false; 7887 } 7888 return true; 7889 } 7890 7891 template<typename Range> 7892 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 7893 SourceLocation Loc, 7894 bool IsStringLocation, 7895 Range StringRange, 7896 ArrayRef<FixItHint> FixIt) { 7897 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 7898 Loc, IsStringLocation, StringRange, FixIt); 7899 } 7900 7901 /// If the format string is not within the function call, emit a note 7902 /// so that the function call and string are in diagnostic messages. 7903 /// 7904 /// \param InFunctionCall if true, the format string is within the function 7905 /// call and only one diagnostic message will be produced. Otherwise, an 7906 /// extra note will be emitted pointing to location of the format string. 7907 /// 7908 /// \param ArgumentExpr the expression that is passed as the format string 7909 /// argument in the function call. Used for getting locations when two 7910 /// diagnostics are emitted. 7911 /// 7912 /// \param PDiag the callee should already have provided any strings for the 7913 /// diagnostic message. This function only adds locations and fixits 7914 /// to diagnostics. 7915 /// 7916 /// \param Loc primary location for diagnostic. 
If two diagnostics are 7917 /// required, one will be at Loc and a new SourceLocation will be created for 7918 /// the other one. 7919 /// 7920 /// \param IsStringLocation if true, Loc points to the format string should be 7921 /// used for the note. Otherwise, Loc points to the argument list and will 7922 /// be used with PDiag. 7923 /// 7924 /// \param StringRange some or all of the string to highlight. This is 7925 /// templated so it can accept either a CharSourceRange or a SourceRange. 7926 /// 7927 /// \param FixIt optional fix it hint for the format string. 7928 template <typename Range> 7929 void CheckFormatHandler::EmitFormatDiagnostic( 7930 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 7931 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 7932 Range StringRange, ArrayRef<FixItHint> FixIt) { 7933 if (InFunctionCall) { 7934 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 7935 D << StringRange; 7936 D << FixIt; 7937 } else { 7938 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 7939 << ArgumentExpr->getSourceRange(); 7940 7941 const Sema::SemaDiagnosticBuilder &Note = 7942 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), 7943 diag::note_format_string_defined); 7944 7945 Note << StringRange; 7946 Note << FixIt; 7947 } 7948 } 7949 7950 //===--- CHECK: Printf format string checking ------------------------------===// 7951 7952 namespace { 7953 7954 class CheckPrintfHandler : public CheckFormatHandler { 7955 public: 7956 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 7957 const Expr *origFormatExpr, 7958 const Sema::FormatStringType type, unsigned firstDataArg, 7959 unsigned numDataArgs, bool isObjC, const char *beg, 7960 bool hasVAListArg, ArrayRef<const Expr *> Args, 7961 unsigned formatIdx, bool inFunctionCall, 7962 Sema::VariadicCallType CallType, 7963 llvm::SmallBitVector &CheckedVarArgs, 7964 UncoveredArgHandler &UncoveredArg) 7965 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 7966 numDataArgs, beg, hasVAListArg, Args, formatIdx, 7967 inFunctionCall, CallType, CheckedVarArgs, 7968 UncoveredArg) {} 7969 7970 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 7971 7972 /// Returns true if '%@' specifiers are allowed in the format string. 
7973 bool allowsObjCArg() const { 7974 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 7975 FSType == Sema::FST_OSTrace; 7976 } 7977 7978 bool HandleInvalidPrintfConversionSpecifier( 7979 const analyze_printf::PrintfSpecifier &FS, 7980 const char *startSpecifier, 7981 unsigned specifierLen) override; 7982 7983 void handleInvalidMaskType(StringRef MaskType) override; 7984 7985 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 7986 const char *startSpecifier, 7987 unsigned specifierLen) override; 7988 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 7989 const char *StartSpecifier, 7990 unsigned SpecifierLen, 7991 const Expr *E); 7992 7993 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 7994 const char *startSpecifier, unsigned specifierLen); 7995 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 7996 const analyze_printf::OptionalAmount &Amt, 7997 unsigned type, 7998 const char *startSpecifier, unsigned specifierLen); 7999 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 8000 const analyze_printf::OptionalFlag &flag, 8001 const char *startSpecifier, unsigned specifierLen); 8002 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 8003 const analyze_printf::OptionalFlag &ignoredFlag, 8004 const analyze_printf::OptionalFlag &flag, 8005 const char *startSpecifier, unsigned specifierLen); 8006 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 8007 const Expr *E); 8008 8009 void HandleEmptyObjCModifierFlag(const char *startFlag, 8010 unsigned flagLen) override; 8011 8012 void HandleInvalidObjCModifierFlag(const char *startFlag, 8013 unsigned flagLen) override; 8014 8015 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 8016 const char *flagsEnd, 8017 const char *conversionPosition) 8018 override; 8019 }; 8020 8021 } // namespace 8022 8023 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 8024 const analyze_printf::PrintfSpecifier &FS, 8025 const char *startSpecifier, 8026 unsigned specifierLen) { 8027 const analyze_printf::PrintfConversionSpecifier &CS = 8028 FS.getConversionSpecifier(); 8029 8030 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8031 getLocationOfByte(CS.getStart()), 8032 startSpecifier, specifierLen, 8033 CS.getStart(), CS.getLength()); 8034 } 8035 8036 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 8037 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 8038 } 8039 8040 bool CheckPrintfHandler::HandleAmount( 8041 const analyze_format_string::OptionalAmount &Amt, 8042 unsigned k, const char *startSpecifier, 8043 unsigned specifierLen) { 8044 if (Amt.hasDataArgument()) { 8045 if (!HasVAListArg) { 8046 unsigned argIndex = Amt.getArgIndex(); 8047 if (argIndex >= NumDataArgs) { 8048 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 8049 << k, 8050 getLocationOfByte(Amt.getStart()), 8051 /*IsStringLocation*/true, 8052 getSpecifierRange(startSpecifier, specifierLen)); 8053 // Don't do any more checking. We will just emit 8054 // spurious errors. 8055 return false; 8056 } 8057 8058 // Type check the data argument. It should be an 'int'. 8059 // Although not in conformance with C99, we also allow the argument to be 8060 // an 'unsigned int' as that is a reasonably safe case. GCC also 8061 // doesn't emit a warning for that case. 
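      // For example, in printf("%*d", width, n) the 'width' argument should
      // be an int; passing a 'double' there is diagnosed below.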
8062 CoveredArgs.set(argIndex); 8063 const Expr *Arg = getDataArg(argIndex); 8064 if (!Arg) 8065 return false; 8066 8067 QualType T = Arg->getType(); 8068 8069 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 8070 assert(AT.isValid()); 8071 8072 if (!AT.matchesType(S.Context, T)) { 8073 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 8074 << k << AT.getRepresentativeTypeName(S.Context) 8075 << T << Arg->getSourceRange(), 8076 getLocationOfByte(Amt.getStart()), 8077 /*IsStringLocation*/true, 8078 getSpecifierRange(startSpecifier, specifierLen)); 8079 // Don't do any more checking. We will just emit 8080 // spurious errors. 8081 return false; 8082 } 8083 } 8084 } 8085 return true; 8086 } 8087 8088 void CheckPrintfHandler::HandleInvalidAmount( 8089 const analyze_printf::PrintfSpecifier &FS, 8090 const analyze_printf::OptionalAmount &Amt, 8091 unsigned type, 8092 const char *startSpecifier, 8093 unsigned specifierLen) { 8094 const analyze_printf::PrintfConversionSpecifier &CS = 8095 FS.getConversionSpecifier(); 8096 8097 FixItHint fixit = 8098 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 8099 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 8100 Amt.getConstantLength())) 8101 : FixItHint(); 8102 8103 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 8104 << type << CS.toString(), 8105 getLocationOfByte(Amt.getStart()), 8106 /*IsStringLocation*/true, 8107 getSpecifierRange(startSpecifier, specifierLen), 8108 fixit); 8109 } 8110 8111 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 8112 const analyze_printf::OptionalFlag &flag, 8113 const char *startSpecifier, 8114 unsigned specifierLen) { 8115 // Warn about pointless flag with a fixit removal. 8116 const analyze_printf::PrintfConversionSpecifier &CS = 8117 FS.getConversionSpecifier(); 8118 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 8119 << flag.toString() << CS.toString(), 8120 getLocationOfByte(flag.getPosition()), 8121 /*IsStringLocation*/true, 8122 getSpecifierRange(startSpecifier, specifierLen), 8123 FixItHint::CreateRemoval( 8124 getSpecifierRange(flag.getPosition(), 1))); 8125 } 8126 8127 void CheckPrintfHandler::HandleIgnoredFlag( 8128 const analyze_printf::PrintfSpecifier &FS, 8129 const analyze_printf::OptionalFlag &ignoredFlag, 8130 const analyze_printf::OptionalFlag &flag, 8131 const char *startSpecifier, 8132 unsigned specifierLen) { 8133 // Warn about ignored flag with a fixit removal. 8134 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 8135 << ignoredFlag.toString() << flag.toString(), 8136 getLocationOfByte(ignoredFlag.getPosition()), 8137 /*IsStringLocation*/true, 8138 getSpecifierRange(startSpecifier, specifierLen), 8139 FixItHint::CreateRemoval( 8140 getSpecifierRange(ignoredFlag.getPosition(), 1))); 8141 } 8142 8143 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 8144 unsigned flagLen) { 8145 // Warn about an empty flag. 8146 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 8147 getLocationOfByte(startFlag), 8148 /*IsStringLocation*/true, 8149 getSpecifierRange(startFlag, flagLen)); 8150 } 8151 8152 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 8153 unsigned flagLen) { 8154 // Warn about an invalid flag. 
8155 auto Range = getSpecifierRange(startFlag, flagLen); 8156 StringRef flag(startFlag, flagLen); 8157 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 8158 getLocationOfByte(startFlag), 8159 /*IsStringLocation*/true, 8160 Range, FixItHint::CreateRemoval(Range)); 8161 } 8162 8163 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 8164 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 8165 // Warn about using '[...]' without a '@' conversion. 8166 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 8167 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 8168 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 8169 getLocationOfByte(conversionPosition), 8170 /*IsStringLocation*/true, 8171 Range, FixItHint::CreateRemoval(Range)); 8172 } 8173 8174 // Determines if the specified is a C++ class or struct containing 8175 // a member with the specified name and kind (e.g. a CXXMethodDecl named 8176 // "c_str()"). 8177 template<typename MemberKind> 8178 static llvm::SmallPtrSet<MemberKind*, 1> 8179 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 8180 const RecordType *RT = Ty->getAs<RecordType>(); 8181 llvm::SmallPtrSet<MemberKind*, 1> Results; 8182 8183 if (!RT) 8184 return Results; 8185 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 8186 if (!RD || !RD->getDefinition()) 8187 return Results; 8188 8189 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 8190 Sema::LookupMemberName); 8191 R.suppressDiagnostics(); 8192 8193 // We just need to include all members of the right kind turned up by the 8194 // filter, at this point. 8195 if (S.LookupQualifiedName(R, RT->getDecl())) 8196 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 8197 NamedDecl *decl = (*I)->getUnderlyingDecl(); 8198 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 8199 Results.insert(FK); 8200 } 8201 return Results; 8202 } 8203 8204 /// Check if we could call '.c_str()' on an object. 8205 /// 8206 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 8207 /// allow the call, or if it would be ambiguous). 8208 bool Sema::hasCStrMethod(const Expr *E) { 8209 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 8210 8211 MethodSet Results = 8212 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 8213 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 8214 MI != ME; ++MI) 8215 if ((*MI)->getMinRequiredArguments() == 0) 8216 return true; 8217 return false; 8218 } 8219 8220 // Check if a (w)string was passed when a (w)char* was needed, and offer a 8221 // better diagnostic if so. AT is assumed to be valid. 8222 // Returns true when a c_str() conversion method is found. 8223 bool CheckPrintfHandler::checkForCStrMembers( 8224 const analyze_printf::ArgType &AT, const Expr *E) { 8225 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 8226 8227 MethodSet Results = 8228 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 8229 8230 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 8231 MI != ME; ++MI) { 8232 const CXXMethodDecl *Method = *MI; 8233 if (Method->getMinRequiredArguments() == 0 && 8234 AT.matchesType(S.Context, Method->getReturnType())) { 8235 // FIXME: Suggest parens if the expression needs them. 
8236 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 8237 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 8238 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 8239 return true; 8240 } 8241 } 8242 8243 return false; 8244 } 8245 8246 bool 8247 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 8248 &FS, 8249 const char *startSpecifier, 8250 unsigned specifierLen) { 8251 using namespace analyze_format_string; 8252 using namespace analyze_printf; 8253 8254 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 8255 8256 if (FS.consumesDataArgument()) { 8257 if (atFirstArg) { 8258 atFirstArg = false; 8259 usesPositionalArgs = FS.usesPositionalArg(); 8260 } 8261 else if (usesPositionalArgs != FS.usesPositionalArg()) { 8262 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 8263 startSpecifier, specifierLen); 8264 return false; 8265 } 8266 } 8267 8268 // First check if the field width, precision, and conversion specifier 8269 // have matching data arguments. 8270 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 8271 startSpecifier, specifierLen)) { 8272 return false; 8273 } 8274 8275 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 8276 startSpecifier, specifierLen)) { 8277 return false; 8278 } 8279 8280 if (!CS.consumesDataArgument()) { 8281 // FIXME: Technically specifying a precision or field width here 8282 // makes no sense. Worth issuing a warning at some point. 8283 return true; 8284 } 8285 8286 // Consume the argument. 8287 unsigned argIndex = FS.getArgIndex(); 8288 if (argIndex < NumDataArgs) { 8289 // The check to see if the argIndex is valid will come later. 8290 // We set the bit here because we may exit early from this 8291 // function if we encounter some other error. 8292 CoveredArgs.set(argIndex); 8293 } 8294 8295 // FreeBSD kernel extensions. 8296 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 8297 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 8298 // We need at least two arguments. 8299 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 8300 return false; 8301 8302 // Claim the second argument. 8303 CoveredArgs.set(argIndex + 1); 8304 8305 // Type check the first argument (int for %b, pointer for %D) 8306 const Expr *Ex = getDataArg(argIndex); 8307 const analyze_printf::ArgType &AT = 8308 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 8309 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 8310 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 8311 EmitFormatDiagnostic( 8312 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8313 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 8314 << false << Ex->getSourceRange(), 8315 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8316 getSpecifierRange(startSpecifier, specifierLen)); 8317 8318 // Type check the second argument (char * for both %b and %D) 8319 Ex = getDataArg(argIndex + 1); 8320 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 8321 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 8322 EmitFormatDiagnostic( 8323 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8324 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 8325 << false << Ex->getSourceRange(), 8326 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8327 getSpecifierRange(startSpecifier, specifierLen)); 8328 8329 return true; 8330 } 8331 8332 // Check for using an Objective-C specific conversion specifier 8333 // in a non-ObjC literal. 
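  // For example, "%@" in a plain printf format string is rejected here.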
8334 if (!allowsObjCArg() && CS.isObjCArg()) { 8335 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8336 specifierLen); 8337 } 8338 8339 // %P can only be used with os_log. 8340 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 8341 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8342 specifierLen); 8343 } 8344 8345 // %n is not allowed with os_log. 8346 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 8347 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 8348 getLocationOfByte(CS.getStart()), 8349 /*IsStringLocation*/ false, 8350 getSpecifierRange(startSpecifier, specifierLen)); 8351 8352 return true; 8353 } 8354 8355 // Only scalars are allowed for os_trace. 8356 if (FSType == Sema::FST_OSTrace && 8357 (CS.getKind() == ConversionSpecifier::PArg || 8358 CS.getKind() == ConversionSpecifier::sArg || 8359 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 8360 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8361 specifierLen); 8362 } 8363 8364 // Check for use of public/private annotation outside of os_log(). 8365 if (FSType != Sema::FST_OSLog) { 8366 if (FS.isPublic().isSet()) { 8367 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8368 << "public", 8369 getLocationOfByte(FS.isPublic().getPosition()), 8370 /*IsStringLocation*/ false, 8371 getSpecifierRange(startSpecifier, specifierLen)); 8372 } 8373 if (FS.isPrivate().isSet()) { 8374 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8375 << "private", 8376 getLocationOfByte(FS.isPrivate().getPosition()), 8377 /*IsStringLocation*/ false, 8378 getSpecifierRange(startSpecifier, specifierLen)); 8379 } 8380 } 8381 8382 // Check for invalid use of field width 8383 if (!FS.hasValidFieldWidth()) { 8384 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 8385 startSpecifier, specifierLen); 8386 } 8387 8388 // Check for invalid use of precision 8389 if (!FS.hasValidPrecision()) { 8390 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 8391 startSpecifier, specifierLen); 8392 } 8393 8394 // Precision is mandatory for %P specifier. 8395 if (CS.getKind() == ConversionSpecifier::PArg && 8396 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 8397 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 8398 getLocationOfByte(startSpecifier), 8399 /*IsStringLocation*/ false, 8400 getSpecifierRange(startSpecifier, specifierLen)); 8401 } 8402 8403 // Check each flag does not conflict with any other component. 
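  // For example, in printf("%+s", s) the '+' flag does not apply to the 's'
  // conversion and is reported as nonsensical, with a removal fix-it.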
8404 if (!FS.hasValidThousandsGroupingPrefix()) 8405 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 8406 if (!FS.hasValidLeadingZeros()) 8407 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 8408 if (!FS.hasValidPlusPrefix()) 8409 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 8410 if (!FS.hasValidSpacePrefix()) 8411 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 8412 if (!FS.hasValidAlternativeForm()) 8413 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 8414 if (!FS.hasValidLeftJustified()) 8415 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 8416 8417 // Check that flags are not ignored by another flag 8418 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 8419 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 8420 startSpecifier, specifierLen); 8421 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 8422 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 8423 startSpecifier, specifierLen); 8424 8425 // Check the length modifier is valid with the given conversion specifier. 8426 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8427 S.getLangOpts())) 8428 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8429 diag::warn_format_nonsensical_length); 8430 else if (!FS.hasStandardLengthModifier()) 8431 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8432 else if (!FS.hasStandardLengthConversionCombination()) 8433 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8434 diag::warn_format_non_standard_conversion_spec); 8435 8436 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 8437 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 8438 8439 // The remaining checks depend on the data arguments. 8440 if (HasVAListArg) 8441 return true; 8442 8443 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 8444 return false; 8445 8446 const Expr *Arg = getDataArg(argIndex); 8447 if (!Arg) 8448 return true; 8449 8450 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 8451 } 8452 8453 static bool requiresParensToAddCast(const Expr *E) { 8454 // FIXME: We should have a general way to reason about operator 8455 // precedence and whether parens are actually needed here. 8456 // Take care of a few common cases where they aren't. 
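  // For example, a plain variable reference can be cast in place
  // ("(NSInteger)n"), whereas 'a + b' needs surrounding parentheses,
  // "(NSInteger)(a + b)".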
8457 const Expr *Inside = E->IgnoreImpCasts(); 8458 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 8459 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 8460 8461 switch (Inside->getStmtClass()) { 8462 case Stmt::ArraySubscriptExprClass: 8463 case Stmt::CallExprClass: 8464 case Stmt::CharacterLiteralClass: 8465 case Stmt::CXXBoolLiteralExprClass: 8466 case Stmt::DeclRefExprClass: 8467 case Stmt::FloatingLiteralClass: 8468 case Stmt::IntegerLiteralClass: 8469 case Stmt::MemberExprClass: 8470 case Stmt::ObjCArrayLiteralClass: 8471 case Stmt::ObjCBoolLiteralExprClass: 8472 case Stmt::ObjCBoxedExprClass: 8473 case Stmt::ObjCDictionaryLiteralClass: 8474 case Stmt::ObjCEncodeExprClass: 8475 case Stmt::ObjCIvarRefExprClass: 8476 case Stmt::ObjCMessageExprClass: 8477 case Stmt::ObjCPropertyRefExprClass: 8478 case Stmt::ObjCStringLiteralClass: 8479 case Stmt::ObjCSubscriptRefExprClass: 8480 case Stmt::ParenExprClass: 8481 case Stmt::StringLiteralClass: 8482 case Stmt::UnaryOperatorClass: 8483 return false; 8484 default: 8485 return true; 8486 } 8487 } 8488 8489 static std::pair<QualType, StringRef> 8490 shouldNotPrintDirectly(const ASTContext &Context, 8491 QualType IntendedTy, 8492 const Expr *E) { 8493 // Use a 'while' to peel off layers of typedefs. 8494 QualType TyTy = IntendedTy; 8495 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 8496 StringRef Name = UserTy->getDecl()->getName(); 8497 QualType CastTy = llvm::StringSwitch<QualType>(Name) 8498 .Case("CFIndex", Context.getNSIntegerType()) 8499 .Case("NSInteger", Context.getNSIntegerType()) 8500 .Case("NSUInteger", Context.getNSUIntegerType()) 8501 .Case("SInt32", Context.IntTy) 8502 .Case("UInt32", Context.UnsignedIntTy) 8503 .Default(QualType()); 8504 8505 if (!CastTy.isNull()) 8506 return std::make_pair(CastTy, Name); 8507 8508 TyTy = UserTy->desugar(); 8509 } 8510 8511 // Strip parens if necessary. 8512 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 8513 return shouldNotPrintDirectly(Context, 8514 PE->getSubExpr()->getType(), 8515 PE->getSubExpr()); 8516 8517 // If this is a conditional expression, then its result type is constructed 8518 // via usual arithmetic conversions and thus there might be no necessary 8519 // typedef sugar there. Recurse to operands to check for NSInteger & 8520 // Co. usage condition. 8521 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 8522 QualType TrueTy, FalseTy; 8523 StringRef TrueName, FalseName; 8524 8525 std::tie(TrueTy, TrueName) = 8526 shouldNotPrintDirectly(Context, 8527 CO->getTrueExpr()->getType(), 8528 CO->getTrueExpr()); 8529 std::tie(FalseTy, FalseName) = 8530 shouldNotPrintDirectly(Context, 8531 CO->getFalseExpr()->getType(), 8532 CO->getFalseExpr()); 8533 8534 if (TrueTy == FalseTy) 8535 return std::make_pair(TrueTy, TrueName); 8536 else if (TrueTy.isNull()) 8537 return std::make_pair(FalseTy, FalseName); 8538 else if (FalseTy.isNull()) 8539 return std::make_pair(TrueTy, TrueName); 8540 } 8541 8542 return std::make_pair(QualType(), StringRef()); 8543 } 8544 8545 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 8546 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 8547 /// type do not count. 8548 static bool 8549 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 8550 QualType From = ICE->getSubExpr()->getType(); 8551 QualType To = ICE->getType(); 8552 // It's an integer promotion if the destination type is the promoted 8553 // source type. 
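  // For example, a 'short' argument passed to a variadic function is
  // promoted to 'int'; such promotions are looked through here.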
8554 if (ICE->getCastKind() == CK_IntegralCast && 8555 From->isPromotableIntegerType() && 8556 S.Context.getPromotedIntegerType(From) == To) 8557 return true; 8558 // Look through vector types, since we do default argument promotion for 8559 // those in OpenCL. 8560 if (const auto *VecTy = From->getAs<ExtVectorType>()) 8561 From = VecTy->getElementType(); 8562 if (const auto *VecTy = To->getAs<ExtVectorType>()) 8563 To = VecTy->getElementType(); 8564 // It's a floating promotion if the source type is a lower rank. 8565 return ICE->getCastKind() == CK_FloatingCast && 8566 S.Context.getFloatingTypeOrder(From, To) < 0; 8567 } 8568 8569 bool 8570 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8571 const char *StartSpecifier, 8572 unsigned SpecifierLen, 8573 const Expr *E) { 8574 using namespace analyze_format_string; 8575 using namespace analyze_printf; 8576 8577 // Now type check the data expression that matches the 8578 // format specifier. 8579 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 8580 if (!AT.isValid()) 8581 return true; 8582 8583 QualType ExprTy = E->getType(); 8584 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 8585 ExprTy = TET->getUnderlyingExpr()->getType(); 8586 } 8587 8588 // Diagnose attempts to print a boolean value as a character. Unlike other 8589 // -Wformat diagnostics, this is fine from a type perspective, but it still 8590 // doesn't make sense. 8591 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 8592 E->isKnownToHaveBooleanValue()) { 8593 const CharSourceRange &CSR = 8594 getSpecifierRange(StartSpecifier, SpecifierLen); 8595 SmallString<4> FSString; 8596 llvm::raw_svector_ostream os(FSString); 8597 FS.toString(os); 8598 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 8599 << FSString, 8600 E->getExprLoc(), false, CSR); 8601 return true; 8602 } 8603 8604 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 8605 if (Match == analyze_printf::ArgType::Match) 8606 return true; 8607 8608 // Look through argument promotions for our error message's reported type. 8609 // This includes the integral and floating promotions, but excludes array 8610 // and function pointer decay (seeing that an argument intended to be a 8611 // string has type 'char [6]' is probably more confusing than 'char *') and 8612 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 8613 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 8614 if (isArithmeticArgumentPromotion(S, ICE)) { 8615 E = ICE->getSubExpr(); 8616 ExprTy = E->getType(); 8617 8618 // Check if we didn't match because of an implicit cast from a 'char' 8619 // or 'short' to an 'int'. This is done because printf is a varargs 8620 // function. 8621 if (ICE->getType() == S.Context.IntTy || 8622 ICE->getType() == S.Context.UnsignedIntTy) { 8623 // All further checking is done on the subexpression 8624 const analyze_printf::ArgType::MatchKind ImplicitMatch = 8625 AT.matchesType(S.Context, ExprTy); 8626 if (ImplicitMatch == analyze_printf::ArgType::Match) 8627 return true; 8628 if (ImplicitMatch == ArgType::NoMatchPedantic || 8629 ImplicitMatch == ArgType::NoMatchTypeConfusion) 8630 Match = ImplicitMatch; 8631 } 8632 } 8633 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 8634 // Special case for 'a', which has type 'int' in C. 
8635 // Note, however, that we do /not/ want to treat multibyte constants like 8636 // 'MooV' as characters! This form is deprecated but still exists. 8637 if (ExprTy == S.Context.IntTy) 8638 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 8639 ExprTy = S.Context.CharTy; 8640 } 8641 8642 // Look through enums to their underlying type. 8643 bool IsEnum = false; 8644 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 8645 ExprTy = EnumTy->getDecl()->getIntegerType(); 8646 IsEnum = true; 8647 } 8648 8649 // %C in an Objective-C context prints a unichar, not a wchar_t. 8650 // If the argument is an integer of some kind, believe the %C and suggest 8651 // a cast instead of changing the conversion specifier. 8652 QualType IntendedTy = ExprTy; 8653 if (isObjCContext() && 8654 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 8655 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 8656 !ExprTy->isCharType()) { 8657 // 'unichar' is defined as a typedef of unsigned short, but we should 8658 // prefer using the typedef if it is visible. 8659 IntendedTy = S.Context.UnsignedShortTy; 8660 8661 // While we are here, check if the value is an IntegerLiteral that happens 8662 // to be within the valid range. 8663 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 8664 const llvm::APInt &V = IL->getValue(); 8665 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 8666 return true; 8667 } 8668 8669 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 8670 Sema::LookupOrdinaryName); 8671 if (S.LookupName(Result, S.getCurScope())) { 8672 NamedDecl *ND = Result.getFoundDecl(); 8673 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 8674 if (TD->getUnderlyingType() == IntendedTy) 8675 IntendedTy = S.Context.getTypedefType(TD); 8676 } 8677 } 8678 } 8679 8680 // Special-case some of Darwin's platform-independence types by suggesting 8681 // casts to primitive types that are known to be large enough. 8682 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 8683 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 8684 QualType CastTy; 8685 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 8686 if (!CastTy.isNull()) { 8687 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 8688 // (long in ASTContext). Only complain to pedants. 8689 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 8690 (AT.isSizeT() || AT.isPtrdiffT()) && 8691 AT.matchesType(S.Context, CastTy)) 8692 Match = ArgType::NoMatchPedantic; 8693 IntendedTy = CastTy; 8694 ShouldNotPrintDirectly = true; 8695 } 8696 } 8697 8698 // We may be able to offer a FixItHint if it is a supported type. 
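  // For example, when a 'long' is passed for "%d", the specifier can usually
  // be rewritten to "%ld" and offered as a fix-it.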
8699 PrintfSpecifier fixedFS = FS; 8700 bool Success = 8701 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 8702 8703 if (Success) { 8704 // Get the fix string from the fixed format specifier 8705 SmallString<16> buf; 8706 llvm::raw_svector_ostream os(buf); 8707 fixedFS.toString(os); 8708 8709 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 8710 8711 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 8712 unsigned Diag; 8713 switch (Match) { 8714 case ArgType::Match: llvm_unreachable("expected non-matching"); 8715 case ArgType::NoMatchPedantic: 8716 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 8717 break; 8718 case ArgType::NoMatchTypeConfusion: 8719 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 8720 break; 8721 case ArgType::NoMatch: 8722 Diag = diag::warn_format_conversion_argument_type_mismatch; 8723 break; 8724 } 8725 8726 // In this case, the specifier is wrong and should be changed to match 8727 // the argument. 8728 EmitFormatDiagnostic(S.PDiag(Diag) 8729 << AT.getRepresentativeTypeName(S.Context) 8730 << IntendedTy << IsEnum << E->getSourceRange(), 8731 E->getBeginLoc(), 8732 /*IsStringLocation*/ false, SpecRange, 8733 FixItHint::CreateReplacement(SpecRange, os.str())); 8734 } else { 8735 // The canonical type for formatting this value is different from the 8736 // actual type of the expression. (This occurs, for example, with Darwin's 8737 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 8738 // should be printed as 'long' for 64-bit compatibility.) 8739 // Rather than emitting a normal format/argument mismatch, we want to 8740 // add a cast to the recommended type (and correct the format string 8741 // if necessary). 8742 SmallString<16> CastBuf; 8743 llvm::raw_svector_ostream CastFix(CastBuf); 8744 CastFix << "("; 8745 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 8746 CastFix << ")"; 8747 8748 SmallVector<FixItHint,4> Hints; 8749 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 8750 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 8751 8752 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 8753 // If there's already a cast present, just replace it. 8754 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 8755 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 8756 8757 } else if (!requiresParensToAddCast(E)) { 8758 // If the expression has high enough precedence, 8759 // just write the C-style cast. 8760 Hints.push_back( 8761 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8762 } else { 8763 // Otherwise, add parens around the expression as well as the cast. 8764 CastFix << "("; 8765 Hints.push_back( 8766 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8767 8768 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 8769 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 8770 } 8771 8772 if (ShouldNotPrintDirectly) { 8773 // The expression has a type that should not be printed directly. 8774 // We extract the name from the typedef because we don't want to show 8775 // the underlying type in the diagnostic. 8776 StringRef Name; 8777 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 8778 Name = TypedefTy->getDecl()->getName(); 8779 else 8780 Name = CastTyName; 8781 unsigned Diag = Match == ArgType::NoMatchPedantic 8782 ? 
diag::warn_format_argument_needs_cast_pedantic 8783 : diag::warn_format_argument_needs_cast; 8784 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 8785 << E->getSourceRange(), 8786 E->getBeginLoc(), /*IsStringLocation=*/false, 8787 SpecRange, Hints); 8788 } else { 8789 // In this case, the expression could be printed using a different 8790 // specifier, but we've decided that the specifier is probably correct 8791 // and we should cast instead. Just use the normal warning message. 8792 EmitFormatDiagnostic( 8793 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8794 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 8795 << E->getSourceRange(), 8796 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 8797 } 8798 } 8799 } else { 8800 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 8801 SpecifierLen); 8802 // Since the warning for passing non-POD types to variadic functions 8803 // was deferred until now, we emit a warning for non-POD 8804 // arguments here. 8805 switch (S.isValidVarArgType(ExprTy)) { 8806 case Sema::VAK_Valid: 8807 case Sema::VAK_ValidInCXX11: { 8808 unsigned Diag; 8809 switch (Match) { 8810 case ArgType::Match: llvm_unreachable("expected non-matching"); 8811 case ArgType::NoMatchPedantic: 8812 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 8813 break; 8814 case ArgType::NoMatchTypeConfusion: 8815 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 8816 break; 8817 case ArgType::NoMatch: 8818 Diag = diag::warn_format_conversion_argument_type_mismatch; 8819 break; 8820 } 8821 8822 EmitFormatDiagnostic( 8823 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 8824 << IsEnum << CSR << E->getSourceRange(), 8825 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8826 break; 8827 } 8828 case Sema::VAK_Undefined: 8829 case Sema::VAK_MSVCUndefined: 8830 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 8831 << S.getLangOpts().CPlusPlus11 << ExprTy 8832 << CallType 8833 << AT.getRepresentativeTypeName(S.Context) << CSR 8834 << E->getSourceRange(), 8835 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8836 checkForCStrMembers(AT, E); 8837 break; 8838 8839 case Sema::VAK_Invalid: 8840 if (ExprTy->isObjCObjectType()) 8841 EmitFormatDiagnostic( 8842 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 8843 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 8844 << AT.getRepresentativeTypeName(S.Context) << CSR 8845 << E->getSourceRange(), 8846 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8847 else 8848 // FIXME: If this is an initializer list, suggest removing the braces 8849 // or inserting a cast to the target type. 
8850 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 8851 << isa<InitListExpr>(E) << ExprTy << CallType 8852 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 8853 break; 8854 } 8855 8856 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 8857 "format string specifier index out of range"); 8858 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 8859 } 8860 8861 return true; 8862 } 8863 8864 //===--- CHECK: Scanf format string checking ------------------------------===// 8865 8866 namespace { 8867 8868 class CheckScanfHandler : public CheckFormatHandler { 8869 public: 8870 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 8871 const Expr *origFormatExpr, Sema::FormatStringType type, 8872 unsigned firstDataArg, unsigned numDataArgs, 8873 const char *beg, bool hasVAListArg, 8874 ArrayRef<const Expr *> Args, unsigned formatIdx, 8875 bool inFunctionCall, Sema::VariadicCallType CallType, 8876 llvm::SmallBitVector &CheckedVarArgs, 8877 UncoveredArgHandler &UncoveredArg) 8878 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 8879 numDataArgs, beg, hasVAListArg, Args, formatIdx, 8880 inFunctionCall, CallType, CheckedVarArgs, 8881 UncoveredArg) {} 8882 8883 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 8884 const char *startSpecifier, 8885 unsigned specifierLen) override; 8886 8887 bool HandleInvalidScanfConversionSpecifier( 8888 const analyze_scanf::ScanfSpecifier &FS, 8889 const char *startSpecifier, 8890 unsigned specifierLen) override; 8891 8892 void HandleIncompleteScanList(const char *start, const char *end) override; 8893 }; 8894 8895 } // namespace 8896 8897 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 8898 const char *end) { 8899 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 8900 getLocationOfByte(end), /*IsStringLocation*/true, 8901 getSpecifierRange(start, end - start)); 8902 } 8903 8904 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 8905 const analyze_scanf::ScanfSpecifier &FS, 8906 const char *startSpecifier, 8907 unsigned specifierLen) { 8908 const analyze_scanf::ScanfConversionSpecifier &CS = 8909 FS.getConversionSpecifier(); 8910 8911 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8912 getLocationOfByte(CS.getStart()), 8913 startSpecifier, specifierLen, 8914 CS.getStart(), CS.getLength()); 8915 } 8916 8917 bool CheckScanfHandler::HandleScanfSpecifier( 8918 const analyze_scanf::ScanfSpecifier &FS, 8919 const char *startSpecifier, 8920 unsigned specifierLen) { 8921 using namespace analyze_scanf; 8922 using namespace analyze_format_string; 8923 8924 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); 8925 8926 // Handle case where '%' and '*' don't consume an argument. These shouldn't 8927 // be used to decide if we are using positional arguments consistently. 8928 if (FS.consumesDataArgument()) { 8929 if (atFirstArg) { 8930 atFirstArg = false; 8931 usesPositionalArgs = FS.usesPositionalArg(); 8932 } 8933 else if (usesPositionalArgs != FS.usesPositionalArg()) { 8934 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 8935 startSpecifier, specifierLen); 8936 return false; 8937 } 8938 } 8939 8940 // Check if the field with is non-zero. 
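  // For example, sscanf(buf, "%0d", &n) specifies a zero field width, which
  // is pointless and is given a removal fix-it below.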
8941 const OptionalAmount &Amt = FS.getFieldWidth(); 8942 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 8943 if (Amt.getConstantAmount() == 0) { 8944 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 8945 Amt.getConstantLength()); 8946 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 8947 getLocationOfByte(Amt.getStart()), 8948 /*IsStringLocation*/true, R, 8949 FixItHint::CreateRemoval(R)); 8950 } 8951 } 8952 8953 if (!FS.consumesDataArgument()) { 8954 // FIXME: Technically specifying a precision or field width here 8955 // makes no sense. Worth issuing a warning at some point. 8956 return true; 8957 } 8958 8959 // Consume the argument. 8960 unsigned argIndex = FS.getArgIndex(); 8961 if (argIndex < NumDataArgs) { 8962 // The check to see if the argIndex is valid will come later. 8963 // We set the bit here because we may exit early from this 8964 // function if we encounter some other error. 8965 CoveredArgs.set(argIndex); 8966 } 8967 8968 // Check the length modifier is valid with the given conversion specifier. 8969 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8970 S.getLangOpts())) 8971 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8972 diag::warn_format_nonsensical_length); 8973 else if (!FS.hasStandardLengthModifier()) 8974 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8975 else if (!FS.hasStandardLengthConversionCombination()) 8976 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8977 diag::warn_format_non_standard_conversion_spec); 8978 8979 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 8980 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 8981 8982 // The remaining checks depend on the data arguments. 8983 if (HasVAListArg) 8984 return true; 8985 8986 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 8987 return false; 8988 8989 // Check that the argument type matches the format specifier. 8990 const Expr *Ex = getDataArg(argIndex); 8991 if (!Ex) 8992 return true; 8993 8994 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 8995 8996 if (!AT.isValid()) { 8997 return true; 8998 } 8999 9000 analyze_format_string::ArgType::MatchKind Match = 9001 AT.matchesType(S.Context, Ex->getType()); 9002 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 9003 if (Match == analyze_format_string::ArgType::Match) 9004 return true; 9005 9006 ScanfSpecifier fixedFS = FS; 9007 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 9008 S.getLangOpts(), S.Context); 9009 9010 unsigned Diag = 9011 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 9012 : diag::warn_format_conversion_argument_type_mismatch; 9013 9014 if (Success) { 9015 // Get the fix string from the fixed format specifier. 
9016 SmallString<128> buf; 9017 llvm::raw_svector_ostream os(buf); 9018 fixedFS.toString(os); 9019 9020 EmitFormatDiagnostic( 9021 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 9022 << Ex->getType() << false << Ex->getSourceRange(), 9023 Ex->getBeginLoc(), 9024 /*IsStringLocation*/ false, 9025 getSpecifierRange(startSpecifier, specifierLen), 9026 FixItHint::CreateReplacement( 9027 getSpecifierRange(startSpecifier, specifierLen), os.str())); 9028 } else { 9029 EmitFormatDiagnostic(S.PDiag(Diag) 9030 << AT.getRepresentativeTypeName(S.Context) 9031 << Ex->getType() << false << Ex->getSourceRange(), 9032 Ex->getBeginLoc(), 9033 /*IsStringLocation*/ false, 9034 getSpecifierRange(startSpecifier, specifierLen)); 9035 } 9036 9037 return true; 9038 } 9039 9040 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 9041 const Expr *OrigFormatExpr, 9042 ArrayRef<const Expr *> Args, 9043 bool HasVAListArg, unsigned format_idx, 9044 unsigned firstDataArg, 9045 Sema::FormatStringType Type, 9046 bool inFunctionCall, 9047 Sema::VariadicCallType CallType, 9048 llvm::SmallBitVector &CheckedVarArgs, 9049 UncoveredArgHandler &UncoveredArg, 9050 bool IgnoreStringsWithoutSpecifiers) { 9051 // CHECK: is the format string a wide literal? 9052 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 9053 CheckFormatHandler::EmitFormatDiagnostic( 9054 S, inFunctionCall, Args[format_idx], 9055 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 9056 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 9057 return; 9058 } 9059 9060 // Str - The format string. NOTE: this is NOT null-terminated! 9061 StringRef StrRef = FExpr->getString(); 9062 const char *Str = StrRef.data(); 9063 // Account for cases where the string literal is truncated in a declaration. 9064 const ConstantArrayType *T = 9065 S.Context.getAsConstantArrayType(FExpr->getType()); 9066 assert(T && "String literal not of constant array type!"); 9067 size_t TypeSize = T->getSize().getZExtValue(); 9068 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 9069 const unsigned numDataArgs = Args.size() - firstDataArg; 9070 9071 if (IgnoreStringsWithoutSpecifiers && 9072 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 9073 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 9074 return; 9075 9076 // Emit a warning if the string literal is truncated and does not contain an 9077 // embedded null character. 9078 if (TypeSize <= StrRef.size() && 9079 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) { 9080 CheckFormatHandler::EmitFormatDiagnostic( 9081 S, inFunctionCall, Args[format_idx], 9082 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 9083 FExpr->getBeginLoc(), 9084 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 9085 return; 9086 } 9087 9088 // CHECK: empty format string? 
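// Illustrative example: a call like
//   printf("", value);
// reaches the check below with an empty format string but one data argument,
// and is diagnosed with warn_empty_format_string.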
9089 if (StrLen == 0 && numDataArgs > 0) { 9090 CheckFormatHandler::EmitFormatDiagnostic( 9091 S, inFunctionCall, Args[format_idx], 9092 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(), 9093 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 9094 return; 9095 } 9096 9097 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString || 9098 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog || 9099 Type == Sema::FST_OSTrace) { 9100 CheckPrintfHandler H( 9101 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs, 9102 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, 9103 HasVAListArg, Args, format_idx, inFunctionCall, CallType, 9104 CheckedVarArgs, UncoveredArg); 9105 9106 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen, 9107 S.getLangOpts(), 9108 S.Context.getTargetInfo(), 9109 Type == Sema::FST_FreeBSDKPrintf)) 9110 H.DoneProcessing(); 9111 } else if (Type == Sema::FST_Scanf) { 9112 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg, 9113 numDataArgs, Str, HasVAListArg, Args, format_idx, 9114 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg); 9115 9116 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen, 9117 S.getLangOpts(), 9118 S.Context.getTargetInfo())) 9119 H.DoneProcessing(); 9120 } // TODO: handle other formats 9121 } 9122 9123 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) { 9124 // Str - The format string. NOTE: this is NOT null-terminated! 9125 StringRef StrRef = FExpr->getString(); 9126 const char *Str = StrRef.data(); 9127 // Account for cases where the string literal is truncated in a declaration. 9128 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType()); 9129 assert(T && "String literal not of constant array type!"); 9130 size_t TypeSize = T->getSize().getZExtValue(); 9131 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 9132 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen, 9133 getLangOpts(), 9134 Context.getTargetInfo()); 9135 } 9136 9137 //===--- CHECK: Warn on use of wrong absolute value function. -------------===// 9138 9139 // Returns the related absolute value function that is larger, of 0 if one 9140 // does not exist. 
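// For example, following the switch below, the signed-integer chain is
//   __builtin_abs -> __builtin_labs -> __builtin_llabs -> 0 (no larger variant),
// and likewise abs -> labs -> llabs for the library functions.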
9141 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 9142 switch (AbsFunction) { 9143 default: 9144 return 0; 9145 9146 case Builtin::BI__builtin_abs: 9147 return Builtin::BI__builtin_labs; 9148 case Builtin::BI__builtin_labs: 9149 return Builtin::BI__builtin_llabs; 9150 case Builtin::BI__builtin_llabs: 9151 return 0; 9152 9153 case Builtin::BI__builtin_fabsf: 9154 return Builtin::BI__builtin_fabs; 9155 case Builtin::BI__builtin_fabs: 9156 return Builtin::BI__builtin_fabsl; 9157 case Builtin::BI__builtin_fabsl: 9158 return 0; 9159 9160 case Builtin::BI__builtin_cabsf: 9161 return Builtin::BI__builtin_cabs; 9162 case Builtin::BI__builtin_cabs: 9163 return Builtin::BI__builtin_cabsl; 9164 case Builtin::BI__builtin_cabsl: 9165 return 0; 9166 9167 case Builtin::BIabs: 9168 return Builtin::BIlabs; 9169 case Builtin::BIlabs: 9170 return Builtin::BIllabs; 9171 case Builtin::BIllabs: 9172 return 0; 9173 9174 case Builtin::BIfabsf: 9175 return Builtin::BIfabs; 9176 case Builtin::BIfabs: 9177 return Builtin::BIfabsl; 9178 case Builtin::BIfabsl: 9179 return 0; 9180 9181 case Builtin::BIcabsf: 9182 return Builtin::BIcabs; 9183 case Builtin::BIcabs: 9184 return Builtin::BIcabsl; 9185 case Builtin::BIcabsl: 9186 return 0; 9187 } 9188 } 9189 9190 // Returns the argument type of the absolute value function. 9191 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 9192 unsigned AbsType) { 9193 if (AbsType == 0) 9194 return QualType(); 9195 9196 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 9197 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 9198 if (Error != ASTContext::GE_None) 9199 return QualType(); 9200 9201 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 9202 if (!FT) 9203 return QualType(); 9204 9205 if (FT->getNumParams() != 1) 9206 return QualType(); 9207 9208 return FT->getParamType(0); 9209 } 9210 9211 // Returns the best absolute value function, or zero, based on type and 9212 // current absolute value function. 9213 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 9214 unsigned AbsFunctionKind) { 9215 unsigned BestKind = 0; 9216 uint64_t ArgSize = Context.getTypeSize(ArgType); 9217 for (unsigned Kind = AbsFunctionKind; Kind != 0; 9218 Kind = getLargerAbsoluteValueFunction(Kind)) { 9219 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 9220 if (Context.getTypeSize(ParamType) >= ArgSize) { 9221 if (BestKind == 0) 9222 BestKind = Kind; 9223 else if (Context.hasSameType(ParamType, ArgType)) { 9224 BestKind = Kind; 9225 break; 9226 } 9227 } 9228 } 9229 return BestKind; 9230 } 9231 9232 enum AbsoluteValueKind { 9233 AVK_Integer, 9234 AVK_Floating, 9235 AVK_Complex 9236 }; 9237 9238 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 9239 if (T->isIntegralOrEnumerationType()) 9240 return AVK_Integer; 9241 if (T->isRealFloatingType()) 9242 return AVK_Floating; 9243 if (T->isAnyComplexType()) 9244 return AVK_Complex; 9245 9246 llvm_unreachable("Type not integer, floating, or complex"); 9247 } 9248 9249 // Changes the absolute value function to a different type. Preserves whether 9250 // the function is a builtin. 
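// For instance, changeAbsFunction(Builtin::BIfabsf, AVK_Integer) yields
// Builtin::BIabs, and changeAbsFunction(Builtin::BI__builtin_abs, AVK_Floating)
// yields Builtin::BI__builtin_fabsf; callers can then widen the result via
// getBestAbsFunction.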
9251 static unsigned changeAbsFunction(unsigned AbsKind, 9252 AbsoluteValueKind ValueKind) { 9253 switch (ValueKind) { 9254 case AVK_Integer: 9255 switch (AbsKind) { 9256 default: 9257 return 0; 9258 case Builtin::BI__builtin_fabsf: 9259 case Builtin::BI__builtin_fabs: 9260 case Builtin::BI__builtin_fabsl: 9261 case Builtin::BI__builtin_cabsf: 9262 case Builtin::BI__builtin_cabs: 9263 case Builtin::BI__builtin_cabsl: 9264 return Builtin::BI__builtin_abs; 9265 case Builtin::BIfabsf: 9266 case Builtin::BIfabs: 9267 case Builtin::BIfabsl: 9268 case Builtin::BIcabsf: 9269 case Builtin::BIcabs: 9270 case Builtin::BIcabsl: 9271 return Builtin::BIabs; 9272 } 9273 case AVK_Floating: 9274 switch (AbsKind) { 9275 default: 9276 return 0; 9277 case Builtin::BI__builtin_abs: 9278 case Builtin::BI__builtin_labs: 9279 case Builtin::BI__builtin_llabs: 9280 case Builtin::BI__builtin_cabsf: 9281 case Builtin::BI__builtin_cabs: 9282 case Builtin::BI__builtin_cabsl: 9283 return Builtin::BI__builtin_fabsf; 9284 case Builtin::BIabs: 9285 case Builtin::BIlabs: 9286 case Builtin::BIllabs: 9287 case Builtin::BIcabsf: 9288 case Builtin::BIcabs: 9289 case Builtin::BIcabsl: 9290 return Builtin::BIfabsf; 9291 } 9292 case AVK_Complex: 9293 switch (AbsKind) { 9294 default: 9295 return 0; 9296 case Builtin::BI__builtin_abs: 9297 case Builtin::BI__builtin_labs: 9298 case Builtin::BI__builtin_llabs: 9299 case Builtin::BI__builtin_fabsf: 9300 case Builtin::BI__builtin_fabs: 9301 case Builtin::BI__builtin_fabsl: 9302 return Builtin::BI__builtin_cabsf; 9303 case Builtin::BIabs: 9304 case Builtin::BIlabs: 9305 case Builtin::BIllabs: 9306 case Builtin::BIfabsf: 9307 case Builtin::BIfabs: 9308 case Builtin::BIfabsl: 9309 return Builtin::BIcabsf; 9310 } 9311 } 9312 llvm_unreachable("Unable to convert function"); 9313 } 9314 9315 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 9316 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 9317 if (!FnInfo) 9318 return 0; 9319 9320 switch (FDecl->getBuiltinID()) { 9321 default: 9322 return 0; 9323 case Builtin::BI__builtin_abs: 9324 case Builtin::BI__builtin_fabs: 9325 case Builtin::BI__builtin_fabsf: 9326 case Builtin::BI__builtin_fabsl: 9327 case Builtin::BI__builtin_labs: 9328 case Builtin::BI__builtin_llabs: 9329 case Builtin::BI__builtin_cabs: 9330 case Builtin::BI__builtin_cabsf: 9331 case Builtin::BI__builtin_cabsl: 9332 case Builtin::BIabs: 9333 case Builtin::BIlabs: 9334 case Builtin::BIllabs: 9335 case Builtin::BIfabs: 9336 case Builtin::BIfabsf: 9337 case Builtin::BIfabsl: 9338 case Builtin::BIcabs: 9339 case Builtin::BIcabsf: 9340 case Builtin::BIcabsl: 9341 return FDecl->getBuiltinID(); 9342 } 9343 llvm_unreachable("Unknown Builtin type"); 9344 } 9345 9346 // If the replacement is valid, emit a note with replacement function. 9347 // Additionally, suggest including the proper header if not already included. 
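// For instance, when the argument is a plain 'int' in C++ the note suggests
// "std::abs"; if no suitable std::abs overload is already visible, a second
// note recommends including <cstdlib> (or <cmath> for floating-point
// arguments) or declaring the function.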
9348 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 9349 unsigned AbsKind, QualType ArgType) { 9350 bool EmitHeaderHint = true; 9351 const char *HeaderName = nullptr; 9352 const char *FunctionName = nullptr; 9353 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 9354 FunctionName = "std::abs"; 9355 if (ArgType->isIntegralOrEnumerationType()) { 9356 HeaderName = "cstdlib"; 9357 } else if (ArgType->isRealFloatingType()) { 9358 HeaderName = "cmath"; 9359 } else { 9360 llvm_unreachable("Invalid Type"); 9361 } 9362 9363 // Lookup all std::abs 9364 if (NamespaceDecl *Std = S.getStdNamespace()) { 9365 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 9366 R.suppressDiagnostics(); 9367 S.LookupQualifiedName(R, Std); 9368 9369 for (const auto *I : R) { 9370 const FunctionDecl *FDecl = nullptr; 9371 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 9372 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 9373 } else { 9374 FDecl = dyn_cast<FunctionDecl>(I); 9375 } 9376 if (!FDecl) 9377 continue; 9378 9379 // Found std::abs(), check that they are the right ones. 9380 if (FDecl->getNumParams() != 1) 9381 continue; 9382 9383 // Check that the parameter type can handle the argument. 9384 QualType ParamType = FDecl->getParamDecl(0)->getType(); 9385 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 9386 S.Context.getTypeSize(ArgType) <= 9387 S.Context.getTypeSize(ParamType)) { 9388 // Found a function, don't need the header hint. 9389 EmitHeaderHint = false; 9390 break; 9391 } 9392 } 9393 } 9394 } else { 9395 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 9396 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 9397 9398 if (HeaderName) { 9399 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 9400 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 9401 R.suppressDiagnostics(); 9402 S.LookupName(R, S.getCurScope()); 9403 9404 if (R.isSingleResult()) { 9405 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 9406 if (FD && FD->getBuiltinID() == AbsKind) { 9407 EmitHeaderHint = false; 9408 } else { 9409 return; 9410 } 9411 } else if (!R.empty()) { 9412 return; 9413 } 9414 } 9415 } 9416 9417 S.Diag(Loc, diag::note_replace_abs_function) 9418 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 9419 9420 if (!HeaderName) 9421 return; 9422 9423 if (!EmitHeaderHint) 9424 return; 9425 9426 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 9427 << FunctionName; 9428 } 9429 9430 template <std::size_t StrLen> 9431 static bool IsStdFunction(const FunctionDecl *FDecl, 9432 const char (&Str)[StrLen]) { 9433 if (!FDecl) 9434 return false; 9435 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 9436 return false; 9437 if (!FDecl->isInStdNamespace()) 9438 return false; 9439 9440 return true; 9441 } 9442 9443 // Warn when using the wrong abs() function. 9444 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 9445 const FunctionDecl *FDecl) { 9446 if (Call->getNumArgs() != 1) 9447 return; 9448 9449 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 9450 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 9451 if (AbsKind == 0 && !IsStdAbs) 9452 return; 9453 9454 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9455 QualType ParamType = Call->getArg(0)->getType(); 9456 9457 // Unsigned types cannot be negative. Suggest removing the absolute value 9458 // function call. 
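// Illustrative example of the case handled below:
//   unsigned int u = ...;
//   if (abs(u) > 5) ...   // 'u' can never be negative, so abs() is a no-op;
//                         // warn_unsigned_abs fires and a note suggests
//                         // removing the call.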
9459 if (ArgType->isUnsignedIntegerType()) {
9460 const char *FunctionName =
9461 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
9462 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
9463 Diag(Call->getExprLoc(), diag::note_remove_abs)
9464 << FunctionName
9465 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
9466 return;
9467 }
9468
9469 // Taking the absolute value of a pointer is very suspicious, they probably
9470 // wanted to index into an array, dereference a pointer, call a function, etc.
9471 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
9472 unsigned DiagType = 0;
9473 if (ArgType->isFunctionType())
9474 DiagType = 1;
9475 else if (ArgType->isArrayType())
9476 DiagType = 2;
9477
9478 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
9479 return;
9480 }
9481
9482 // std::abs has overloads which prevent most of the absolute value problems
9483 // from occurring.
9484 if (IsStdAbs)
9485 return;
9486
9487 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
9488 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);
9489
9490 // The argument and parameter are the same kind. Check if they are the right
9491 // size.
9492 if (ArgValueKind == ParamValueKind) {
9493 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
9494 return;
9495
9496 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
9497 Diag(Call->getExprLoc(), diag::warn_abs_too_small)
9498 << FDecl << ArgType << ParamType;
9499
9500 if (NewAbsKind == 0)
9501 return;
9502
9503 emitReplacement(*this, Call->getExprLoc(),
9504 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
9505 return;
9506 }
9507
9508 // ArgValueKind != ParamValueKind
9509 // The wrong type of absolute value function was used. Attempt to find the
9510 // proper one.
9511 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
9512 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
9513 if (NewAbsKind == 0)
9514 return;
9515
9516 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
9517 << FDecl << ParamValueKind << ArgValueKind;
9518
9519 emitReplacement(*this, Call->getExprLoc(),
9520 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
9521 }
9522
9523 //===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
9524 void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
9525 const FunctionDecl *FDecl) {
9526 if (!Call || !FDecl) return;
9527
9528 // Ignore template specializations and macros.
9529 if (inTemplateInstantiation()) return;
9530 if (Call->getExprLoc().isMacroID()) return;
9531
9532 // Only care about the one template argument, two function parameter std::max
9533 if (Call->getNumArgs() != 2) return;
9534 if (!IsStdFunction(FDecl, "max")) return;
9535 const auto * ArgList = FDecl->getTemplateSpecializationArgs();
9536 if (!ArgList) return;
9537 if (ArgList->size() != 1) return;
9538
9539 // Check that template type argument is unsigned integer.
9540 const auto& TA = ArgList->get(0);
9541 if (TA.getKind() != TemplateArgument::Type) return;
9542 QualType ArgType = TA.getAsType();
9543 if (!ArgType->isUnsignedIntegerType()) return;
9544
9545 // See if either argument is a literal zero.
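// Illustrative example of the pattern being detected:
//   unsigned n = ...;
//   std::max(0u, n);   // always yields n; warned, and the note's FixIts
//                      // reduce the call to just "(n)".
//   std::max(1u, n);   // not warned: neither argument is a literal zero.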
9546 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 9547 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 9548 if (!MTE) return false; 9549 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 9550 if (!Num) return false; 9551 if (Num->getValue() != 0) return false; 9552 return true; 9553 }; 9554 9555 const Expr *FirstArg = Call->getArg(0); 9556 const Expr *SecondArg = Call->getArg(1); 9557 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 9558 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 9559 9560 // Only warn when exactly one argument is zero. 9561 if (IsFirstArgZero == IsSecondArgZero) return; 9562 9563 SourceRange FirstRange = FirstArg->getSourceRange(); 9564 SourceRange SecondRange = SecondArg->getSourceRange(); 9565 9566 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 9567 9568 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 9569 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 9570 9571 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 9572 SourceRange RemovalRange; 9573 if (IsFirstArgZero) { 9574 RemovalRange = SourceRange(FirstRange.getBegin(), 9575 SecondRange.getBegin().getLocWithOffset(-1)); 9576 } else { 9577 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 9578 SecondRange.getEnd()); 9579 } 9580 9581 Diag(Call->getExprLoc(), diag::note_remove_max_call) 9582 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 9583 << FixItHint::CreateRemoval(RemovalRange); 9584 } 9585 9586 //===--- CHECK: Standard memory functions ---------------------------------===// 9587 9588 /// Takes the expression passed to the size_t parameter of functions 9589 /// such as memcmp, strncat, etc and warns if it's a comparison. 9590 /// 9591 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 9592 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 9593 IdentifierInfo *FnName, 9594 SourceLocation FnLoc, 9595 SourceLocation RParenLoc) { 9596 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 9597 if (!Size) 9598 return false; 9599 9600 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 9601 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 9602 return false; 9603 9604 SourceRange SizeRange = Size->getSourceRange(); 9605 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 9606 << SizeRange << FnName; 9607 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 9608 << FnName 9609 << FixItHint::CreateInsertion( 9610 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 9611 << FixItHint::CreateRemoval(RParenLoc); 9612 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 9613 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 9614 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 9615 ")"); 9616 9617 return true; 9618 } 9619 9620 /// Determine whether the given type is or contains a dynamic class type 9621 /// (e.g., whether it has a vtable). 9622 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 9623 bool &IsContained) { 9624 // Look through array types while ignoring qualifiers. 9625 const Type *Ty = T->getBaseElementTypeUnsafe(); 9626 IsContained = false; 9627 9628 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 9629 RD = RD ? RD->getDefinition() : nullptr; 9630 if (!RD || RD->isInvalidDecl()) 9631 return nullptr; 9632 9633 if (RD->isDynamicClass()) 9634 return RD; 9635 9636 // Check all the fields. 
If any bases were dynamic, the class is dynamic. 9637 // It's impossible for a class to transitively contain itself by value, so 9638 // infinite recursion is impossible. 9639 for (auto *FD : RD->fields()) { 9640 bool SubContained; 9641 if (const CXXRecordDecl *ContainedRD = 9642 getContainedDynamicClass(FD->getType(), SubContained)) { 9643 IsContained = true; 9644 return ContainedRD; 9645 } 9646 } 9647 9648 return nullptr; 9649 } 9650 9651 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 9652 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 9653 if (Unary->getKind() == UETT_SizeOf) 9654 return Unary; 9655 return nullptr; 9656 } 9657 9658 /// If E is a sizeof expression, returns its argument expression, 9659 /// otherwise returns NULL. 9660 static const Expr *getSizeOfExprArg(const Expr *E) { 9661 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9662 if (!SizeOf->isArgumentType()) 9663 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 9664 return nullptr; 9665 } 9666 9667 /// If E is a sizeof expression, returns its argument type. 9668 static QualType getSizeOfArgType(const Expr *E) { 9669 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9670 return SizeOf->getTypeOfArgument(); 9671 return QualType(); 9672 } 9673 9674 namespace { 9675 9676 struct SearchNonTrivialToInitializeField 9677 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 9678 using Super = 9679 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 9680 9681 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 9682 9683 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 9684 SourceLocation SL) { 9685 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9686 asDerived().visitArray(PDIK, AT, SL); 9687 return; 9688 } 9689 9690 Super::visitWithKind(PDIK, FT, SL); 9691 } 9692 9693 void visitARCStrong(QualType FT, SourceLocation SL) { 9694 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9695 } 9696 void visitARCWeak(QualType FT, SourceLocation SL) { 9697 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9698 } 9699 void visitStruct(QualType FT, SourceLocation SL) { 9700 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9701 visit(FD->getType(), FD->getLocation()); 9702 } 9703 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 9704 const ArrayType *AT, SourceLocation SL) { 9705 visit(getContext().getBaseElementType(AT), SL); 9706 } 9707 void visitTrivial(QualType FT, SourceLocation SL) {} 9708 9709 static void diag(QualType RT, const Expr *E, Sema &S) { 9710 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 9711 } 9712 9713 ASTContext &getContext() { return S.getASTContext(); } 9714 9715 const Expr *E; 9716 Sema &S; 9717 }; 9718 9719 struct SearchNonTrivialToCopyField 9720 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 9721 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 9722 9723 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 9724 9725 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 9726 SourceLocation SL) { 9727 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9728 asDerived().visitArray(PCK, AT, SL); 9729 return; 9730 } 9731 9732 Super::visitWithKind(PCK, FT, SL); 9733 } 9734 9735 void visitARCStrong(QualType FT, SourceLocation SL) { 9736 S.DiagRuntimeBehavior(SL, E, 
S.PDiag(diag::note_nontrivial_field) << 0); 9737 } 9738 void visitARCWeak(QualType FT, SourceLocation SL) { 9739 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 9740 } 9741 void visitStruct(QualType FT, SourceLocation SL) { 9742 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9743 visit(FD->getType(), FD->getLocation()); 9744 } 9745 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 9746 SourceLocation SL) { 9747 visit(getContext().getBaseElementType(AT), SL); 9748 } 9749 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 9750 SourceLocation SL) {} 9751 void visitTrivial(QualType FT, SourceLocation SL) {} 9752 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 9753 9754 static void diag(QualType RT, const Expr *E, Sema &S) { 9755 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 9756 } 9757 9758 ASTContext &getContext() { return S.getASTContext(); } 9759 9760 const Expr *E; 9761 Sema &S; 9762 }; 9763 9764 } 9765 9766 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 9767 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 9768 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 9769 9770 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 9771 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 9772 return false; 9773 9774 return doesExprLikelyComputeSize(BO->getLHS()) || 9775 doesExprLikelyComputeSize(BO->getRHS()); 9776 } 9777 9778 return getAsSizeOfExpr(SizeofExpr) != nullptr; 9779 } 9780 9781 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 9782 /// 9783 /// \code 9784 /// #define MACRO 0 9785 /// foo(MACRO); 9786 /// foo(0); 9787 /// \endcode 9788 /// 9789 /// This should return true for the first call to foo, but not for the second 9790 /// (regardless of whether foo is a macro or function). 9791 static bool isArgumentExpandedFromMacro(SourceManager &SM, 9792 SourceLocation CallLoc, 9793 SourceLocation ArgLoc) { 9794 if (!CallLoc.isMacroID()) 9795 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 9796 9797 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 9798 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 9799 } 9800 9801 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 9802 /// last two arguments transposed. 9803 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 9804 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 9805 return; 9806 9807 const Expr *SizeArg = 9808 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 9809 9810 auto isLiteralZero = [](const Expr *E) { 9811 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 9812 }; 9813 9814 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 9815 SourceLocation CallLoc = Call->getRParenLoc(); 9816 SourceManager &SM = S.getSourceManager(); 9817 if (isLiteralZero(SizeArg) && 9818 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 9819 9820 SourceLocation DiagLoc = SizeArg->getExprLoc(); 9821 9822 // Some platforms #define bzero to __builtin_memset. See if this is the 9823 // case, and if so, emit a better diagnostic. 
9824 if (BId == Builtin::BIbzero || 9825 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 9826 CallLoc, SM, S.getLangOpts()) == "bzero")) { 9827 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 9828 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 9829 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 9830 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 9831 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 9832 } 9833 return; 9834 } 9835 9836 // If the second argument to a memset is a sizeof expression and the third 9837 // isn't, this is also likely an error. This should catch 9838 // 'memset(buf, sizeof(buf), 0xff)'. 9839 if (BId == Builtin::BImemset && 9840 doesExprLikelyComputeSize(Call->getArg(1)) && 9841 !doesExprLikelyComputeSize(Call->getArg(2))) { 9842 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 9843 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 9844 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 9845 return; 9846 } 9847 } 9848 9849 /// Check for dangerous or invalid arguments to memset(). 9850 /// 9851 /// This issues warnings on known problematic, dangerous or unspecified 9852 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 9853 /// function calls. 9854 /// 9855 /// \param Call The call expression to diagnose. 9856 void Sema::CheckMemaccessArguments(const CallExpr *Call, 9857 unsigned BId, 9858 IdentifierInfo *FnName) { 9859 assert(BId != 0); 9860 9861 // It is possible to have a non-standard definition of memset. Validate 9862 // we have enough arguments, and if not, abort further checking. 9863 unsigned ExpectedNumArgs = 9864 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 9865 if (Call->getNumArgs() < ExpectedNumArgs) 9866 return; 9867 9868 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 9869 BId == Builtin::BIstrndup ? 1 : 2); 9870 unsigned LenArg = 9871 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 9872 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 9873 9874 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 9875 Call->getBeginLoc(), Call->getRParenLoc())) 9876 return; 9877 9878 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 9879 CheckMemaccessSize(*this, BId, Call); 9880 9881 // We have special checking when the length is a sizeof expression. 9882 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 9883 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 9884 llvm::FoldingSetNodeID SizeOfArgID; 9885 9886 // Although widely used, 'bzero' is not a standard function. Be more strict 9887 // with the argument types before allowing diagnostics and only allow the 9888 // form bzero(ptr, sizeof(...)). 9889 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9890 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 9891 return; 9892 9893 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 9894 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 9895 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 9896 9897 QualType DestTy = Dest->getType(); 9898 QualType PointeeTy; 9899 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 9900 PointeeTy = DestPtrTy->getPointeeType(); 9901 9902 // Never warn about void type pointers. This can be used to suppress 9903 // false positives. 
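// For example (illustrative), the warnings below about dynamic classes and
// non-trivial C structs can be silenced by casting the pointer to void*:
//   memset(&obj, 0, sizeof(obj));          // may warn for such types
//   memset((void *)&obj, 0, sizeof(obj));  // void pointee: skipped below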
9904 if (PointeeTy->isVoidType()) 9905 continue; 9906 9907 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 9908 // actually comparing the expressions for equality. Because computing the 9909 // expression IDs can be expensive, we only do this if the diagnostic is 9910 // enabled. 9911 if (SizeOfArg && 9912 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 9913 SizeOfArg->getExprLoc())) { 9914 // We only compute IDs for expressions if the warning is enabled, and 9915 // cache the sizeof arg's ID. 9916 if (SizeOfArgID == llvm::FoldingSetNodeID()) 9917 SizeOfArg->Profile(SizeOfArgID, Context, true); 9918 llvm::FoldingSetNodeID DestID; 9919 Dest->Profile(DestID, Context, true); 9920 if (DestID == SizeOfArgID) { 9921 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 9922 // over sizeof(src) as well. 9923 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 9924 StringRef ReadableName = FnName->getName(); 9925 9926 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 9927 if (UnaryOp->getOpcode() == UO_AddrOf) 9928 ActionIdx = 1; // If its an address-of operator, just remove it. 9929 if (!PointeeTy->isIncompleteType() && 9930 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 9931 ActionIdx = 2; // If the pointee's size is sizeof(char), 9932 // suggest an explicit length. 9933 9934 // If the function is defined as a builtin macro, do not show macro 9935 // expansion. 9936 SourceLocation SL = SizeOfArg->getExprLoc(); 9937 SourceRange DSR = Dest->getSourceRange(); 9938 SourceRange SSR = SizeOfArg->getSourceRange(); 9939 SourceManager &SM = getSourceManager(); 9940 9941 if (SM.isMacroArgExpansion(SL)) { 9942 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 9943 SL = SM.getSpellingLoc(SL); 9944 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 9945 SM.getSpellingLoc(DSR.getEnd())); 9946 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 9947 SM.getSpellingLoc(SSR.getEnd())); 9948 } 9949 9950 DiagRuntimeBehavior(SL, SizeOfArg, 9951 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 9952 << ReadableName 9953 << PointeeTy 9954 << DestTy 9955 << DSR 9956 << SSR); 9957 DiagRuntimeBehavior(SL, SizeOfArg, 9958 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 9959 << ActionIdx 9960 << SSR); 9961 9962 break; 9963 } 9964 } 9965 9966 // Also check for cases where the sizeof argument is the exact same 9967 // type as the memory argument, and where it points to a user-defined 9968 // record type. 9969 if (SizeOfArgTy != QualType()) { 9970 if (PointeeTy->isRecordType() && 9971 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 9972 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 9973 PDiag(diag::warn_sizeof_pointer_type_memaccess) 9974 << FnName << SizeOfArgTy << ArgIdx 9975 << PointeeTy << Dest->getSourceRange() 9976 << LenExpr->getSourceRange()); 9977 break; 9978 } 9979 } 9980 } else if (DestTy->isArrayType()) { 9981 PointeeTy = DestTy; 9982 } 9983 9984 if (PointeeTy == QualType()) 9985 continue; 9986 9987 // Always complain about dynamic classes. 9988 bool IsContained; 9989 if (const CXXRecordDecl *ContainedRD = 9990 getContainedDynamicClass(PointeeTy, IsContained)) { 9991 9992 unsigned OperationType = 0; 9993 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 9994 // "overwritten" if we're warning about the destination for any call 9995 // but memcmp; otherwise a verb appropriate to the call. 
9996 if (ArgIdx != 0 || IsCmp) { 9997 if (BId == Builtin::BImemcpy) 9998 OperationType = 1; 9999 else if(BId == Builtin::BImemmove) 10000 OperationType = 2; 10001 else if (IsCmp) 10002 OperationType = 3; 10003 } 10004 10005 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10006 PDiag(diag::warn_dyn_class_memaccess) 10007 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName 10008 << IsContained << ContainedRD << OperationType 10009 << Call->getCallee()->getSourceRange()); 10010 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 10011 BId != Builtin::BImemset) 10012 DiagRuntimeBehavior( 10013 Dest->getExprLoc(), Dest, 10014 PDiag(diag::warn_arc_object_memaccess) 10015 << ArgIdx << FnName << PointeeTy 10016 << Call->getCallee()->getSourceRange()); 10017 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 10018 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 10019 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 10020 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10021 PDiag(diag::warn_cstruct_memaccess) 10022 << ArgIdx << FnName << PointeeTy << 0); 10023 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 10024 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 10025 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 10026 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10027 PDiag(diag::warn_cstruct_memaccess) 10028 << ArgIdx << FnName << PointeeTy << 1); 10029 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 10030 } else { 10031 continue; 10032 } 10033 } else 10034 continue; 10035 10036 DiagRuntimeBehavior( 10037 Dest->getExprLoc(), Dest, 10038 PDiag(diag::note_bad_memaccess_silence) 10039 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 10040 break; 10041 } 10042 } 10043 10044 // A little helper routine: ignore addition and subtraction of integer literals. 10045 // This intentionally does not ignore all integer constant expressions because 10046 // we don't want to remove sizeof(). 10047 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 10048 Ex = Ex->IgnoreParenCasts(); 10049 10050 while (true) { 10051 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 10052 if (!BO || !BO->isAdditiveOp()) 10053 break; 10054 10055 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 10056 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 10057 10058 if (isa<IntegerLiteral>(RHS)) 10059 Ex = LHS; 10060 else if (isa<IntegerLiteral>(LHS)) 10061 Ex = RHS; 10062 else 10063 break; 10064 } 10065 10066 return Ex; 10067 } 10068 10069 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 10070 ASTContext &Context) { 10071 // Only handle constant-sized or VLAs, but not flexible members. 10072 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 10073 // Only issue the FIXIT for arrays of size > 1. 10074 if (CAT->getSize().getSExtValue() <= 1) 10075 return false; 10076 } else if (!Ty->isVariableArrayType()) { 10077 return false; 10078 } 10079 return true; 10080 } 10081 10082 // Warn if the user has made the 'size' argument to strlcpy or strlcat 10083 // be the size of the source, instead of the destination. 
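// Typical pattern (illustrative):
//   char dst[32];
//   strlcpy(dst, src, sizeof(src));   // warned: size taken from the source
// When the destination is a fixed-size array, a note suggests replacing the
// size argument with "sizeof(dst)".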
10084 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 10085 IdentifierInfo *FnName) { 10086 10087 // Don't crash if the user has the wrong number of arguments 10088 unsigned NumArgs = Call->getNumArgs(); 10089 if ((NumArgs != 3) && (NumArgs != 4)) 10090 return; 10091 10092 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 10093 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 10094 const Expr *CompareWithSrc = nullptr; 10095 10096 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 10097 Call->getBeginLoc(), Call->getRParenLoc())) 10098 return; 10099 10100 // Look for 'strlcpy(dst, x, sizeof(x))' 10101 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 10102 CompareWithSrc = Ex; 10103 else { 10104 // Look for 'strlcpy(dst, x, strlen(x))' 10105 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 10106 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 10107 SizeCall->getNumArgs() == 1) 10108 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 10109 } 10110 } 10111 10112 if (!CompareWithSrc) 10113 return; 10114 10115 // Determine if the argument to sizeof/strlen is equal to the source 10116 // argument. In principle there's all kinds of things you could do 10117 // here, for instance creating an == expression and evaluating it with 10118 // EvaluateAsBooleanCondition, but this uses a more direct technique: 10119 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 10120 if (!SrcArgDRE) 10121 return; 10122 10123 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 10124 if (!CompareWithSrcDRE || 10125 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 10126 return; 10127 10128 const Expr *OriginalSizeArg = Call->getArg(2); 10129 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 10130 << OriginalSizeArg->getSourceRange() << FnName; 10131 10132 // Output a FIXIT hint if the destination is an array (rather than a 10133 // pointer to an array). This could be enhanced to handle some 10134 // pointers if we know the actual size, like if DstArg is 'array+2' 10135 // we could say 'sizeof(array)-2'. 10136 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 10137 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 10138 return; 10139 10140 SmallString<128> sizeString; 10141 llvm::raw_svector_ostream OS(sizeString); 10142 OS << "sizeof("; 10143 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10144 OS << ")"; 10145 10146 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 10147 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 10148 OS.str()); 10149 } 10150 10151 /// Check if two expressions refer to the same declaration. 10152 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 10153 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 10154 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 10155 return D1->getDecl() == D2->getDecl(); 10156 return false; 10157 } 10158 10159 static const Expr *getStrlenExprArg(const Expr *E) { 10160 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 10161 const FunctionDecl *FD = CE->getDirectCallee(); 10162 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 10163 return nullptr; 10164 return CE->getArg(0)->IgnoreParenCasts(); 10165 } 10166 return nullptr; 10167 } 10168 10169 // Warn on anti-patterns as the 'size' argument to strncat. 
10170 // The correct size argument should look like the following:
10171 // strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
10172 void Sema::CheckStrncatArguments(const CallExpr *CE,
10173 IdentifierInfo *FnName) {
10174 // Don't crash if the user has the wrong number of arguments.
10175 if (CE->getNumArgs() < 3)
10176 return;
10177 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
10178 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
10179 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
10180
10181 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
10182 CE->getRParenLoc()))
10183 return;
10184
10185 // Identify common expressions, which are wrongly used as the size argument
10186 // to strncat and may lead to buffer overflows.
10187 unsigned PatternType = 0;
10188 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
10189 // - sizeof(dst)
10190 if (referToTheSameDecl(SizeOfArg, DstArg))
10191 PatternType = 1;
10192 // - sizeof(src)
10193 else if (referToTheSameDecl(SizeOfArg, SrcArg))
10194 PatternType = 2;
10195 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
10196 if (BE->getOpcode() == BO_Sub) {
10197 const Expr *L = BE->getLHS()->IgnoreParenCasts();
10198 const Expr *R = BE->getRHS()->IgnoreParenCasts();
10199 // - sizeof(dst) - strlen(dst)
10200 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
10201 referToTheSameDecl(DstArg, getStrlenExprArg(R)))
10202 PatternType = 1;
10203 // - sizeof(src) - (anything)
10204 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
10205 PatternType = 2;
10206 }
10207 }
10208
10209 if (PatternType == 0)
10210 return;
10211
10212 // Generate the diagnostic.
10213 SourceLocation SL = LenArg->getBeginLoc();
10214 SourceRange SR = LenArg->getSourceRange();
10215 SourceManager &SM = getSourceManager();
10216
10217 // If the function is defined as a builtin macro, do not show macro expansion.
10218 if (SM.isMacroArgExpansion(SL)) {
10219 SL = SM.getSpellingLoc(SL);
10220 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
10221 SM.getSpellingLoc(SR.getEnd()));
10222 }
10223
10224 // Check if the destination is an array (rather than a pointer to an array).
10225 QualType DstTy = DstArg->getType(); 10226 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 10227 Context); 10228 if (!isKnownSizeArray) { 10229 if (PatternType == 1) 10230 Diag(SL, diag::warn_strncat_wrong_size) << SR; 10231 else 10232 Diag(SL, diag::warn_strncat_src_size) << SR; 10233 return; 10234 } 10235 10236 if (PatternType == 1) 10237 Diag(SL, diag::warn_strncat_large_size) << SR; 10238 else 10239 Diag(SL, diag::warn_strncat_src_size) << SR; 10240 10241 SmallString<128> sizeString; 10242 llvm::raw_svector_ostream OS(sizeString); 10243 OS << "sizeof("; 10244 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10245 OS << ") - "; 10246 OS << "strlen("; 10247 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10248 OS << ") - 1"; 10249 10250 Diag(SL, diag::note_strncat_wrong_size) 10251 << FixItHint::CreateReplacement(SR, OS.str()); 10252 } 10253 10254 namespace { 10255 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 10256 const UnaryOperator *UnaryExpr, 10257 const VarDecl *Var) { 10258 StorageClass Class = Var->getStorageClass(); 10259 if (Class == StorageClass::SC_Extern || 10260 Class == StorageClass::SC_PrivateExtern || 10261 Var->getType()->isReferenceType()) 10262 return; 10263 10264 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 10265 << CalleeName << Var; 10266 } 10267 10268 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 10269 const UnaryOperator *UnaryExpr, const Decl *D) { 10270 if (const auto *Field = dyn_cast<FieldDecl>(D)) 10271 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 10272 << CalleeName << Field; 10273 } 10274 10275 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 10276 const UnaryOperator *UnaryExpr) { 10277 if (UnaryExpr->getOpcode() != UnaryOperator::Opcode::UO_AddrOf) 10278 return; 10279 10280 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) 10281 if (const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl())) 10282 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, Var); 10283 10284 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 10285 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 10286 Lvalue->getMemberDecl()); 10287 } 10288 10289 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 10290 const DeclRefExpr *Lvalue) { 10291 if (!Lvalue->getType()->isArrayType()) 10292 return; 10293 10294 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 10295 if (Var == nullptr) 10296 return; 10297 10298 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 10299 << CalleeName << Var; 10300 } 10301 } // namespace 10302 10303 /// Alerts the user that they are attempting to free a non-malloc'd object. 
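// Illustrative examples of what is diagnosed (warn_free_nonheap_object):
//   int x;        free(&x);    // address of a local variable
//   char buf[8];  free(buf);   // array that decays to a pointer
// References and variables with extern storage are deliberately skipped.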
10304 void Sema::CheckFreeArguments(const CallExpr *E) {
10305 const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
10306 const std::string CalleeName =
10307 dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
10308
10309 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg))
10310 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr);
10311
10312 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg))
10313 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue);
10314 }
10315
10316 void
10317 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
10318 SourceLocation ReturnLoc,
10319 bool isObjCMethod,
10320 const AttrVec *Attrs,
10321 const FunctionDecl *FD) {
10322 // Check if the return value is null but should not be.
10323 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
10324 (!isObjCMethod && isNonNullType(Context, lhsType))) &&
10325 CheckNonNullExpr(*this, RetValExp))
10326 Diag(ReturnLoc, diag::warn_null_ret)
10327 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();
10328
10329 // C++11 [basic.stc.dynamic.allocation]p4:
10330 // If an allocation function declared with a non-throwing
10331 // exception-specification fails to allocate storage, it shall return
10332 // a null pointer. Any other allocation function that fails to allocate
10333 // storage shall indicate failure only by throwing an exception [...]
10334 if (FD) {
10335 OverloadedOperatorKind Op = FD->getOverloadedOperator();
10336 if (Op == OO_New || Op == OO_Array_New) {
10337 const FunctionProtoType *Proto
10338 = FD->getType()->castAs<FunctionProtoType>();
10339 if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
10340 CheckNonNullExpr(*this, RetValExp))
10341 Diag(ReturnLoc, diag::warn_operator_new_returns_null)
10342 << FD << getLangOpts().CPlusPlus11;
10343 }
10344 }
10345
10346 // PPC MMA non-pointer types are not allowed as return type. Checking the type
10347 // here prevents the user from using a PPC MMA type as trailing return type.
10348 if (Context.getTargetInfo().getTriple().isPPC64())
10349 CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
10350 }
10351
10352 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
10353
10354 /// Check for comparisons of floating point operands using != and ==.
10355 /// Issue a warning if these are not self-comparisons, as they are not likely
10356 /// to do what the programmer intended.
10357 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
10358 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
10359 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
10360
10361 // Special case: check for x == x (which is OK).
10362 // Do not emit warnings for such cases.
10363 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
10364 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
10365 if (DRL->getDecl() == DRR->getDecl())
10366 return;
10367
10368 // Special case: check for comparisons against literals that can be exactly
10369 // represented by APFloat. In such cases, do not emit a warning. This
10370 // is a heuristic: often comparisons against such literals are used to
10371 // detect if a value in a variable has not changed. This clearly can
10372 // lead to false negatives.
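// Illustrative consequence of the heuristic: 'x == 0.5' is not warned because
// 0.5 converts exactly to APFloat, while 'x == 0.1' still is, since 0.1 has no
// exact binary floating-point representation.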
10373 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 10374 if (FLL->isExact()) 10375 return; 10376 } else 10377 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 10378 if (FLR->isExact()) 10379 return; 10380 10381 // Check for comparisons with builtin types. 10382 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 10383 if (CL->getBuiltinCallee()) 10384 return; 10385 10386 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 10387 if (CR->getBuiltinCallee()) 10388 return; 10389 10390 // Emit the diagnostic. 10391 Diag(Loc, diag::warn_floatingpoint_eq) 10392 << LHS->getSourceRange() << RHS->getSourceRange(); 10393 } 10394 10395 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 10396 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 10397 10398 namespace { 10399 10400 /// Structure recording the 'active' range of an integer-valued 10401 /// expression. 10402 struct IntRange { 10403 /// The number of bits active in the int. Note that this includes exactly one 10404 /// sign bit if !NonNegative. 10405 unsigned Width; 10406 10407 /// True if the int is known not to have negative values. If so, all leading 10408 /// bits before Width are known zero, otherwise they are known to be the 10409 /// same as the MSB within Width. 10410 bool NonNegative; 10411 10412 IntRange(unsigned Width, bool NonNegative) 10413 : Width(Width), NonNegative(NonNegative) {} 10414 10415 /// Number of bits excluding the sign bit. 10416 unsigned valueBits() const { 10417 return NonNegative ? Width : Width - 1; 10418 } 10419 10420 /// Returns the range of the bool type. 10421 static IntRange forBoolType() { 10422 return IntRange(1, true); 10423 } 10424 10425 /// Returns the range of an opaque value of the given integral type. 10426 static IntRange forValueOfType(ASTContext &C, QualType T) { 10427 return forValueOfCanonicalType(C, 10428 T->getCanonicalTypeInternal().getTypePtr()); 10429 } 10430 10431 /// Returns the range of an opaque value of a canonical integral type. 10432 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 10433 assert(T->isCanonicalUnqualified()); 10434 10435 if (const VectorType *VT = dyn_cast<VectorType>(T)) 10436 T = VT->getElementType().getTypePtr(); 10437 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 10438 T = CT->getElementType().getTypePtr(); 10439 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 10440 T = AT->getValueType().getTypePtr(); 10441 10442 if (!C.getLangOpts().CPlusPlus) { 10443 // For enum types in C code, use the underlying datatype. 10444 if (const EnumType *ET = dyn_cast<EnumType>(T)) 10445 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 10446 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 10447 // For enum types in C++, use the known bit width of the enumerators. 10448 EnumDecl *Enum = ET->getDecl(); 10449 // In C++11, enums can have a fixed underlying type. Use this type to 10450 // compute the range. 
10451 if (Enum->isFixed()) { 10452 return IntRange(C.getIntWidth(QualType(T, 0)), 10453 !ET->isSignedIntegerOrEnumerationType()); 10454 } 10455 10456 unsigned NumPositive = Enum->getNumPositiveBits(); 10457 unsigned NumNegative = Enum->getNumNegativeBits(); 10458 10459 if (NumNegative == 0) 10460 return IntRange(NumPositive, true/*NonNegative*/); 10461 else 10462 return IntRange(std::max(NumPositive + 1, NumNegative), 10463 false/*NonNegative*/); 10464 } 10465 10466 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 10467 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 10468 10469 const BuiltinType *BT = cast<BuiltinType>(T); 10470 assert(BT->isInteger()); 10471 10472 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 10473 } 10474 10475 /// Returns the "target" range of a canonical integral type, i.e. 10476 /// the range of values expressible in the type. 10477 /// 10478 /// This matches forValueOfCanonicalType except that enums have the 10479 /// full range of their type, not the range of their enumerators. 10480 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 10481 assert(T->isCanonicalUnqualified()); 10482 10483 if (const VectorType *VT = dyn_cast<VectorType>(T)) 10484 T = VT->getElementType().getTypePtr(); 10485 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 10486 T = CT->getElementType().getTypePtr(); 10487 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 10488 T = AT->getValueType().getTypePtr(); 10489 if (const EnumType *ET = dyn_cast<EnumType>(T)) 10490 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 10491 10492 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 10493 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 10494 10495 const BuiltinType *BT = cast<BuiltinType>(T); 10496 assert(BT->isInteger()); 10497 10498 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 10499 } 10500 10501 /// Returns the supremum of two ranges: i.e. their conservative merge. 10502 static IntRange join(IntRange L, IntRange R) { 10503 bool Unsigned = L.NonNegative && R.NonNegative; 10504 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 10505 L.NonNegative && R.NonNegative); 10506 } 10507 10508 /// Return the range of a bitwise-AND of the two ranges. 10509 static IntRange bit_and(IntRange L, IntRange R) { 10510 unsigned Bits = std::max(L.Width, R.Width); 10511 bool NonNegative = false; 10512 if (L.NonNegative) { 10513 Bits = std::min(Bits, L.Width); 10514 NonNegative = true; 10515 } 10516 if (R.NonNegative) { 10517 Bits = std::min(Bits, R.Width); 10518 NonNegative = true; 10519 } 10520 return IntRange(Bits, NonNegative); 10521 } 10522 10523 /// Return the range of a sum of the two ranges. 10524 static IntRange sum(IntRange L, IntRange R) { 10525 bool Unsigned = L.NonNegative && R.NonNegative; 10526 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 10527 Unsigned); 10528 } 10529 10530 /// Return the range of a difference of the two ranges. 10531 static IntRange difference(IntRange L, IntRange R) { 10532 // We need a 1-bit-wider range if: 10533 // 1) LHS can be negative: least value can be reduced. 10534 // 2) RHS can be negative: greatest value can be increased. 10535 bool CanWiden = !L.NonNegative || !R.NonNegative; 10536 bool Unsigned = L.NonNegative && R.Width == 0; 10537 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 10538 !Unsigned, 10539 Unsigned); 10540 } 10541 10542 /// Return the range of a product of the two ranges. 
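// Worked example: for two signed 8-bit operands, valueBits() is 7 on each
// side, both can be negative (CanWiden == 1) and the result is signed
// (!Unsigned == 1), giving 7 + 7 + 1 + 1 = 16 bits, which is enough for
// (-128) * (-128) == 16384.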
10543 static IntRange product(IntRange L, IntRange R) { 10544 // If both LHS and RHS can be negative, we can form 10545 // -2^L * -2^R = 2^(L + R) 10546 // which requires L + R + 1 value bits to represent. 10547 bool CanWiden = !L.NonNegative && !R.NonNegative; 10548 bool Unsigned = L.NonNegative && R.NonNegative; 10549 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 10550 Unsigned); 10551 } 10552 10553 /// Return the range of a remainder operation between the two ranges. 10554 static IntRange rem(IntRange L, IntRange R) { 10555 // The result of a remainder can't be larger than the result of 10556 // either side. The sign of the result is the sign of the LHS. 10557 bool Unsigned = L.NonNegative; 10558 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 10559 Unsigned); 10560 } 10561 }; 10562 10563 } // namespace 10564 10565 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 10566 unsigned MaxWidth) { 10567 if (value.isSigned() && value.isNegative()) 10568 return IntRange(value.getMinSignedBits(), false); 10569 10570 if (value.getBitWidth() > MaxWidth) 10571 value = value.trunc(MaxWidth); 10572 10573 // isNonNegative() just checks the sign bit without considering 10574 // signedness. 10575 return IntRange(value.getActiveBits(), true); 10576 } 10577 10578 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 10579 unsigned MaxWidth) { 10580 if (result.isInt()) 10581 return GetValueRange(C, result.getInt(), MaxWidth); 10582 10583 if (result.isVector()) { 10584 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 10585 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 10586 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 10587 R = IntRange::join(R, El); 10588 } 10589 return R; 10590 } 10591 10592 if (result.isComplexInt()) { 10593 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 10594 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 10595 return IntRange::join(R, I); 10596 } 10597 10598 // This can happen with lossless casts to intptr_t of "based" lvalues. 10599 // Assume it might use arbitrary bits. 10600 // FIXME: The only reason we need to pass the type in here is to get 10601 // the sign right on this one case. It would be nice if APValue 10602 // preserved this. 10603 assert(result.isLValue() || result.isAddrLabelDiff()); 10604 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 10605 } 10606 10607 static QualType GetExprType(const Expr *E) { 10608 QualType Ty = E->getType(); 10609 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 10610 Ty = AtomicRHS->getValueType(); 10611 return Ty; 10612 } 10613 10614 /// Pseudo-evaluate the given integer expression, estimating the 10615 /// range of values it might take. 10616 /// 10617 /// \param MaxWidth The width to which the value will be truncated. 10618 /// \param Approximate If \c true, return a likely range for the result: in 10619 /// particular, assume that aritmetic on narrower types doesn't leave 10620 /// those types. If \c false, return a range including all possible 10621 /// result values. 10622 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 10623 bool InConstantContext, bool Approximate) { 10624 E = E->IgnoreParens(); 10625 10626 // Try a full evaluation first. 
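  // If the expression folds to a constant, GetValueRange gives back its exact
  // active-bit count; e.g. a value of 5 yields a 3-bit non-negative range.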
10627 Expr::EvalResult result; 10628 if (E->EvaluateAsRValue(result, C, InConstantContext)) 10629 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 10630 10631 // I think we only want to look through implicit casts here; if the 10632 // user has an explicit widening cast, we should treat the value as 10633 // being of the new, wider type. 10634 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 10635 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 10636 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext, 10637 Approximate); 10638 10639 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 10640 10641 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 10642 CE->getCastKind() == CK_BooleanToSignedIntegral; 10643 10644 // Assume that non-integer casts can span the full range of the type. 10645 if (!isIntegerCast) 10646 return OutputTypeRange; 10647 10648 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 10649 std::min(MaxWidth, OutputTypeRange.Width), 10650 InConstantContext, Approximate); 10651 10652 // Bail out if the subexpr's range is as wide as the cast type. 10653 if (SubRange.Width >= OutputTypeRange.Width) 10654 return OutputTypeRange; 10655 10656 // Otherwise, we take the smaller width, and we're non-negative if 10657 // either the output type or the subexpr is. 10658 return IntRange(SubRange.Width, 10659 SubRange.NonNegative || OutputTypeRange.NonNegative); 10660 } 10661 10662 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 10663 // If we can fold the condition, just take that operand. 10664 bool CondResult; 10665 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 10666 return GetExprRange(C, 10667 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 10668 MaxWidth, InConstantContext, Approximate); 10669 10670 // Otherwise, conservatively merge. 10671 // GetExprRange requires an integer expression, but a throw expression 10672 // results in a void type. 10673 Expr *E = CO->getTrueExpr(); 10674 IntRange L = E->getType()->isVoidType() 10675 ? IntRange{0, true} 10676 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 10677 E = CO->getFalseExpr(); 10678 IntRange R = E->getType()->isVoidType() 10679 ? IntRange{0, true} 10680 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 10681 return IntRange::join(L, R); 10682 } 10683 10684 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 10685 IntRange (*Combine)(IntRange, IntRange) = IntRange::join; 10686 10687 switch (BO->getOpcode()) { 10688 case BO_Cmp: 10689 llvm_unreachable("builtin <=> should have class type"); 10690 10691 // Boolean-valued operations are single-bit and positive. 10692 case BO_LAnd: 10693 case BO_LOr: 10694 case BO_LT: 10695 case BO_GT: 10696 case BO_LE: 10697 case BO_GE: 10698 case BO_EQ: 10699 case BO_NE: 10700 return IntRange::forBoolType(); 10701 10702 // The type of the assignments is the type of the LHS, so the RHS 10703 // is not necessarily the same type. 10704 case BO_MulAssign: 10705 case BO_DivAssign: 10706 case BO_RemAssign: 10707 case BO_AddAssign: 10708 case BO_SubAssign: 10709 case BO_XorAssign: 10710 case BO_OrAssign: 10711 // TODO: bitfields? 10712 return IntRange::forValueOfType(C, GetExprType(E)); 10713 10714 // Simple assignments just pass through the RHS, which will have 10715 // been coerced to the LHS type. 10716 case BO_Assign: 10717 // TODO: bitfields? 
10718 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 10719 Approximate); 10720 10721 // Operations with opaque sources are black-listed. 10722 case BO_PtrMemD: 10723 case BO_PtrMemI: 10724 return IntRange::forValueOfType(C, GetExprType(E)); 10725 10726 // Bitwise-and uses the *infinum* of the two source ranges. 10727 case BO_And: 10728 case BO_AndAssign: 10729 Combine = IntRange::bit_and; 10730 break; 10731 10732 // Left shift gets black-listed based on a judgement call. 10733 case BO_Shl: 10734 // ...except that we want to treat '1 << (blah)' as logically 10735 // positive. It's an important idiom. 10736 if (IntegerLiteral *I 10737 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 10738 if (I->getValue() == 1) { 10739 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 10740 return IntRange(R.Width, /*NonNegative*/ true); 10741 } 10742 } 10743 LLVM_FALLTHROUGH; 10744 10745 case BO_ShlAssign: 10746 return IntRange::forValueOfType(C, GetExprType(E)); 10747 10748 // Right shift by a constant can narrow its left argument. 10749 case BO_Shr: 10750 case BO_ShrAssign: { 10751 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 10752 Approximate); 10753 10754 // If the shift amount is a positive constant, drop the width by 10755 // that much. 10756 if (Optional<llvm::APSInt> shift = 10757 BO->getRHS()->getIntegerConstantExpr(C)) { 10758 if (shift->isNonNegative()) { 10759 unsigned zext = shift->getZExtValue(); 10760 if (zext >= L.Width) 10761 L.Width = (L.NonNegative ? 0 : 1); 10762 else 10763 L.Width -= zext; 10764 } 10765 } 10766 10767 return L; 10768 } 10769 10770 // Comma acts as its right operand. 10771 case BO_Comma: 10772 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 10773 Approximate); 10774 10775 case BO_Add: 10776 if (!Approximate) 10777 Combine = IntRange::sum; 10778 break; 10779 10780 case BO_Sub: 10781 if (BO->getLHS()->getType()->isPointerType()) 10782 return IntRange::forValueOfType(C, GetExprType(E)); 10783 if (!Approximate) 10784 Combine = IntRange::difference; 10785 break; 10786 10787 case BO_Mul: 10788 if (!Approximate) 10789 Combine = IntRange::product; 10790 break; 10791 10792 // The width of a division result is mostly determined by the size 10793 // of the LHS. 10794 case BO_Div: { 10795 // Don't 'pre-truncate' the operands. 10796 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10797 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 10798 Approximate); 10799 10800 // If the divisor is constant, use that. 10801 if (Optional<llvm::APSInt> divisor = 10802 BO->getRHS()->getIntegerConstantExpr(C)) { 10803 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 10804 if (log2 >= L.Width) 10805 L.Width = (L.NonNegative ? 0 : 1); 10806 else 10807 L.Width = std::min(L.Width - log2, MaxWidth); 10808 return L; 10809 } 10810 10811 // Otherwise, just use the LHS's width. 10812 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 10813 // could be -1. 10814 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 10815 Approximate); 10816 return IntRange(L.Width, L.NonNegative && R.NonNegative); 10817 } 10818 10819 case BO_Rem: 10820 Combine = IntRange::rem; 10821 break; 10822 10823 // The default behavior is okay for these. 10824 case BO_Xor: 10825 case BO_Or: 10826 break; 10827 } 10828 10829 // Combine the two ranges, but limit the result to the type in which we 10830 // performed the computation. 
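  // For example, with Combine == sum, adding two full 'int' ranges yields a
  // 33-bit range; the std::min against MaxWidth below keeps the result within
  // the width the caller asked for.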
10831 QualType T = GetExprType(E); 10832 unsigned opWidth = C.getIntWidth(T); 10833 IntRange L = 10834 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 10835 IntRange R = 10836 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 10837 IntRange C = Combine(L, R); 10838 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 10839 C.Width = std::min(C.Width, MaxWidth); 10840 return C; 10841 } 10842 10843 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 10844 switch (UO->getOpcode()) { 10845 // Boolean-valued operations are white-listed. 10846 case UO_LNot: 10847 return IntRange::forBoolType(); 10848 10849 // Operations with opaque sources are black-listed. 10850 case UO_Deref: 10851 case UO_AddrOf: // should be impossible 10852 return IntRange::forValueOfType(C, GetExprType(E)); 10853 10854 default: 10855 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 10856 Approximate); 10857 } 10858 } 10859 10860 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 10861 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 10862 Approximate); 10863 10864 if (const auto *BitField = E->getSourceBitField()) 10865 return IntRange(BitField->getBitWidthValue(C), 10866 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 10867 10868 return IntRange::forValueOfType(C, GetExprType(E)); 10869 } 10870 10871 static IntRange GetExprRange(ASTContext &C, const Expr *E, 10872 bool InConstantContext, bool Approximate) { 10873 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 10874 Approximate); 10875 } 10876 10877 /// Checks whether the given value, which currently has the given 10878 /// source semantics, has the same value when coerced through the 10879 /// target semantics. 10880 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 10881 const llvm::fltSemantics &Src, 10882 const llvm::fltSemantics &Tgt) { 10883 llvm::APFloat truncated = value; 10884 10885 bool ignored; 10886 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 10887 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 10888 10889 return truncated.bitwiseIsEqual(value); 10890 } 10891 10892 /// Checks whether the given value, which currently has the given 10893 /// source semantics, has the same value when coerced through the 10894 /// target semantics. 10895 /// 10896 /// The value might be a vector of floats (or a complex number). 10897 static bool IsSameFloatAfterCast(const APValue &value, 10898 const llvm::fltSemantics &Src, 10899 const llvm::fltSemantics &Tgt) { 10900 if (value.isFloat()) 10901 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 10902 10903 if (value.isVector()) { 10904 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 10905 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 10906 return false; 10907 return true; 10908 } 10909 10910 assert(value.isComplexFloat()); 10911 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 10912 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 10913 } 10914 10915 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 10916 bool IsListInit = false); 10917 10918 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 10919 // Suppress cases where we are comparing against an enum constant. 
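  // e.g. 'x == SomeEnumerator' should not be reported as tautological even if
  // the enumerator happens to sit at the edge of x's representable range.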
10920 if (const DeclRefExpr *DR = 10921 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 10922 if (isa<EnumConstantDecl>(DR->getDecl())) 10923 return true; 10924 10925 // Suppress cases where the value is expanded from a macro, unless that macro 10926 // is how a language represents a boolean literal. This is the case in both C 10927 // and Objective-C. 10928 SourceLocation BeginLoc = E->getBeginLoc(); 10929 if (BeginLoc.isMacroID()) { 10930 StringRef MacroName = Lexer::getImmediateMacroName( 10931 BeginLoc, S.getSourceManager(), S.getLangOpts()); 10932 return MacroName != "YES" && MacroName != "NO" && 10933 MacroName != "true" && MacroName != "false"; 10934 } 10935 10936 return false; 10937 } 10938 10939 static bool isKnownToHaveUnsignedValue(Expr *E) { 10940 return E->getType()->isIntegerType() && 10941 (!E->getType()->isSignedIntegerType() || 10942 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 10943 } 10944 10945 namespace { 10946 /// The promoted range of values of a type. In general this has the 10947 /// following structure: 10948 /// 10949 /// |-----------| . . . |-----------| 10950 /// ^ ^ ^ ^ 10951 /// Min HoleMin HoleMax Max 10952 /// 10953 /// ... where there is only a hole if a signed type is promoted to unsigned 10954 /// (in which case Min and Max are the smallest and largest representable 10955 /// values). 10956 struct PromotedRange { 10957 // Min, or HoleMax if there is a hole. 10958 llvm::APSInt PromotedMin; 10959 // Max, or HoleMin if there is a hole. 10960 llvm::APSInt PromotedMax; 10961 10962 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 10963 if (R.Width == 0) 10964 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 10965 else if (R.Width >= BitWidth && !Unsigned) { 10966 // Promotion made the type *narrower*. This happens when promoting 10967 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 10968 // Treat all values of 'signed int' as being in range for now. 10969 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 10970 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 10971 } else { 10972 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 10973 .extOrTrunc(BitWidth); 10974 PromotedMin.setIsUnsigned(Unsigned); 10975 10976 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 10977 .extOrTrunc(BitWidth); 10978 PromotedMax.setIsUnsigned(Unsigned); 10979 } 10980 } 10981 10982 // Determine whether this range is contiguous (has no hole). 10983 bool isContiguous() const { return PromotedMin <= PromotedMax; } 10984 10985 // Where a constant value is within the range. 
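  // The result is a bitmask describing how the constant relates to every
  // value in the range: e.g. 'Less' (LE | LT | NE) means the constant is
  // below the entire range, so 'constant < x' is tautologically true.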
10986 enum ComparisonResult { 10987 LT = 0x1, 10988 LE = 0x2, 10989 GT = 0x4, 10990 GE = 0x8, 10991 EQ = 0x10, 10992 NE = 0x20, 10993 InRangeFlag = 0x40, 10994 10995 Less = LE | LT | NE, 10996 Min = LE | InRangeFlag, 10997 InRange = InRangeFlag, 10998 Max = GE | InRangeFlag, 10999 Greater = GE | GT | NE, 11000 11001 OnlyValue = LE | GE | EQ | InRangeFlag, 11002 InHole = NE 11003 }; 11004 11005 ComparisonResult compare(const llvm::APSInt &Value) const { 11006 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 11007 Value.isUnsigned() == PromotedMin.isUnsigned()); 11008 if (!isContiguous()) { 11009 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 11010 if (Value.isMinValue()) return Min; 11011 if (Value.isMaxValue()) return Max; 11012 if (Value >= PromotedMin) return InRange; 11013 if (Value <= PromotedMax) return InRange; 11014 return InHole; 11015 } 11016 11017 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 11018 case -1: return Less; 11019 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 11020 case 1: 11021 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 11022 case -1: return InRange; 11023 case 0: return Max; 11024 case 1: return Greater; 11025 } 11026 } 11027 11028 llvm_unreachable("impossible compare result"); 11029 } 11030 11031 static llvm::Optional<StringRef> 11032 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 11033 if (Op == BO_Cmp) { 11034 ComparisonResult LTFlag = LT, GTFlag = GT; 11035 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 11036 11037 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 11038 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 11039 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 11040 return llvm::None; 11041 } 11042 11043 ComparisonResult TrueFlag, FalseFlag; 11044 if (Op == BO_EQ) { 11045 TrueFlag = EQ; 11046 FalseFlag = NE; 11047 } else if (Op == BO_NE) { 11048 TrueFlag = NE; 11049 FalseFlag = EQ; 11050 } else { 11051 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 11052 TrueFlag = LT; 11053 FalseFlag = GE; 11054 } else { 11055 TrueFlag = GT; 11056 FalseFlag = LE; 11057 } 11058 if (Op == BO_GE || Op == BO_LE) 11059 std::swap(TrueFlag, FalseFlag); 11060 } 11061 if (R & TrueFlag) 11062 return StringRef("true"); 11063 if (R & FalseFlag) 11064 return StringRef("false"); 11065 return llvm::None; 11066 } 11067 }; 11068 } 11069 11070 static bool HasEnumType(Expr *E) { 11071 // Strip off implicit integral promotions. 11072 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 11073 if (ICE->getCastKind() != CK_IntegralCast && 11074 ICE->getCastKind() != CK_NoOp) 11075 break; 11076 E = ICE->getSubExpr(); 11077 } 11078 11079 return E->getType()->isEnumeralType(); 11080 } 11081 11082 static int classifyConstantValue(Expr *Constant) { 11083 // The values of this enumeration are used in the diagnostics 11084 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 11085 enum ConstantValueKind { 11086 Miscellaneous = 0, 11087 LiteralTrue, 11088 LiteralFalse 11089 }; 11090 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 11091 return BL->getValue() ? 
ConstantValueKind::LiteralTrue
11092 : ConstantValueKind::LiteralFalse;
11093 return ConstantValueKind::Miscellaneous;
11094 }
11095
11096 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
11097 Expr *Constant, Expr *Other,
11098 const llvm::APSInt &Value,
11099 bool RhsConstant) {
11100 if (S.inTemplateInstantiation())
11101 return false;
11102
11103 Expr *OriginalOther = Other;
11104
11105 Constant = Constant->IgnoreParenImpCasts();
11106 Other = Other->IgnoreParenImpCasts();
11107
11108 // Suppress warnings on tautological comparisons between values of the same
11109 // enumeration type. There are only two ways we could warn on this:
11110 // - If the constant is outside the range of representable values of
11111 // the enumeration. In such a case, we should warn about the cast
11112 // to enumeration type, not about the comparison.
11113 // - If the constant is the maximum / minimum in-range value. For an
11114 // enumeration type, such comparisons can be meaningful and useful.
11115 if (Constant->getType()->isEnumeralType() &&
11116 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
11117 return false;
11118
11119 IntRange OtherValueRange = GetExprRange(
11120 S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);
11121
11122 QualType OtherT = Other->getType();
11123 if (const auto *AT = OtherT->getAs<AtomicType>())
11124 OtherT = AT->getValueType();
11125 IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);
11126
11127 // Special case for ObjC BOOL on targets where it's a typedef for a signed char
11128 // (namely, macOS). FIXME: IntRange::forValueOfType should do this.
11129 bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
11130 S.NSAPIObj->isObjCBOOLType(OtherT) &&
11131 OtherT->isSpecificBuiltinType(BuiltinType::SChar);
11132
11133 // Whether we're treating Other as being a bool because of the form of
11134 // expression despite it having another type (typically 'int' in C).
11135 bool OtherIsBooleanDespiteType =
11136 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
11137 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
11138 OtherTypeRange = OtherValueRange = IntRange::forBoolType();
11139
11140 // Check if all values in the range of possible values of this expression
11141 // lead to the same comparison outcome.
11142 PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
11143 Value.isUnsigned());
11144 auto Cmp = OtherPromotedValueRange.compare(Value);
11145 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
11146 if (!Result)
11147 return false;
11148
11149 // Also consider the range determined by the type alone. This allows us to
11150 // classify the warning under the proper diagnostic group.
11151 bool TautologicalTypeCompare = false;
11152 {
11153 PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
11154 Value.isUnsigned());
11155 auto TypeCmp = OtherPromotedTypeRange.compare(Value);
11156 if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
11157 RhsConstant)) {
11158 TautologicalTypeCompare = true;
11159 Cmp = TypeCmp;
11160 Result = TypeResult;
11161 }
11162 }
11163
11164 // Don't warn if the non-constant operand actually always evaluates to the
11165 // same value.
11166 if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
11167 return false;
11168
11169 // Suppress the diagnostic for an in-range comparison if the constant comes
11170 // from a macro or enumerator.
We don't want to diagnose 11171 // 11172 // some_long_value <= INT_MAX 11173 // 11174 // when sizeof(int) == sizeof(long). 11175 bool InRange = Cmp & PromotedRange::InRangeFlag; 11176 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 11177 return false; 11178 11179 // A comparison of an unsigned bit-field against 0 is really a type problem, 11180 // even though at the type level the bit-field might promote to 'signed int'. 11181 if (Other->refersToBitField() && InRange && Value == 0 && 11182 Other->getType()->isUnsignedIntegerOrEnumerationType()) 11183 TautologicalTypeCompare = true; 11184 11185 // If this is a comparison to an enum constant, include that 11186 // constant in the diagnostic. 11187 const EnumConstantDecl *ED = nullptr; 11188 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 11189 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 11190 11191 // Should be enough for uint128 (39 decimal digits) 11192 SmallString<64> PrettySourceValue; 11193 llvm::raw_svector_ostream OS(PrettySourceValue); 11194 if (ED) { 11195 OS << '\'' << *ED << "' (" << Value << ")"; 11196 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 11197 Constant->IgnoreParenImpCasts())) { 11198 OS << (BL->getValue() ? "YES" : "NO"); 11199 } else { 11200 OS << Value; 11201 } 11202 11203 if (!TautologicalTypeCompare) { 11204 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 11205 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 11206 << E->getOpcodeStr() << OS.str() << *Result 11207 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 11208 return true; 11209 } 11210 11211 if (IsObjCSignedCharBool) { 11212 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 11213 S.PDiag(diag::warn_tautological_compare_objc_bool) 11214 << OS.str() << *Result); 11215 return true; 11216 } 11217 11218 // FIXME: We use a somewhat different formatting for the in-range cases and 11219 // cases involving boolean values for historical reasons. We should pick a 11220 // consistent way of presenting these diagnostics. 11221 if (!InRange || Other->isKnownToHaveBooleanValue()) { 11222 11223 S.DiagRuntimeBehavior( 11224 E->getOperatorLoc(), E, 11225 S.PDiag(!InRange ? diag::warn_out_of_range_compare 11226 : diag::warn_tautological_bool_compare) 11227 << OS.str() << classifyConstantValue(Constant) << OtherT 11228 << OtherIsBooleanDespiteType << *Result 11229 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 11230 } else { 11231 unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 11232 ? (HasEnumType(OriginalOther) 11233 ? diag::warn_unsigned_enum_always_true_comparison 11234 : diag::warn_unsigned_always_true_comparison) 11235 : diag::warn_tautological_constant_compare; 11236 11237 S.Diag(E->getOperatorLoc(), Diag) 11238 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 11239 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 11240 } 11241 11242 return true; 11243 } 11244 11245 /// Analyze the operands of the given comparison. Implements the 11246 /// fallback case from AnalyzeComparison. 11247 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 11248 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11249 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11250 } 11251 11252 /// Implements -Wsign-compare. 
11253 /// 11254 /// \param E the binary operator to check for warnings 11255 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 11256 // The type the comparison is being performed in. 11257 QualType T = E->getLHS()->getType(); 11258 11259 // Only analyze comparison operators where both sides have been converted to 11260 // the same type. 11261 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 11262 return AnalyzeImpConvsInComparison(S, E); 11263 11264 // Don't analyze value-dependent comparisons directly. 11265 if (E->isValueDependent()) 11266 return AnalyzeImpConvsInComparison(S, E); 11267 11268 Expr *LHS = E->getLHS(); 11269 Expr *RHS = E->getRHS(); 11270 11271 if (T->isIntegralType(S.Context)) { 11272 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 11273 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 11274 11275 // We don't care about expressions whose result is a constant. 11276 if (RHSValue && LHSValue) 11277 return AnalyzeImpConvsInComparison(S, E); 11278 11279 // We only care about expressions where just one side is literal 11280 if ((bool)RHSValue ^ (bool)LHSValue) { 11281 // Is the constant on the RHS or LHS? 11282 const bool RhsConstant = (bool)RHSValue; 11283 Expr *Const = RhsConstant ? RHS : LHS; 11284 Expr *Other = RhsConstant ? LHS : RHS; 11285 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 11286 11287 // Check whether an integer constant comparison results in a value 11288 // of 'true' or 'false'. 11289 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 11290 return AnalyzeImpConvsInComparison(S, E); 11291 } 11292 } 11293 11294 if (!T->hasUnsignedIntegerRepresentation()) { 11295 // We don't do anything special if this isn't an unsigned integral 11296 // comparison: we're only interested in integral comparisons, and 11297 // signed comparisons only happen in cases we don't care to warn about. 11298 return AnalyzeImpConvsInComparison(S, E); 11299 } 11300 11301 LHS = LHS->IgnoreParenImpCasts(); 11302 RHS = RHS->IgnoreParenImpCasts(); 11303 11304 if (!S.getLangOpts().CPlusPlus) { 11305 // Avoid warning about comparison of integers with different signs when 11306 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 11307 // the type of `E`. 11308 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 11309 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 11310 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 11311 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 11312 } 11313 11314 // Check to see if one of the (unmodified) operands is of different 11315 // signedness. 11316 Expr *signedOperand, *unsignedOperand; 11317 if (LHS->getType()->hasSignedIntegerRepresentation()) { 11318 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 11319 "unsigned comparison between two signed integer expressions?"); 11320 signedOperand = LHS; 11321 unsignedOperand = RHS; 11322 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 11323 signedOperand = RHS; 11324 unsignedOperand = LHS; 11325 } else { 11326 return AnalyzeImpConvsInComparison(S, E); 11327 } 11328 11329 // Otherwise, calculate the effective range of the signed operand. 11330 IntRange signedRange = GetExprRange( 11331 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 11332 11333 // Go ahead and analyze implicit conversions in the operands. Note 11334 // that we skip the implicit conversions on both sides. 
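  // For example, given 'unsigned u; int i;', the comparison 'u < i' will reach
  // the mixed-sign diagnostic below unless the signed operand can be proven
  // non-negative.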
11335 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 11336 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 11337 11338 // If the signed range is non-negative, -Wsign-compare won't fire. 11339 if (signedRange.NonNegative) 11340 return; 11341 11342 // For (in)equality comparisons, if the unsigned operand is a 11343 // constant which cannot collide with a overflowed signed operand, 11344 // then reinterpreting the signed operand as unsigned will not 11345 // change the result of the comparison. 11346 if (E->isEqualityOp()) { 11347 unsigned comparisonWidth = S.Context.getIntWidth(T); 11348 IntRange unsignedRange = 11349 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(), 11350 /*Approximate*/ true); 11351 11352 // We should never be unable to prove that the unsigned operand is 11353 // non-negative. 11354 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 11355 11356 if (unsignedRange.Width < comparisonWidth) 11357 return; 11358 } 11359 11360 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 11361 S.PDiag(diag::warn_mixed_sign_comparison) 11362 << LHS->getType() << RHS->getType() 11363 << LHS->getSourceRange() << RHS->getSourceRange()); 11364 } 11365 11366 /// Analyzes an attempt to assign the given value to a bitfield. 11367 /// 11368 /// Returns true if there was something fishy about the attempt. 11369 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 11370 SourceLocation InitLoc) { 11371 assert(Bitfield->isBitField()); 11372 if (Bitfield->isInvalidDecl()) 11373 return false; 11374 11375 // White-list bool bitfields. 11376 QualType BitfieldType = Bitfield->getType(); 11377 if (BitfieldType->isBooleanType()) 11378 return false; 11379 11380 if (BitfieldType->isEnumeralType()) { 11381 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl(); 11382 // If the underlying enum type was not explicitly specified as an unsigned 11383 // type and the enum contain only positive values, MSVC++ will cause an 11384 // inconsistency by storing this as a signed type. 11385 if (S.getLangOpts().CPlusPlus11 && 11386 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 11387 BitfieldEnumDecl->getNumPositiveBits() > 0 && 11388 BitfieldEnumDecl->getNumNegativeBits() == 0) { 11389 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 11390 << BitfieldEnumDecl; 11391 } 11392 } 11393 11394 if (Bitfield->getType()->isBooleanType()) 11395 return false; 11396 11397 // Ignore value- or type-dependent expressions. 11398 if (Bitfield->getBitWidth()->isValueDependent() || 11399 Bitfield->getBitWidth()->isTypeDependent() || 11400 Init->isValueDependent() || 11401 Init->isTypeDependent()) 11402 return false; 11403 11404 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 11405 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 11406 11407 Expr::EvalResult Result; 11408 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 11409 Expr::SE_AllowSideEffects)) { 11410 // The RHS is not constant. If the RHS has an enum type, make sure the 11411 // bitfield is wide enough to hold all the values of the enum without 11412 // truncation. 11413 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 11414 EnumDecl *ED = EnumTy->getDecl(); 11415 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 11416 11417 // Enum types are implicitly signed on Windows, so check if there are any 11418 // negative enumerators to see if the enum was intended to be signed or 11419 // not. 
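    // For instance, assigning a non-constant value of an unfixed enum whose
    // enumerators span 0..3 to a 2-bit signed bit-field trips the
    // signedness-mismatch warning below.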
11420 bool SignedEnum = ED->getNumNegativeBits() > 0; 11421 11422 // Check for surprising sign changes when assigning enum values to a 11423 // bitfield of different signedness. If the bitfield is signed and we 11424 // have exactly the right number of bits to store this unsigned enum, 11425 // suggest changing the enum to an unsigned type. This typically happens 11426 // on Windows where unfixed enums always use an underlying type of 'int'. 11427 unsigned DiagID = 0; 11428 if (SignedEnum && !SignedBitfield) { 11429 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 11430 } else if (SignedBitfield && !SignedEnum && 11431 ED->getNumPositiveBits() == FieldWidth) { 11432 DiagID = diag::warn_signed_bitfield_enum_conversion; 11433 } 11434 11435 if (DiagID) { 11436 S.Diag(InitLoc, DiagID) << Bitfield << ED; 11437 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 11438 SourceRange TypeRange = 11439 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 11440 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 11441 << SignedEnum << TypeRange; 11442 } 11443 11444 // Compute the required bitwidth. If the enum has negative values, we need 11445 // one more bit than the normal number of positive bits to represent the 11446 // sign bit. 11447 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 11448 ED->getNumNegativeBits()) 11449 : ED->getNumPositiveBits(); 11450 11451 // Check the bitwidth. 11452 if (BitsNeeded > FieldWidth) { 11453 Expr *WidthExpr = Bitfield->getBitWidth(); 11454 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 11455 << Bitfield << ED; 11456 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 11457 << BitsNeeded << ED << WidthExpr->getSourceRange(); 11458 } 11459 } 11460 11461 return false; 11462 } 11463 11464 llvm::APSInt Value = Result.Val.getInt(); 11465 11466 unsigned OriginalWidth = Value.getBitWidth(); 11467 11468 if (!Value.isSigned() || Value.isNegative()) 11469 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 11470 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 11471 OriginalWidth = Value.getMinSignedBits(); 11472 11473 if (OriginalWidth <= FieldWidth) 11474 return false; 11475 11476 // Compute the value which the bitfield will contain. 11477 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 11478 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 11479 11480 // Check whether the stored value is equal to the original value. 11481 TruncatedValue = TruncatedValue.extend(OriginalWidth); 11482 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 11483 return false; 11484 11485 // Special-case bitfields of width 1: booleans are naturally 0/1, and 11486 // therefore don't strictly fit into a signed bitfield of width 1. 11487 if (FieldWidth == 1 && Value == 1) 11488 return false; 11489 11490 std::string PrettyValue = Value.toString(10); 11491 std::string PrettyTrunc = TruncatedValue.toString(10); 11492 11493 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 11494 << PrettyValue << PrettyTrunc << OriginalInit->getType() 11495 << Init->getSourceRange(); 11496 11497 return true; 11498 } 11499 11500 /// Analyze the given simple or compound assignment for warning-worthy 11501 /// operations. 11502 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 11503 // Just recurse on the LHS. 
11504 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11505 11506 // We want to recurse on the RHS as normal unless we're assigning to 11507 // a bitfield. 11508 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 11509 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 11510 E->getOperatorLoc())) { 11511 // Recurse, ignoring any implicit conversions on the RHS. 11512 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 11513 E->getOperatorLoc()); 11514 } 11515 } 11516 11517 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11518 11519 // Diagnose implicitly sequentially-consistent atomic assignment. 11520 if (E->getLHS()->getType()->isAtomicType()) 11521 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 11522 } 11523 11524 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 11525 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 11526 SourceLocation CContext, unsigned diag, 11527 bool pruneControlFlow = false) { 11528 if (pruneControlFlow) { 11529 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11530 S.PDiag(diag) 11531 << SourceType << T << E->getSourceRange() 11532 << SourceRange(CContext)); 11533 return; 11534 } 11535 S.Diag(E->getExprLoc(), diag) 11536 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 11537 } 11538 11539 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 11540 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 11541 SourceLocation CContext, 11542 unsigned diag, bool pruneControlFlow = false) { 11543 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 11544 } 11545 11546 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 11547 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 11548 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 11549 } 11550 11551 static void adornObjCBoolConversionDiagWithTernaryFixit( 11552 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 11553 Expr *Ignored = SourceExpr->IgnoreImplicit(); 11554 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 11555 Ignored = OVE->getSourceExpr(); 11556 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 11557 isa<BinaryOperator>(Ignored) || 11558 isa<CXXOperatorCallExpr>(Ignored); 11559 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 11560 if (NeedsParens) 11561 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 11562 << FixItHint::CreateInsertion(EndLoc, ")"); 11563 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 11564 } 11565 11566 /// Diagnose an implicit cast from a floating point value to an integer value. 
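/// e.g. 'int i = 1.5;' typically warns here, since the fractional part is
/// silently dropped by the conversion.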
11567 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 11568 SourceLocation CContext) { 11569 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 11570 const bool PruneWarnings = S.inTemplateInstantiation(); 11571 11572 Expr *InnerE = E->IgnoreParenImpCasts(); 11573 // We also want to warn on, e.g., "int i = -1.234" 11574 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 11575 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 11576 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 11577 11578 const bool IsLiteral = 11579 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 11580 11581 llvm::APFloat Value(0.0); 11582 bool IsConstant = 11583 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 11584 if (!IsConstant) { 11585 if (isObjCSignedCharBool(S, T)) { 11586 return adornObjCBoolConversionDiagWithTernaryFixit( 11587 S, E, 11588 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 11589 << E->getType()); 11590 } 11591 11592 return DiagnoseImpCast(S, E, T, CContext, 11593 diag::warn_impcast_float_integer, PruneWarnings); 11594 } 11595 11596 bool isExact = false; 11597 11598 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 11599 T->hasUnsignedIntegerRepresentation()); 11600 llvm::APFloat::opStatus Result = Value.convertToInteger( 11601 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 11602 11603 // FIXME: Force the precision of the source value down so we don't print 11604 // digits which are usually useless (we don't really care here if we 11605 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 11606 // would automatically print the shortest representation, but it's a bit 11607 // tricky to implement. 11608 SmallString<16> PrettySourceValue; 11609 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 11610 precision = (precision * 59 + 195) / 196; 11611 Value.toString(PrettySourceValue, precision); 11612 11613 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 11614 return adornObjCBoolConversionDiagWithTernaryFixit( 11615 S, E, 11616 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 11617 << PrettySourceValue); 11618 } 11619 11620 if (Result == llvm::APFloat::opOK && isExact) { 11621 if (IsLiteral) return; 11622 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 11623 PruneWarnings); 11624 } 11625 11626 // Conversion of a floating-point value to a non-bool integer where the 11627 // integral part cannot be represented by the integer type is undefined. 11628 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 11629 return DiagnoseImpCast( 11630 S, E, T, CContext, 11631 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 11632 : diag::warn_impcast_float_to_integer_out_of_range, 11633 PruneWarnings); 11634 11635 unsigned DiagID = 0; 11636 if (IsLiteral) { 11637 // Warn on floating point literal to integer. 11638 DiagID = diag::warn_impcast_literal_float_to_integer; 11639 } else if (IntegerValue == 0) { 11640 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 11641 return DiagnoseImpCast(S, E, T, CContext, 11642 diag::warn_impcast_float_integer, PruneWarnings); 11643 } 11644 // Warn on non-zero to zero conversion. 
11645 DiagID = diag::warn_impcast_float_to_integer_zero; 11646 } else { 11647 if (IntegerValue.isUnsigned()) { 11648 if (!IntegerValue.isMaxValue()) { 11649 return DiagnoseImpCast(S, E, T, CContext, 11650 diag::warn_impcast_float_integer, PruneWarnings); 11651 } 11652 } else { // IntegerValue.isSigned() 11653 if (!IntegerValue.isMaxSignedValue() && 11654 !IntegerValue.isMinSignedValue()) { 11655 return DiagnoseImpCast(S, E, T, CContext, 11656 diag::warn_impcast_float_integer, PruneWarnings); 11657 } 11658 } 11659 // Warn on evaluatable floating point expression to integer conversion. 11660 DiagID = diag::warn_impcast_float_to_integer; 11661 } 11662 11663 SmallString<16> PrettyTargetValue; 11664 if (IsBool) 11665 PrettyTargetValue = Value.isZero() ? "false" : "true"; 11666 else 11667 IntegerValue.toString(PrettyTargetValue); 11668 11669 if (PruneWarnings) { 11670 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11671 S.PDiag(DiagID) 11672 << E->getType() << T.getUnqualifiedType() 11673 << PrettySourceValue << PrettyTargetValue 11674 << E->getSourceRange() << SourceRange(CContext)); 11675 } else { 11676 S.Diag(E->getExprLoc(), DiagID) 11677 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 11678 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 11679 } 11680 } 11681 11682 /// Analyze the given compound assignment for the possible losing of 11683 /// floating-point precision. 11684 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 11685 assert(isa<CompoundAssignOperator>(E) && 11686 "Must be compound assignment operation"); 11687 // Recurse on the LHS and RHS in here 11688 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11689 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11690 11691 if (E->getLHS()->getType()->isAtomicType()) 11692 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 11693 11694 // Now check the outermost expression 11695 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 11696 const auto *RBT = cast<CompoundAssignOperator>(E) 11697 ->getComputationResultType() 11698 ->getAs<BuiltinType>(); 11699 11700 // The below checks assume source is floating point. 11701 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 11702 11703 // If source is floating point but target is an integer. 11704 if (ResultBT->isInteger()) 11705 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 11706 E->getExprLoc(), diag::warn_impcast_float_integer); 11707 11708 if (!ResultBT->isFloatingPoint()) 11709 return; 11710 11711 // If both source and target are floating points, warn about losing precision. 11712 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 11713 QualType(ResultBT, 0), QualType(RBT, 0)); 11714 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 11715 // warn about dropping FP rank. 
11716 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 11717 diag::warn_impcast_float_result_precision); 11718 } 11719 11720 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 11721 IntRange Range) { 11722 if (!Range.Width) return "0"; 11723 11724 llvm::APSInt ValueInRange = Value; 11725 ValueInRange.setIsSigned(!Range.NonNegative); 11726 ValueInRange = ValueInRange.trunc(Range.Width); 11727 return ValueInRange.toString(10); 11728 } 11729 11730 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 11731 if (!isa<ImplicitCastExpr>(Ex)) 11732 return false; 11733 11734 Expr *InnerE = Ex->IgnoreParenImpCasts(); 11735 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 11736 const Type *Source = 11737 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 11738 if (Target->isDependentType()) 11739 return false; 11740 11741 const BuiltinType *FloatCandidateBT = 11742 dyn_cast<BuiltinType>(ToBool ? Source : Target); 11743 const Type *BoolCandidateType = ToBool ? Target : Source; 11744 11745 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 11746 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 11747 } 11748 11749 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 11750 SourceLocation CC) { 11751 unsigned NumArgs = TheCall->getNumArgs(); 11752 for (unsigned i = 0; i < NumArgs; ++i) { 11753 Expr *CurrA = TheCall->getArg(i); 11754 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 11755 continue; 11756 11757 bool IsSwapped = ((i > 0) && 11758 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 11759 IsSwapped |= ((i < (NumArgs - 1)) && 11760 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 11761 if (IsSwapped) { 11762 // Warn on this floating-point to bool conversion. 11763 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 11764 CurrA->getType(), CC, 11765 diag::warn_impcast_floating_point_to_bool); 11766 } 11767 } 11768 } 11769 11770 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 11771 SourceLocation CC) { 11772 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 11773 E->getExprLoc())) 11774 return; 11775 11776 // Don't warn on functions which have return type nullptr_t. 11777 if (isa<CallExpr>(E)) 11778 return; 11779 11780 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 11781 const Expr::NullPointerConstantKind NullKind = 11782 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 11783 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 11784 return; 11785 11786 // Return if target type is a safe conversion. 11787 if (T->isAnyPointerType() || T->isBlockPointerType() || 11788 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 11789 return; 11790 11791 SourceLocation Loc = E->getSourceRange().getBegin(); 11792 11793 // Venture through the macro stacks to get to the source of macro arguments. 11794 // The new location is a better location than the complete location that was 11795 // passed in. 11796 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 11797 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 11798 11799 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
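  // In C++ the NULL macro is typically defined to __null by the compiler's
  // headers, so the interesting location is the NULL expansion itself.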
11800 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 11801 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 11802 Loc, S.SourceMgr, S.getLangOpts()); 11803 if (MacroName == "NULL") 11804 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 11805 } 11806 11807 // Only warn if the null and context location are in the same macro expansion. 11808 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 11809 return; 11810 11811 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 11812 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 11813 << FixItHint::CreateReplacement(Loc, 11814 S.getFixItZeroLiteralForType(T, Loc)); 11815 } 11816 11817 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11818 ObjCArrayLiteral *ArrayLiteral); 11819 11820 static void 11821 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 11822 ObjCDictionaryLiteral *DictionaryLiteral); 11823 11824 /// Check a single element within a collection literal against the 11825 /// target element type. 11826 static void checkObjCCollectionLiteralElement(Sema &S, 11827 QualType TargetElementType, 11828 Expr *Element, 11829 unsigned ElementKind) { 11830 // Skip a bitcast to 'id' or qualified 'id'. 11831 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 11832 if (ICE->getCastKind() == CK_BitCast && 11833 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 11834 Element = ICE->getSubExpr(); 11835 } 11836 11837 QualType ElementType = Element->getType(); 11838 ExprResult ElementResult(Element); 11839 if (ElementType->getAs<ObjCObjectPointerType>() && 11840 S.CheckSingleAssignmentConstraints(TargetElementType, 11841 ElementResult, 11842 false, false) 11843 != Sema::Compatible) { 11844 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 11845 << ElementType << ElementKind << TargetElementType 11846 << Element->getSourceRange(); 11847 } 11848 11849 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 11850 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 11851 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 11852 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 11853 } 11854 11855 /// Check an Objective-C array literal being converted to the given 11856 /// target type. 11857 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11858 ObjCArrayLiteral *ArrayLiteral) { 11859 if (!S.NSArrayDecl) 11860 return; 11861 11862 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 11863 if (!TargetObjCPtr) 11864 return; 11865 11866 if (TargetObjCPtr->isUnspecialized() || 11867 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 11868 != S.NSArrayDecl->getCanonicalDecl()) 11869 return; 11870 11871 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 11872 if (TypeArgs.size() != 1) 11873 return; 11874 11875 QualType TargetElementType = TypeArgs[0]; 11876 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 11877 checkObjCCollectionLiteralElement(S, TargetElementType, 11878 ArrayLiteral->getElement(I), 11879 0); 11880 } 11881 } 11882 11883 /// Check an Objective-C dictionary literal being converted to the given 11884 /// target type. 
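/// For instance, with a target of 'NSDictionary<NSString *, NSString *> *',
/// a value element of type 'NSNumber *' in the literal would be flagged by
/// the per-element check.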
11885 static void
11886 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
11887 ObjCDictionaryLiteral *DictionaryLiteral) {
11888 if (!S.NSDictionaryDecl)
11889 return;
11890
11891 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
11892 if (!TargetObjCPtr)
11893 return;
11894
11895 if (TargetObjCPtr->isUnspecialized() ||
11896 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
11897 != S.NSDictionaryDecl->getCanonicalDecl())
11898 return;
11899
11900 auto TypeArgs = TargetObjCPtr->getTypeArgs();
11901 if (TypeArgs.size() != 2)
11902 return;
11903
11904 QualType TargetKeyType = TypeArgs[0];
11905 QualType TargetObjectType = TypeArgs[1];
11906 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
11907 auto Element = DictionaryLiteral->getKeyValueElement(I);
11908 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
11909 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
11910 }
11911 }
11912
11913 // Helper function to filter out cases for constant width constant conversion.
11914 // Don't warn on char array initialization or for non-decimal values.
11915 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
11916 SourceLocation CC) {
11917 // If initializing from a constant, and the constant starts with '0',
11918 // then it is a binary, octal, or hexadecimal. Allow these constants
11919 // to fill all the bits, even if there is a sign change.
11920 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
11921 const char FirstLiteralCharacter =
11922 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
11923 if (FirstLiteralCharacter == '0')
11924 return false;
11925 }
11926
11927 // If the CC location points to a '{', and the type is char, then assume
11928 // it is an array initialization.
11929 if (CC.isValid() && T->isCharType()) {
11930 const char FirstContextCharacter =
11931 S.getSourceManager().getCharacterData(CC)[0];
11932 if (FirstContextCharacter == '{')
11933 return false;
11934 }
11935
11936 return true;
11937 }
11938
11939 static const IntegerLiteral *getIntegerLiteral(Expr *E) {
11940 const auto *IL = dyn_cast<IntegerLiteral>(E);
11941 if (!IL) {
11942 if (auto *UO = dyn_cast<UnaryOperator>(E)) {
11943 if (UO->getOpcode() == UO_Minus)
11944 return dyn_cast<IntegerLiteral>(UO->getSubExpr());
11945 }
11946 }
11947
11948 return IL;
11949 }
11950
11951 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
11952 E = E->IgnoreParenImpCasts();
11953 SourceLocation ExprLoc = E->getExprLoc();
11954
11955 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
11956 BinaryOperator::Opcode Opc = BO->getOpcode();
11957 Expr::EvalResult Result;
11958 // Do not diagnose unsigned shifts.
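    // For example, in a boolean context '4 << 1' is diagnosed as always true
    // and '0 << n' as always false; a conditional like 'x ? 2 : 4' is flagged
    // further down because both arms are non-zero.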
11959 if (Opc == BO_Shl) {
11960 const auto *LHS = getIntegerLiteral(BO->getLHS());
11961 const auto *RHS = getIntegerLiteral(BO->getRHS());
11962 if (LHS && LHS->getValue() == 0)
11963 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
11964 else if (!E->isValueDependent() && LHS && RHS &&
11965 RHS->getValue().isNonNegative() &&
11966 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
11967 S.Diag(ExprLoc, diag::warn_left_shift_always)
11968 << (Result.Val.getInt() != 0);
11969 else if (E->getType()->isSignedIntegerType())
11970 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
11971 }
11972 }
11973
11974 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
11975 const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
11976 const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
11977 if (!LHS || !RHS)
11978 return;
11979 if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
11980 (RHS->getValue() == 0 || RHS->getValue() == 1))
11981 // Do not diagnose common idioms.
11982 return;
11983 if (LHS->getValue() != 0 && RHS->getValue() != 0)
11984 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
11985 }
11986 }
11987
11988 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
11989 SourceLocation CC,
11990 bool *ICContext = nullptr,
11991 bool IsListInit = false) {
11992 if (E->isTypeDependent() || E->isValueDependent()) return;
11993
11994 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
11995 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
11996 if (Source == Target) return;
11997 if (Target->isDependentType()) return;
11998
11999 // If the conversion context location is invalid, don't complain. We also
12000 // don't want to emit a warning if the issue occurs from the expansion of
12001 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
12002 // delay this check as long as possible. Once we detect we are in that
12003 // scenario, we just return.
12004 if (CC.isInvalid())
12005 return;
12006
12007 if (Source->isAtomicType())
12008 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
12009
12010 // Diagnose implicit casts to bool.
12011 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
12012 if (isa<StringLiteral>(E))
12013 // Warn on string literal to bool. Checks for string literals in logical
12014 // AND expressions, for instance, assert(0 && "error here"), are
12015 // prevented by a check in AnalyzeImplicitConversions().
12016 return DiagnoseImpCast(S, E, T, CC,
12017 diag::warn_impcast_string_literal_to_bool);
12018 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
12019 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
12020 // This covers the literal expressions that evaluate to Objective-C
12021 // objects.
12022 return DiagnoseImpCast(S, E, T, CC,
12023 diag::warn_impcast_objective_c_literal_to_bool);
12024 }
12025 if (Source->isPointerType() || Source->canDecayToPointerType()) {
12026 // Warn on pointer to bool conversion that is always true.
12027 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
12028 SourceRange(CC));
12029 }
12030 }
12031
12032 // If we're converting a constant to an ObjC BOOL on a platform where BOOL
12033 // is a typedef for signed char (macOS), then that constant value has to be 1
12034 // or 0.
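  // For example, 'BOOL b = 2;' is diagnosed here with a '? YES : NO' fix-it,
  // while the constants 0 and 1 are accepted silently.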
12035 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 12036 Expr::EvalResult Result; 12037 if (E->EvaluateAsInt(Result, S.getASTContext(), 12038 Expr::SE_AllowSideEffects)) { 12039 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 12040 adornObjCBoolConversionDiagWithTernaryFixit( 12041 S, E, 12042 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 12043 << Result.Val.getInt().toString(10)); 12044 } 12045 return; 12046 } 12047 } 12048 12049 // Check implicit casts from Objective-C collection literals to specialized 12050 // collection types, e.g., NSArray<NSString *> *. 12051 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 12052 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 12053 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 12054 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 12055 12056 // Strip vector types. 12057 if (isa<VectorType>(Source)) { 12058 if (!isa<VectorType>(Target)) { 12059 if (S.SourceMgr.isInSystemMacro(CC)) 12060 return; 12061 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 12062 } 12063 12064 // If the vector cast is cast between two vectors of the same size, it is 12065 // a bitcast, not a conversion. 12066 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 12067 return; 12068 12069 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 12070 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 12071 } 12072 if (auto VecTy = dyn_cast<VectorType>(Target)) 12073 Target = VecTy->getElementType().getTypePtr(); 12074 12075 // Strip complex types. 12076 if (isa<ComplexType>(Source)) { 12077 if (!isa<ComplexType>(Target)) { 12078 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 12079 return; 12080 12081 return DiagnoseImpCast(S, E, T, CC, 12082 S.getLangOpts().CPlusPlus 12083 ? diag::err_impcast_complex_scalar 12084 : diag::warn_impcast_complex_scalar); 12085 } 12086 12087 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 12088 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 12089 } 12090 12091 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 12092 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 12093 12094 // If the source is floating point... 12095 if (SourceBT && SourceBT->isFloatingPoint()) { 12096 // ...and the target is floating point... 12097 if (TargetBT && TargetBT->isFloatingPoint()) { 12098 // ...then warn if we're dropping FP rank. 12099 12100 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 12101 QualType(SourceBT, 0), QualType(TargetBT, 0)); 12102 if (Order > 0) { 12103 // Don't warn about float constants that are precisely 12104 // representable in the target type. 12105 Expr::EvalResult result; 12106 if (E->EvaluateAsRValue(result, S.Context)) { 12107 // Value might be a float, a float vector, or a float complex. 12108 if (IsSameFloatAfterCast(result.Val, 12109 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 12110 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 12111 return; 12112 } 12113 12114 if (S.SourceMgr.isInSystemMacro(CC)) 12115 return; 12116 12117 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 12118 } 12119 // ... 
or possibly if we're increasing rank, too 12120 else if (Order < 0) { 12121 if (S.SourceMgr.isInSystemMacro(CC)) 12122 return; 12123 12124 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 12125 } 12126 return; 12127 } 12128 12129 // If the target is integral, always warn. 12130 if (TargetBT && TargetBT->isInteger()) { 12131 if (S.SourceMgr.isInSystemMacro(CC)) 12132 return; 12133 12134 DiagnoseFloatingImpCast(S, E, T, CC); 12135 } 12136 12137 // Detect the case where a call result is converted from floating-point to 12138 // to bool, and the final argument to the call is converted from bool, to 12139 // discover this typo: 12140 // 12141 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 12142 // 12143 // FIXME: This is an incredibly special case; is there some more general 12144 // way to detect this class of misplaced-parentheses bug? 12145 if (Target->isBooleanType() && isa<CallExpr>(E)) { 12146 // Check last argument of function call to see if it is an 12147 // implicit cast from a type matching the type the result 12148 // is being cast to. 12149 CallExpr *CEx = cast<CallExpr>(E); 12150 if (unsigned NumArgs = CEx->getNumArgs()) { 12151 Expr *LastA = CEx->getArg(NumArgs - 1); 12152 Expr *InnerE = LastA->IgnoreParenImpCasts(); 12153 if (isa<ImplicitCastExpr>(LastA) && 12154 InnerE->getType()->isBooleanType()) { 12155 // Warn on this floating-point to bool conversion 12156 DiagnoseImpCast(S, E, T, CC, 12157 diag::warn_impcast_floating_point_to_bool); 12158 } 12159 } 12160 } 12161 return; 12162 } 12163 12164 // Valid casts involving fixed point types should be accounted for here. 12165 if (Source->isFixedPointType()) { 12166 if (Target->isUnsaturatedFixedPointType()) { 12167 Expr::EvalResult Result; 12168 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 12169 S.isConstantEvaluated())) { 12170 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 12171 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 12172 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 12173 if (Value > MaxVal || Value < MinVal) { 12174 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12175 S.PDiag(diag::warn_impcast_fixed_point_range) 12176 << Value.toString() << T 12177 << E->getSourceRange() 12178 << clang::SourceRange(CC)); 12179 return; 12180 } 12181 } 12182 } else if (Target->isIntegerType()) { 12183 Expr::EvalResult Result; 12184 if (!S.isConstantEvaluated() && 12185 E->EvaluateAsFixedPoint(Result, S.Context, 12186 Expr::SE_AllowSideEffects)) { 12187 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 12188 12189 bool Overflowed; 12190 llvm::APSInt IntResult = FXResult.convertToInt( 12191 S.Context.getIntWidth(T), 12192 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 12193 12194 if (Overflowed) { 12195 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12196 S.PDiag(diag::warn_impcast_fixed_point_range) 12197 << FXResult.toString() << T 12198 << E->getSourceRange() 12199 << clang::SourceRange(CC)); 12200 return; 12201 } 12202 } 12203 } 12204 } else if (Target->isUnsaturatedFixedPointType()) { 12205 if (Source->isIntegerType()) { 12206 Expr::EvalResult Result; 12207 if (!S.isConstantEvaluated() && 12208 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 12209 llvm::APSInt Value = Result.Val.getInt(); 12210 12211 bool Overflowed; 12212 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 12213 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 12214 12215 if (Overflowed) { 12216 
S.DiagRuntimeBehavior(E->getExprLoc(), E, 12217 S.PDiag(diag::warn_impcast_fixed_point_range) 12218 << Value.toString(/*Radix=*/10) << T 12219 << E->getSourceRange() 12220 << clang::SourceRange(CC)); 12221 return; 12222 } 12223 } 12224 } 12225 } 12226 12227 // If we are casting an integer type to a floating point type without 12228 // initialization-list syntax, we might lose accuracy if the floating 12229 // point type has a narrower significand than the integer type. 12230 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 12231 TargetBT->isFloatingType() && !IsListInit) { 12232 // Determine the number of precision bits in the source integer type. 12233 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 12234 /*Approximate*/ true); 12235 unsigned int SourcePrecision = SourceRange.Width; 12236 12237 // Determine the number of precision bits in the 12238 // target floating point type. 12239 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 12240 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 12241 12242 if (SourcePrecision > 0 && TargetPrecision > 0 && 12243 SourcePrecision > TargetPrecision) { 12244 12245 if (Optional<llvm::APSInt> SourceInt = 12246 E->getIntegerConstantExpr(S.Context)) { 12247 // If the source integer is a constant, convert it to the target 12248 // floating point type. Issue a warning if the value changes 12249 // during the whole conversion. 12250 llvm::APFloat TargetFloatValue( 12251 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 12252 llvm::APFloat::opStatus ConversionStatus = 12253 TargetFloatValue.convertFromAPInt( 12254 *SourceInt, SourceBT->isSignedInteger(), 12255 llvm::APFloat::rmNearestTiesToEven); 12256 12257 if (ConversionStatus != llvm::APFloat::opOK) { 12258 std::string PrettySourceValue = SourceInt->toString(10); 12259 SmallString<32> PrettyTargetValue; 12260 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 12261 12262 S.DiagRuntimeBehavior( 12263 E->getExprLoc(), E, 12264 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 12265 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12266 << E->getSourceRange() << clang::SourceRange(CC)); 12267 } 12268 } else { 12269 // Otherwise, the implicit conversion may lose precision. 12270 DiagnoseImpCast(S, E, T, CC, 12271 diag::warn_impcast_integer_float_precision); 12272 } 12273 } 12274 } 12275 12276 DiagnoseNullConversion(S, E, T, CC); 12277 12278 S.DiscardMisalignedMemberAddress(Target, E); 12279 12280 if (Target->isBooleanType()) 12281 DiagnoseIntInBoolContext(S, E); 12282 12283 if (!Source->isIntegerType() || !Target->isIntegerType()) 12284 return; 12285 12286 // TODO: remove this early return once the false positives for constant->bool 12287 // in templates, macros, etc, are reduced or removed. 
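// (Without this early return, a constant such as '2' converted to bool inside
// a macro or template would be flagged by the constant-range check below.)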
12288 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 12289 return; 12290 12291 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 12292 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 12293 return adornObjCBoolConversionDiagWithTernaryFixit( 12294 S, E, 12295 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 12296 << E->getType()); 12297 } 12298 12299 IntRange SourceTypeRange = 12300 IntRange::forTargetOfCanonicalType(S.Context, Source); 12301 IntRange LikelySourceRange = 12302 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 12303 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 12304 12305 if (LikelySourceRange.Width > TargetRange.Width) { 12306 // If the source is a constant, use a default-on diagnostic. 12307 // TODO: this should happen for bitfield stores, too. 12308 Expr::EvalResult Result; 12309 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 12310 S.isConstantEvaluated())) { 12311 llvm::APSInt Value(32); 12312 Value = Result.Val.getInt(); 12313 12314 if (S.SourceMgr.isInSystemMacro(CC)) 12315 return; 12316 12317 std::string PrettySourceValue = Value.toString(10); 12318 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 12319 12320 S.DiagRuntimeBehavior( 12321 E->getExprLoc(), E, 12322 S.PDiag(diag::warn_impcast_integer_precision_constant) 12323 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12324 << E->getSourceRange() << SourceRange(CC)); 12325 return; 12326 } 12327 12328 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 12329 if (S.SourceMgr.isInSystemMacro(CC)) 12330 return; 12331 12332 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 12333 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 12334 /* pruneControlFlow */ true); 12335 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 12336 } 12337 12338 if (TargetRange.Width > SourceTypeRange.Width) { 12339 if (auto *UO = dyn_cast<UnaryOperator>(E)) 12340 if (UO->getOpcode() == UO_Minus) 12341 if (Source->isUnsignedIntegerType()) { 12342 if (Target->isUnsignedIntegerType()) 12343 return DiagnoseImpCast(S, E, T, CC, 12344 diag::warn_impcast_high_order_zero_bits); 12345 if (Target->isSignedIntegerType()) 12346 return DiagnoseImpCast(S, E, T, CC, 12347 diag::warn_impcast_nonnegative_result); 12348 } 12349 } 12350 12351 if (TargetRange.Width == LikelySourceRange.Width && 12352 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 12353 Source->isSignedIntegerType()) { 12354 // Warn when doing a signed to signed conversion, warn if the positive 12355 // source value is exactly the width of the target type, which will 12356 // cause a negative value to be stored. 12357 12358 Expr::EvalResult Result; 12359 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 12360 !S.SourceMgr.isInSystemMacro(CC)) { 12361 llvm::APSInt Value = Result.Val.getInt(); 12362 if (isSameWidthConstantConversion(S, E, T, CC)) { 12363 std::string PrettySourceValue = Value.toString(10); 12364 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 12365 12366 S.DiagRuntimeBehavior( 12367 E->getExprLoc(), E, 12368 S.PDiag(diag::warn_impcast_integer_precision_constant) 12369 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12370 << E->getSourceRange() << SourceRange(CC)); 12371 return; 12372 } 12373 } 12374 12375 // Fall through for non-constants to give a sign conversion warning. 
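// (For example, 'signed char c = 200;' is reported above because the
// non-negative constant needs all eight bits of the signed target; a
// non-constant conversion of the same shape is handled by the signedness
// check below.)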
12376 } 12377 12378 if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 12379 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 12380 LikelySourceRange.Width == TargetRange.Width)) { 12381 if (S.SourceMgr.isInSystemMacro(CC)) 12382 return; 12383 12384 unsigned DiagID = diag::warn_impcast_integer_sign; 12385 12386 // Traditionally, gcc has warned about this under -Wsign-compare. 12387 // We also want to warn about it in -Wconversion. 12388 // So if -Wconversion is off, use a completely identical diagnostic 12389 // in the sign-compare group. 12390 // The conditional-checking code will 12391 if (ICContext) { 12392 DiagID = diag::warn_impcast_integer_sign_conditional; 12393 *ICContext = true; 12394 } 12395 12396 return DiagnoseImpCast(S, E, T, CC, DiagID); 12397 } 12398 12399 // Diagnose conversions between different enumeration types. 12400 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 12401 // type, to give us better diagnostics. 12402 QualType SourceType = E->getType(); 12403 if (!S.getLangOpts().CPlusPlus) { 12404 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 12405 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 12406 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 12407 SourceType = S.Context.getTypeDeclType(Enum); 12408 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 12409 } 12410 } 12411 12412 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 12413 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 12414 if (SourceEnum->getDecl()->hasNameForLinkage() && 12415 TargetEnum->getDecl()->hasNameForLinkage() && 12416 SourceEnum != TargetEnum) { 12417 if (S.SourceMgr.isInSystemMacro(CC)) 12418 return; 12419 12420 return DiagnoseImpCast(S, E, SourceType, T, CC, 12421 diag::warn_impcast_different_enum_types); 12422 } 12423 } 12424 12425 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 12426 SourceLocation CC, QualType T); 12427 12428 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 12429 SourceLocation CC, bool &ICContext) { 12430 E = E->IgnoreParenImpCasts(); 12431 12432 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 12433 return CheckConditionalOperator(S, CO, CC, T); 12434 12435 AnalyzeImplicitConversions(S, E, CC); 12436 if (E->getType() != T) 12437 return CheckImplicitConversion(S, E, T, CC, &ICContext); 12438 } 12439 12440 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 12441 SourceLocation CC, QualType T) { 12442 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 12443 12444 Expr *TrueExpr = E->getTrueExpr(); 12445 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 12446 TrueExpr = BCO->getCommon(); 12447 12448 bool Suspicious = false; 12449 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 12450 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 12451 12452 if (T->isBooleanType()) 12453 DiagnoseIntInBoolContext(S, E); 12454 12455 // If -Wconversion would have warned about either of the candidates 12456 // for a signedness conversion to the context type... 12457 if (!Suspicious) return; 12458 12459 // ...but it's currently ignored... 12460 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 12461 return; 12462 12463 // ...then check whether it would have warned about either of the 12464 // candidates for a signedness conversion to the condition type. 
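// (If the conditional already has the context type there is nothing further
// to check.)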
12465 if (E->getType() == T) return; 12466 12467 Suspicious = false; 12468 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 12469 E->getType(), CC, &Suspicious); 12470 if (!Suspicious) 12471 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 12472 E->getType(), CC, &Suspicious); 12473 } 12474 12475 /// Check conversion of given expression to boolean. 12476 /// Input argument E is a logical expression. 12477 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 12478 if (S.getLangOpts().Bool) 12479 return; 12480 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 12481 return; 12482 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 12483 } 12484 12485 namespace { 12486 struct AnalyzeImplicitConversionsWorkItem { 12487 Expr *E; 12488 SourceLocation CC; 12489 bool IsListInit; 12490 }; 12491 } 12492 12493 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 12494 /// that should be visited are added to WorkList. 12495 static void AnalyzeImplicitConversions( 12496 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 12497 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 12498 Expr *OrigE = Item.E; 12499 SourceLocation CC = Item.CC; 12500 12501 QualType T = OrigE->getType(); 12502 Expr *E = OrigE->IgnoreParenImpCasts(); 12503 12504 // Propagate whether we are in a C++ list initialization expression. 12505 // If so, we do not issue warnings for implicit int-float conversion 12506 // precision loss, because C++11 narrowing already handles it. 12507 bool IsListInit = Item.IsListInit || 12508 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 12509 12510 if (E->isTypeDependent() || E->isValueDependent()) 12511 return; 12512 12513 Expr *SourceExpr = E; 12514 // Examine, but don't traverse into the source expression of an 12515 // OpaqueValueExpr, since it may have multiple parents and we don't want to 12516 // emit duplicate diagnostics. Its fine to examine the form or attempt to 12517 // evaluate it in the context of checking the specific conversion to T though. 12518 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 12519 if (auto *Src = OVE->getSourceExpr()) 12520 SourceExpr = Src; 12521 12522 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 12523 if (UO->getOpcode() == UO_Not && 12524 UO->getSubExpr()->isKnownToHaveBooleanValue()) 12525 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 12526 << OrigE->getSourceRange() << T->isBooleanType() 12527 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 12528 12529 // For conditional operators, we analyze the arguments as if they 12530 // were being fed directly into the output. 12531 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 12532 CheckConditionalOperator(S, CO, CC, T); 12533 return; 12534 } 12535 12536 // Check implicit argument conversions for function calls. 12537 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 12538 CheckImplicitArgumentConversions(S, Call, CC); 12539 12540 // Go ahead and check any implicit conversions we might have skipped. 12541 // The non-canonical typecheck is just an optimization; 12542 // CheckImplicitConversion will filter out dead implicit conversions. 12543 if (SourceExpr->getType() != T) 12544 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 12545 12546 // Now continue drilling into this expression. 
12547 12548 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 12549 // The bound subexpressions in a PseudoObjectExpr are not reachable 12550 // as transitive children. 12551 // FIXME: Use a more uniform representation for this. 12552 for (auto *SE : POE->semantics()) 12553 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 12554 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 12555 } 12556 12557 // Skip past explicit casts. 12558 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 12559 E = CE->getSubExpr()->IgnoreParenImpCasts(); 12560 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 12561 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12562 WorkList.push_back({E, CC, IsListInit}); 12563 return; 12564 } 12565 12566 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 12567 // Do a somewhat different check with comparison operators. 12568 if (BO->isComparisonOp()) 12569 return AnalyzeComparison(S, BO); 12570 12571 // And with simple assignments. 12572 if (BO->getOpcode() == BO_Assign) 12573 return AnalyzeAssignment(S, BO); 12574 // And with compound assignments. 12575 if (BO->isAssignmentOp()) 12576 return AnalyzeCompoundAssignment(S, BO); 12577 } 12578 12579 // These break the otherwise-useful invariant below. Fortunately, 12580 // we don't really need to recurse into them, because any internal 12581 // expressions should have been analyzed already when they were 12582 // built into statements. 12583 if (isa<StmtExpr>(E)) return; 12584 12585 // Don't descend into unevaluated contexts. 12586 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 12587 12588 // Now just recurse over the expression's children. 12589 CC = E->getExprLoc(); 12590 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 12591 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 12592 for (Stmt *SubStmt : E->children()) { 12593 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 12594 if (!ChildExpr) 12595 continue; 12596 12597 if (IsLogicalAndOperator && 12598 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 12599 // Ignore checking string literals that are in logical and operators. 12600 // This is a common pattern for asserts. 12601 continue; 12602 WorkList.push_back({ChildExpr, CC, IsListInit}); 12603 } 12604 12605 if (BO && BO->isLogicalOp()) { 12606 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 12607 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 12608 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 12609 12610 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 12611 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 12612 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 12613 } 12614 12615 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 12616 if (U->getOpcode() == UO_LNot) { 12617 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 12618 } else if (U->getOpcode() != UO_AddrOf) { 12619 if (U->getSubExpr()->getType()->isAtomicType()) 12620 S.Diag(U->getSubExpr()->getBeginLoc(), 12621 diag::warn_atomic_implicit_seq_cst); 12622 } 12623 } 12624 } 12625 12626 /// AnalyzeImplicitConversions - Find and report any interesting 12627 /// implicit conversions in the given expression. There are a couple 12628 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 
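/// This entry point drives the data-recursive variant above through an
/// explicit work list instead of recursing directly into subexpressions.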
12629 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
12630 bool IsListInit/*= false*/) {
12631 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
12632 WorkList.push_back({OrigE, CC, IsListInit});
12633 while (!WorkList.empty())
12634 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
12635 }
12636
12637 /// Check that the expression has an integer type, and diagnose any implicit
12638 /// conversion to it.
12638 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
12639 // Taking into account implicit conversions,
12640 // allow any integer.
12641 if (!E->getType()->isIntegerType()) {
12642 S.Diag(E->getBeginLoc(),
12643 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
12644 return true;
12645 }
12646 // Potentially emit standard warnings for implicit conversions if enabled
12647 // using -Wconversion.
12648 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
12649 return false;
12650 }
12651
12652 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
12653 // Returns true when emitting a warning about taking the address of a reference.
12654 static bool CheckForReference(Sema &SemaRef, const Expr *E,
12655 const PartialDiagnostic &PD) {
12656 E = E->IgnoreParenImpCasts();
12657
12658 const FunctionDecl *FD = nullptr;
12659
12660 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
12661 if (!DRE->getDecl()->getType()->isReferenceType())
12662 return false;
12663 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
12664 if (!M->getMemberDecl()->getType()->isReferenceType())
12665 return false;
12666 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
12667 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
12668 return false;
12669 FD = Call->getDirectCallee();
12670 } else {
12671 return false;
12672 }
12673
12674 SemaRef.Diag(E->getExprLoc(), PD);
12675
12676 // If possible, point to location of function.
12677 if (FD) {
12678 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
12679 }
12680
12681 return true;
12682 }
12683
12684 // Returns true if the SourceLocation is expanded from any macro body.
12685 // Returns false if the SourceLocation is invalid, is not in a macro
12686 // expansion, or is expanded from a top-level macro argument.
12687 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
12688 if (Loc.isInvalid())
12689 return false;
12690
12691 while (Loc.isMacroID()) {
12692 if (SM.isMacroBodyExpansion(Loc))
12693 return true;
12694 Loc = SM.getImmediateMacroCallerLoc(Loc);
12695 }
12696
12697 return false;
12698 }
12699
12700 /// Diagnose pointers that are always non-null.
12701 /// \param E the expression containing the pointer
12702 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
12703 /// compared to a null pointer
12704 /// \param IsEqual True when the comparison is equal to a null pointer
12705 /// \param Range Extra SourceRange to highlight in the diagnostic
12706 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
12707 Expr::NullPointerConstantKind NullKind,
12708 bool IsEqual, SourceRange Range) {
12709 if (!E)
12710 return;
12711
12712 // Don't warn inside macros.
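// (Locations that come only from a macro argument are still diagnosed;
// IsInAnyMacroBody() returns false for those.)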
12713 if (E->getExprLoc().isMacroID()) { 12714 const SourceManager &SM = getSourceManager(); 12715 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 12716 IsInAnyMacroBody(SM, Range.getBegin())) 12717 return; 12718 } 12719 E = E->IgnoreImpCasts(); 12720 12721 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 12722 12723 if (isa<CXXThisExpr>(E)) { 12724 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 12725 : diag::warn_this_bool_conversion; 12726 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 12727 return; 12728 } 12729 12730 bool IsAddressOf = false; 12731 12732 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 12733 if (UO->getOpcode() != UO_AddrOf) 12734 return; 12735 IsAddressOf = true; 12736 E = UO->getSubExpr(); 12737 } 12738 12739 if (IsAddressOf) { 12740 unsigned DiagID = IsCompare 12741 ? diag::warn_address_of_reference_null_compare 12742 : diag::warn_address_of_reference_bool_conversion; 12743 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 12744 << IsEqual; 12745 if (CheckForReference(*this, E, PD)) { 12746 return; 12747 } 12748 } 12749 12750 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 12751 bool IsParam = isa<NonNullAttr>(NonnullAttr); 12752 std::string Str; 12753 llvm::raw_string_ostream S(Str); 12754 E->printPretty(S, nullptr, getPrintingPolicy()); 12755 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 12756 : diag::warn_cast_nonnull_to_bool; 12757 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 12758 << E->getSourceRange() << Range << IsEqual; 12759 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 12760 }; 12761 12762 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 12763 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 12764 if (auto *Callee = Call->getDirectCallee()) { 12765 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 12766 ComplainAboutNonnullParamOrCall(A); 12767 return; 12768 } 12769 } 12770 } 12771 12772 // Expect to find a single Decl. Skip anything more complicated. 12773 ValueDecl *D = nullptr; 12774 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 12775 D = R->getDecl(); 12776 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 12777 D = M->getMemberDecl(); 12778 } 12779 12780 // Weak Decls can be null. 12781 if (!D || D->isWeak()) 12782 return; 12783 12784 // Check for parameter decl with nonnull attribute 12785 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 12786 if (getCurFunction() && 12787 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 12788 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 12789 ComplainAboutNonnullParamOrCall(A); 12790 return; 12791 } 12792 12793 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 12794 // Skip function template not specialized yet. 
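// For an instantiated function, see whether a nonnull attribute on the
// enclosing function covers this parameter; an attribute written without
// arguments applies to every pointer parameter.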
12795 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
12796 return;
12797 auto ParamIter = llvm::find(FD->parameters(), PV);
12798 assert(ParamIter != FD->param_end());
12799 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
12800
12801 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
12802 if (!NonNull->args_size()) {
12803 ComplainAboutNonnullParamOrCall(NonNull);
12804 return;
12805 }
12806
12807 for (const ParamIdx &ArgNo : NonNull->args()) {
12808 if (ArgNo.getASTIndex() == ParamNo) {
12809 ComplainAboutNonnullParamOrCall(NonNull);
12810 return;
12811 }
12812 }
12813 }
12814 }
12815 }
12816 }
12817
12818 QualType T = D->getType();
12819 const bool IsArray = T->isArrayType();
12820 const bool IsFunction = T->isFunctionType();
12821
12822 // Taking the address of a function silences the function warning.
12823 if (IsAddressOf && IsFunction) {
12824 return;
12825 }
12826
12827 // Found nothing.
12828 if (!IsAddressOf && !IsFunction && !IsArray)
12829 return;
12830
12831 // Pretty print the expression for the diagnostic.
12832 std::string Str;
12833 llvm::raw_string_ostream S(Str);
12834 E->printPretty(S, nullptr, getPrintingPolicy());
12835
12836 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
12837 : diag::warn_impcast_pointer_to_bool;
12838 enum {
12839 AddressOf,
12840 FunctionPointer,
12841 ArrayPointer
12842 } DiagType;
12843 if (IsAddressOf)
12844 DiagType = AddressOf;
12845 else if (IsFunction)
12846 DiagType = FunctionPointer;
12847 else if (IsArray)
12848 DiagType = ArrayPointer;
12849 else
12850 llvm_unreachable("Could not determine diagnostic.");
12851 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
12852 << Range << IsEqual;
12853
12854 if (!IsFunction)
12855 return;
12856
12857 // Suggest '&' to silence the function warning.
12858 Diag(E->getExprLoc(), diag::note_function_warning_silence)
12859 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
12860
12861 // Check to see if '()' fixit should be emitted.
12862 QualType ReturnType;
12863 UnresolvedSet<4> NonTemplateOverloads;
12864 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
12865 if (ReturnType.isNull())
12866 return;
12867
12868 if (IsCompare) {
12869 // There are two cases here. If there is a null constant, only suggest the
12870 // fix-it for a pointer return type. If the null is 0, then suggest it if
12871 // the return type is a pointer or an integer type.
12872 if (!ReturnType->isPointerType()) {
12873 if (NullKind == Expr::NPCK_ZeroExpression ||
12874 NullKind == Expr::NPCK_ZeroLiteral) {
12875 if (!ReturnType->isIntegerType())
12876 return;
12877 } else {
12878 return;
12879 }
12880 }
12881 } else { // !IsCompare
12882 // For function to bool, only suggest it if the function pointer has a bool
12883 // return type.
12884 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
12885 return;
12886 }
12887 Diag(E->getExprLoc(), diag::note_function_to_function_call)
12888 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
12889 }
12890
12891 /// Diagnoses "dangerous" implicit conversions within the given
12892 /// expression (which is a full expression). Implements -Wconversion
12893 /// and -Wsign-compare.
12894 ///
12895 /// \param CC the "context" location of the implicit conversion, i.e.
12896 /// the location of the syntactic entity requiring the implicit
12897 /// conversion
12898 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
12899 // Don't diagnose in unevaluated contexts.
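// (e.g. the operand of sizeof or decltype)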
12900 if (isUnevaluatedContext()) 12901 return; 12902 12903 // Don't diagnose for value- or type-dependent expressions. 12904 if (E->isTypeDependent() || E->isValueDependent()) 12905 return; 12906 12907 // Check for array bounds violations in cases where the check isn't triggered 12908 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 12909 // ArraySubscriptExpr is on the RHS of a variable initialization. 12910 CheckArrayAccess(E); 12911 12912 // This is not the right CC for (e.g.) a variable initialization. 12913 AnalyzeImplicitConversions(*this, E, CC); 12914 } 12915 12916 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 12917 /// Input argument E is a logical expression. 12918 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 12919 ::CheckBoolLikeConversion(*this, E, CC); 12920 } 12921 12922 /// Diagnose when expression is an integer constant expression and its evaluation 12923 /// results in integer overflow 12924 void Sema::CheckForIntOverflow (Expr *E) { 12925 // Use a work list to deal with nested struct initializers. 12926 SmallVector<Expr *, 2> Exprs(1, E); 12927 12928 do { 12929 Expr *OriginalE = Exprs.pop_back_val(); 12930 Expr *E = OriginalE->IgnoreParenCasts(); 12931 12932 if (isa<BinaryOperator>(E)) { 12933 E->EvaluateForOverflow(Context); 12934 continue; 12935 } 12936 12937 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 12938 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 12939 else if (isa<ObjCBoxedExpr>(OriginalE)) 12940 E->EvaluateForOverflow(Context); 12941 else if (auto Call = dyn_cast<CallExpr>(E)) 12942 Exprs.append(Call->arg_begin(), Call->arg_end()); 12943 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 12944 Exprs.append(Message->arg_begin(), Message->arg_end()); 12945 } while (!Exprs.empty()); 12946 } 12947 12948 namespace { 12949 12950 /// Visitor for expressions which looks for unsequenced operations on the 12951 /// same object. 12952 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 12953 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 12954 12955 /// A tree of sequenced regions within an expression. Two regions are 12956 /// unsequenced if one is an ancestor or a descendent of the other. When we 12957 /// finish processing an expression with sequencing, such as a comma 12958 /// expression, we fold its tree nodes into its parent, since they are 12959 /// unsequenced with respect to nodes we will visit later. 12960 class SequenceTree { 12961 struct Value { 12962 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 12963 unsigned Parent : 31; 12964 unsigned Merged : 1; 12965 }; 12966 SmallVector<Value, 8> Values; 12967 12968 public: 12969 /// A region within an expression which may be sequenced with respect 12970 /// to some other region. 12971 class Seq { 12972 friend class SequenceTree; 12973 12974 unsigned Index; 12975 12976 explicit Seq(unsigned N) : Index(N) {} 12977 12978 public: 12979 Seq() : Index(0) {} 12980 }; 12981 12982 SequenceTree() { Values.push_back(Value(0)); } 12983 Seq root() const { return Seq(0); } 12984 12985 /// Create a new sequence of operations, which is an unsequenced 12986 /// subset of \p Parent. This sequence of operations is sequenced with 12987 /// respect to other children of \p Parent. 12988 Seq allocate(Seq Parent) { 12989 Values.push_back(Value(Parent.Index)); 12990 return Seq(Values.size() - 1); 12991 } 12992 12993 /// Merge a sequence of operations into its parent. 
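/// Afterwards its operations are treated as unsequenced with respect to
/// operations visited later (see the comment on SequenceTree above).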
12994 void merge(Seq S) { 12995 Values[S.Index].Merged = true; 12996 } 12997 12998 /// Determine whether two operations are unsequenced. This operation 12999 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 13000 /// should have been merged into its parent as appropriate. 13001 bool isUnsequenced(Seq Cur, Seq Old) { 13002 unsigned C = representative(Cur.Index); 13003 unsigned Target = representative(Old.Index); 13004 while (C >= Target) { 13005 if (C == Target) 13006 return true; 13007 C = Values[C].Parent; 13008 } 13009 return false; 13010 } 13011 13012 private: 13013 /// Pick a representative for a sequence. 13014 unsigned representative(unsigned K) { 13015 if (Values[K].Merged) 13016 // Perform path compression as we go. 13017 return Values[K].Parent = representative(Values[K].Parent); 13018 return K; 13019 } 13020 }; 13021 13022 /// An object for which we can track unsequenced uses. 13023 using Object = const NamedDecl *; 13024 13025 /// Different flavors of object usage which we track. We only track the 13026 /// least-sequenced usage of each kind. 13027 enum UsageKind { 13028 /// A read of an object. Multiple unsequenced reads are OK. 13029 UK_Use, 13030 13031 /// A modification of an object which is sequenced before the value 13032 /// computation of the expression, such as ++n in C++. 13033 UK_ModAsValue, 13034 13035 /// A modification of an object which is not sequenced before the value 13036 /// computation of the expression, such as n++. 13037 UK_ModAsSideEffect, 13038 13039 UK_Count = UK_ModAsSideEffect + 1 13040 }; 13041 13042 /// Bundle together a sequencing region and the expression corresponding 13043 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 13044 struct Usage { 13045 const Expr *UsageExpr; 13046 SequenceTree::Seq Seq; 13047 13048 Usage() : UsageExpr(nullptr), Seq() {} 13049 }; 13050 13051 struct UsageInfo { 13052 Usage Uses[UK_Count]; 13053 13054 /// Have we issued a diagnostic for this object already? 13055 bool Diagnosed; 13056 13057 UsageInfo() : Uses(), Diagnosed(false) {} 13058 }; 13059 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 13060 13061 Sema &SemaRef; 13062 13063 /// Sequenced regions within the expression. 13064 SequenceTree Tree; 13065 13066 /// Declaration modifications and references which we have seen. 13067 UsageInfoMap UsageMap; 13068 13069 /// The region we are currently within. 13070 SequenceTree::Seq Region; 13071 13072 /// Filled in with declarations which were modified as a side-effect 13073 /// (that is, post-increment operations). 13074 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 13075 13076 /// Expressions to check later. We defer checking these to reduce 13077 /// stack usage. 13078 SmallVectorImpl<const Expr *> &WorkList; 13079 13080 /// RAII object wrapping the visitation of a sequenced subexpression of an 13081 /// expression. At the end of this process, the side-effects of the evaluation 13082 /// become sequenced with respect to the value computation of the result, so 13083 /// we downgrade any UK_ModAsSideEffect within the evaluation to 13084 /// UK_ModAsValue. 
13085 struct SequencedSubexpression { 13086 SequencedSubexpression(SequenceChecker &Self) 13087 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 13088 Self.ModAsSideEffect = &ModAsSideEffect; 13089 } 13090 13091 ~SequencedSubexpression() { 13092 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 13093 // Add a new usage with usage kind UK_ModAsValue, and then restore 13094 // the previous usage with UK_ModAsSideEffect (thus clearing it if 13095 // the previous one was empty). 13096 UsageInfo &UI = Self.UsageMap[M.first]; 13097 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 13098 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 13099 SideEffectUsage = M.second; 13100 } 13101 Self.ModAsSideEffect = OldModAsSideEffect; 13102 } 13103 13104 SequenceChecker &Self; 13105 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 13106 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 13107 }; 13108 13109 /// RAII object wrapping the visitation of a subexpression which we might 13110 /// choose to evaluate as a constant. If any subexpression is evaluated and 13111 /// found to be non-constant, this allows us to suppress the evaluation of 13112 /// the outer expression. 13113 class EvaluationTracker { 13114 public: 13115 EvaluationTracker(SequenceChecker &Self) 13116 : Self(Self), Prev(Self.EvalTracker) { 13117 Self.EvalTracker = this; 13118 } 13119 13120 ~EvaluationTracker() { 13121 Self.EvalTracker = Prev; 13122 if (Prev) 13123 Prev->EvalOK &= EvalOK; 13124 } 13125 13126 bool evaluate(const Expr *E, bool &Result) { 13127 if (!EvalOK || E->isValueDependent()) 13128 return false; 13129 EvalOK = E->EvaluateAsBooleanCondition( 13130 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 13131 return EvalOK; 13132 } 13133 13134 private: 13135 SequenceChecker &Self; 13136 EvaluationTracker *Prev; 13137 bool EvalOK = true; 13138 } *EvalTracker = nullptr; 13139 13140 /// Find the object which is produced by the specified expression, 13141 /// if any. 13142 Object getObject(const Expr *E, bool Mod) const { 13143 E = E->IgnoreParenCasts(); 13144 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 13145 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 13146 return getObject(UO->getSubExpr(), Mod); 13147 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 13148 if (BO->getOpcode() == BO_Comma) 13149 return getObject(BO->getRHS(), Mod); 13150 if (Mod && BO->isAssignmentOp()) 13151 return getObject(BO->getLHS(), Mod); 13152 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 13153 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 13154 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 13155 return ME->getMemberDecl(); 13156 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 13157 // FIXME: If this is a reference, map through to its value. 13158 return DRE->getDecl(); 13159 return nullptr; 13160 } 13161 13162 /// Note that an object \p O was modified or used by an expression 13163 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for 13164 /// the object \p O as obtained via the \p UsageMap. 13165 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) { 13166 // Get the old usage for the given object and usage kind. 
13167 Usage &U = UI.Uses[UK]; 13168 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) { 13169 // If we have a modification as side effect and are in a sequenced 13170 // subexpression, save the old Usage so that we can restore it later 13171 // in SequencedSubexpression::~SequencedSubexpression. 13172 if (UK == UK_ModAsSideEffect && ModAsSideEffect) 13173 ModAsSideEffect->push_back(std::make_pair(O, U)); 13174 // Then record the new usage with the current sequencing region. 13175 U.UsageExpr = UsageExpr; 13176 U.Seq = Region; 13177 } 13178 } 13179 13180 /// Check whether a modification or use of an object \p O in an expression 13181 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is 13182 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap. 13183 /// \p IsModMod is true when we are checking for a mod-mod unsequenced 13184 /// usage and false we are checking for a mod-use unsequenced usage. 13185 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, 13186 UsageKind OtherKind, bool IsModMod) { 13187 if (UI.Diagnosed) 13188 return; 13189 13190 const Usage &U = UI.Uses[OtherKind]; 13191 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) 13192 return; 13193 13194 const Expr *Mod = U.UsageExpr; 13195 const Expr *ModOrUse = UsageExpr; 13196 if (OtherKind == UK_Use) 13197 std::swap(Mod, ModOrUse); 13198 13199 SemaRef.DiagRuntimeBehavior( 13200 Mod->getExprLoc(), {Mod, ModOrUse}, 13201 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod 13202 : diag::warn_unsequenced_mod_use) 13203 << O << SourceRange(ModOrUse->getExprLoc())); 13204 UI.Diagnosed = true; 13205 } 13206 13207 // A note on note{Pre, Post}{Use, Mod}: 13208 // 13209 // (It helps to follow the algorithm with an expression such as 13210 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced 13211 // operations before C++17 and both are well-defined in C++17). 13212 // 13213 // When visiting a node which uses/modify an object we first call notePreUse 13214 // or notePreMod before visiting its sub-expression(s). At this point the 13215 // children of the current node have not yet been visited and so the eventual 13216 // uses/modifications resulting from the children of the current node have not 13217 // been recorded yet. 13218 // 13219 // We then visit the children of the current node. After that notePostUse or 13220 // notePostMod is called. These will 1) detect an unsequenced modification 13221 // as side effect (as in "k++ + k") and 2) add a new usage with the 13222 // appropriate usage kind. 13223 // 13224 // We also have to be careful that some operation sequences modification as 13225 // side effect as well (for example: || or ,). To account for this we wrap 13226 // the visitation of such a sub-expression (for example: the LHS of || or ,) 13227 // with SequencedSubexpression. SequencedSubexpression is an RAII object 13228 // which record usages which are modifications as side effect, and then 13229 // downgrade them (or more accurately restore the previous usage which was a 13230 // modification as side effect) when exiting the scope of the sequenced 13231 // subexpression. 13232 13233 void notePreUse(Object O, const Expr *UseExpr) { 13234 UsageInfo &UI = UsageMap[O]; 13235 // Uses conflict with other modifications. 
13236 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 13237 } 13238 13239 void notePostUse(Object O, const Expr *UseExpr) { 13240 UsageInfo &UI = UsageMap[O]; 13241 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 13242 /*IsModMod=*/false); 13243 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 13244 } 13245 13246 void notePreMod(Object O, const Expr *ModExpr) { 13247 UsageInfo &UI = UsageMap[O]; 13248 // Modifications conflict with other modifications and with uses. 13249 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 13250 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 13251 } 13252 13253 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 13254 UsageInfo &UI = UsageMap[O]; 13255 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 13256 /*IsModMod=*/true); 13257 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 13258 } 13259 13260 public: 13261 SequenceChecker(Sema &S, const Expr *E, 13262 SmallVectorImpl<const Expr *> &WorkList) 13263 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 13264 Visit(E); 13265 // Silence a -Wunused-private-field since WorkList is now unused. 13266 // TODO: Evaluate if it can be used, and if not remove it. 13267 (void)this->WorkList; 13268 } 13269 13270 void VisitStmt(const Stmt *S) { 13271 // Skip all statements which aren't expressions for now. 13272 } 13273 13274 void VisitExpr(const Expr *E) { 13275 // By default, just recurse to evaluated subexpressions. 13276 Base::VisitStmt(E); 13277 } 13278 13279 void VisitCastExpr(const CastExpr *E) { 13280 Object O = Object(); 13281 if (E->getCastKind() == CK_LValueToRValue) 13282 O = getObject(E->getSubExpr(), false); 13283 13284 if (O) 13285 notePreUse(O, E); 13286 VisitExpr(E); 13287 if (O) 13288 notePostUse(O, E); 13289 } 13290 13291 void VisitSequencedExpressions(const Expr *SequencedBefore, 13292 const Expr *SequencedAfter) { 13293 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 13294 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 13295 SequenceTree::Seq OldRegion = Region; 13296 13297 { 13298 SequencedSubexpression SeqBefore(*this); 13299 Region = BeforeRegion; 13300 Visit(SequencedBefore); 13301 } 13302 13303 Region = AfterRegion; 13304 Visit(SequencedAfter); 13305 13306 Region = OldRegion; 13307 13308 Tree.merge(BeforeRegion); 13309 Tree.merge(AfterRegion); 13310 } 13311 13312 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 13313 // C++17 [expr.sub]p1: 13314 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 13315 // expression E1 is sequenced before the expression E2. 13316 if (SemaRef.getLangOpts().CPlusPlus17) 13317 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 13318 else { 13319 Visit(ASE->getLHS()); 13320 Visit(ASE->getRHS()); 13321 } 13322 } 13323 13324 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 13325 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 13326 void VisitBinPtrMem(const BinaryOperator *BO) { 13327 // C++17 [expr.mptr.oper]p4: 13328 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 13329 // the expression E1 is sequenced before the expression E2. 
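// Before C++17 the two operands are unsequenced, so visit them both in the
// current region without allocating sub-regions.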
13330 if (SemaRef.getLangOpts().CPlusPlus17) 13331 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13332 else { 13333 Visit(BO->getLHS()); 13334 Visit(BO->getRHS()); 13335 } 13336 } 13337 13338 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 13339 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 13340 void VisitBinShlShr(const BinaryOperator *BO) { 13341 // C++17 [expr.shift]p4: 13342 // The expression E1 is sequenced before the expression E2. 13343 if (SemaRef.getLangOpts().CPlusPlus17) 13344 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13345 else { 13346 Visit(BO->getLHS()); 13347 Visit(BO->getRHS()); 13348 } 13349 } 13350 13351 void VisitBinComma(const BinaryOperator *BO) { 13352 // C++11 [expr.comma]p1: 13353 // Every value computation and side effect associated with the left 13354 // expression is sequenced before every value computation and side 13355 // effect associated with the right expression. 13356 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13357 } 13358 13359 void VisitBinAssign(const BinaryOperator *BO) { 13360 SequenceTree::Seq RHSRegion; 13361 SequenceTree::Seq LHSRegion; 13362 if (SemaRef.getLangOpts().CPlusPlus17) { 13363 RHSRegion = Tree.allocate(Region); 13364 LHSRegion = Tree.allocate(Region); 13365 } else { 13366 RHSRegion = Region; 13367 LHSRegion = Region; 13368 } 13369 SequenceTree::Seq OldRegion = Region; 13370 13371 // C++11 [expr.ass]p1: 13372 // [...] the assignment is sequenced after the value computation 13373 // of the right and left operands, [...] 13374 // 13375 // so check it before inspecting the operands and update the 13376 // map afterwards. 13377 Object O = getObject(BO->getLHS(), /*Mod=*/true); 13378 if (O) 13379 notePreMod(O, BO); 13380 13381 if (SemaRef.getLangOpts().CPlusPlus17) { 13382 // C++17 [expr.ass]p1: 13383 // [...] The right operand is sequenced before the left operand. [...] 13384 { 13385 SequencedSubexpression SeqBefore(*this); 13386 Region = RHSRegion; 13387 Visit(BO->getRHS()); 13388 } 13389 13390 Region = LHSRegion; 13391 Visit(BO->getLHS()); 13392 13393 if (O && isa<CompoundAssignOperator>(BO)) 13394 notePostUse(O, BO); 13395 13396 } else { 13397 // C++11 does not specify any sequencing between the LHS and RHS. 13398 Region = LHSRegion; 13399 Visit(BO->getLHS()); 13400 13401 if (O && isa<CompoundAssignOperator>(BO)) 13402 notePostUse(O, BO); 13403 13404 Region = RHSRegion; 13405 Visit(BO->getRHS()); 13406 } 13407 13408 // C++11 [expr.ass]p1: 13409 // the assignment is sequenced [...] before the value computation of the 13410 // assignment expression. 13411 // C11 6.5.16/3 has no such rule. 13412 Region = OldRegion; 13413 if (O) 13414 notePostMod(O, BO, 13415 SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 13416 : UK_ModAsSideEffect); 13417 if (SemaRef.getLangOpts().CPlusPlus17) { 13418 Tree.merge(RHSRegion); 13419 Tree.merge(LHSRegion); 13420 } 13421 } 13422 13423 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 13424 VisitBinAssign(CAO); 13425 } 13426 13427 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 13428 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 13429 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 13430 Object O = getObject(UO->getSubExpr(), true); 13431 if (!O) 13432 return VisitExpr(UO); 13433 13434 notePreMod(O, UO); 13435 Visit(UO->getSubExpr()); 13436 // C++11 [expr.pre.incr]p1: 13437 // the expression ++x is equivalent to x+=1 13438 notePostMod(O, UO, 13439 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 13440 : UK_ModAsSideEffect); 13441 } 13442 13443 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 13444 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 13445 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 13446 Object O = getObject(UO->getSubExpr(), true); 13447 if (!O) 13448 return VisitExpr(UO); 13449 13450 notePreMod(O, UO); 13451 Visit(UO->getSubExpr()); 13452 notePostMod(O, UO, UK_ModAsSideEffect); 13453 } 13454 13455 void VisitBinLOr(const BinaryOperator *BO) { 13456 // C++11 [expr.log.or]p2: 13457 // If the second expression is evaluated, every value computation and 13458 // side effect associated with the first expression is sequenced before 13459 // every value computation and side effect associated with the 13460 // second expression. 13461 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 13462 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 13463 SequenceTree::Seq OldRegion = Region; 13464 13465 EvaluationTracker Eval(*this); 13466 { 13467 SequencedSubexpression Sequenced(*this); 13468 Region = LHSRegion; 13469 Visit(BO->getLHS()); 13470 } 13471 13472 // C++11 [expr.log.or]p1: 13473 // [...] the second operand is not evaluated if the first operand 13474 // evaluates to true. 13475 bool EvalResult = false; 13476 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 13477 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 13478 if (ShouldVisitRHS) { 13479 Region = RHSRegion; 13480 Visit(BO->getRHS()); 13481 } 13482 13483 Region = OldRegion; 13484 Tree.merge(LHSRegion); 13485 Tree.merge(RHSRegion); 13486 } 13487 13488 void VisitBinLAnd(const BinaryOperator *BO) { 13489 // C++11 [expr.log.and]p2: 13490 // If the second expression is evaluated, every value computation and 13491 // side effect associated with the first expression is sequenced before 13492 // every value computation and side effect associated with the 13493 // second expression. 13494 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 13495 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 13496 SequenceTree::Seq OldRegion = Region; 13497 13498 EvaluationTracker Eval(*this); 13499 { 13500 SequencedSubexpression Sequenced(*this); 13501 Region = LHSRegion; 13502 Visit(BO->getLHS()); 13503 } 13504 13505 // C++11 [expr.log.and]p1: 13506 // [...] the second operand is not evaluated if the first operand is false. 
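// If the LHS folds to a constant, skip the operand that would not be
// evaluated; otherwise visit both so either outcome is checked.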
13507 bool EvalResult = false; 13508 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 13509 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult); 13510 if (ShouldVisitRHS) { 13511 Region = RHSRegion; 13512 Visit(BO->getRHS()); 13513 } 13514 13515 Region = OldRegion; 13516 Tree.merge(LHSRegion); 13517 Tree.merge(RHSRegion); 13518 } 13519 13520 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) { 13521 // C++11 [expr.cond]p1: 13522 // [...] Every value computation and side effect associated with the first 13523 // expression is sequenced before every value computation and side effect 13524 // associated with the second or third expression. 13525 SequenceTree::Seq ConditionRegion = Tree.allocate(Region); 13526 13527 // No sequencing is specified between the true and false expression. 13528 // However since exactly one of both is going to be evaluated we can 13529 // consider them to be sequenced. This is needed to avoid warning on 13530 // something like "x ? y+= 1 : y += 2;" in the case where we will visit 13531 // both the true and false expressions because we can't evaluate x. 13532 // This will still allow us to detect an expression like (pre C++17) 13533 // "(x ? y += 1 : y += 2) = y". 13534 // 13535 // We don't wrap the visitation of the true and false expression with 13536 // SequencedSubexpression because we don't want to downgrade modifications 13537 // as side effect in the true and false expressions after the visition 13538 // is done. (for example in the expression "(x ? y++ : y++) + y" we should 13539 // not warn between the two "y++", but we should warn between the "y++" 13540 // and the "y". 13541 SequenceTree::Seq TrueRegion = Tree.allocate(Region); 13542 SequenceTree::Seq FalseRegion = Tree.allocate(Region); 13543 SequenceTree::Seq OldRegion = Region; 13544 13545 EvaluationTracker Eval(*this); 13546 { 13547 SequencedSubexpression Sequenced(*this); 13548 Region = ConditionRegion; 13549 Visit(CO->getCond()); 13550 } 13551 13552 // C++11 [expr.cond]p1: 13553 // [...] The first expression is contextually converted to bool (Clause 4). 13554 // It is evaluated and if it is true, the result of the conditional 13555 // expression is the value of the second expression, otherwise that of the 13556 // third expression. Only one of the second and third expressions is 13557 // evaluated. [...] 13558 bool EvalResult = false; 13559 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult); 13560 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult); 13561 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult); 13562 if (ShouldVisitTrueExpr) { 13563 Region = TrueRegion; 13564 Visit(CO->getTrueExpr()); 13565 } 13566 if (ShouldVisitFalseExpr) { 13567 Region = FalseRegion; 13568 Visit(CO->getFalseExpr()); 13569 } 13570 13571 Region = OldRegion; 13572 Tree.merge(ConditionRegion); 13573 Tree.merge(TrueRegion); 13574 Tree.merge(FalseRegion); 13575 } 13576 13577 void VisitCallExpr(const CallExpr *CE) { 13578 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions. 13579 13580 if (CE->isUnevaluatedBuiltinCall(Context)) 13581 return; 13582 13583 // C++11 [intro.execution]p15: 13584 // When calling a function [...], every value computation and side effect 13585 // associated with any argument expression, or with the postfix expression 13586 // designating the called function, is sequenced before execution of every 13587 // expression or statement in the body of the function [and thus before 13588 // the value computation of its result]. 
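// The SequencedSubexpression below models this: modifications recorded as
// side effects while visiting the call are downgraded to UK_ModAsValue when
// it goes out of scope.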
13589 SequencedSubexpression Sequenced(*this); 13590 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 13591 // C++17 [expr.call]p5 13592 // The postfix-expression is sequenced before each expression in the 13593 // expression-list and any default argument. [...] 13594 SequenceTree::Seq CalleeRegion; 13595 SequenceTree::Seq OtherRegion; 13596 if (SemaRef.getLangOpts().CPlusPlus17) { 13597 CalleeRegion = Tree.allocate(Region); 13598 OtherRegion = Tree.allocate(Region); 13599 } else { 13600 CalleeRegion = Region; 13601 OtherRegion = Region; 13602 } 13603 SequenceTree::Seq OldRegion = Region; 13604 13605 // Visit the callee expression first. 13606 Region = CalleeRegion; 13607 if (SemaRef.getLangOpts().CPlusPlus17) { 13608 SequencedSubexpression Sequenced(*this); 13609 Visit(CE->getCallee()); 13610 } else { 13611 Visit(CE->getCallee()); 13612 } 13613 13614 // Then visit the argument expressions. 13615 Region = OtherRegion; 13616 for (const Expr *Argument : CE->arguments()) 13617 Visit(Argument); 13618 13619 Region = OldRegion; 13620 if (SemaRef.getLangOpts().CPlusPlus17) { 13621 Tree.merge(CalleeRegion); 13622 Tree.merge(OtherRegion); 13623 } 13624 }); 13625 } 13626 13627 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 13628 // C++17 [over.match.oper]p2: 13629 // [...] the operator notation is first transformed to the equivalent 13630 // function-call notation as summarized in Table 12 (where @ denotes one 13631 // of the operators covered in the specified subclause). However, the 13632 // operands are sequenced in the order prescribed for the built-in 13633 // operator (Clause 8). 13634 // 13635 // From the above only overloaded binary operators and overloaded call 13636 // operators have sequencing rules in C++17 that we need to handle 13637 // separately. 13638 if (!SemaRef.getLangOpts().CPlusPlus17 || 13639 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 13640 return VisitCallExpr(CXXOCE); 13641 13642 enum { 13643 NoSequencing, 13644 LHSBeforeRHS, 13645 RHSBeforeLHS, 13646 LHSBeforeRest 13647 } SequencingKind; 13648 switch (CXXOCE->getOperator()) { 13649 case OO_Equal: 13650 case OO_PlusEqual: 13651 case OO_MinusEqual: 13652 case OO_StarEqual: 13653 case OO_SlashEqual: 13654 case OO_PercentEqual: 13655 case OO_CaretEqual: 13656 case OO_AmpEqual: 13657 case OO_PipeEqual: 13658 case OO_LessLessEqual: 13659 case OO_GreaterGreaterEqual: 13660 SequencingKind = RHSBeforeLHS; 13661 break; 13662 13663 case OO_LessLess: 13664 case OO_GreaterGreater: 13665 case OO_AmpAmp: 13666 case OO_PipePipe: 13667 case OO_Comma: 13668 case OO_ArrowStar: 13669 case OO_Subscript: 13670 SequencingKind = LHSBeforeRHS; 13671 break; 13672 13673 case OO_Call: 13674 SequencingKind = LHSBeforeRest; 13675 break; 13676 13677 default: 13678 SequencingKind = NoSequencing; 13679 break; 13680 } 13681 13682 if (SequencingKind == NoSequencing) 13683 return VisitCallExpr(CXXOCE); 13684 13685 // This is a call, so all subexpressions are sequenced before the result. 
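// Illustrative note (an assumed example): with an overloaded operator<<,
// "os << i << i++" is diagnosed before C++17 but not in C++17 mode, where
// the left operand is sequenced before the right one just as for the
// built-in operator.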
13686 SequencedSubexpression Sequenced(*this); 13687 13688 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 13689 assert(SemaRef.getLangOpts().CPlusPlus17 && 13690 "Should only get there with C++17 and above!"); 13691 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 13692 "Should only get there with an overloaded binary operator" 13693 " or an overloaded call operator!"); 13694 13695 if (SequencingKind == LHSBeforeRest) { 13696 assert(CXXOCE->getOperator() == OO_Call && 13697 "We should only have an overloaded call operator here!"); 13698 13699 // This is very similar to VisitCallExpr, except that we only have the 13700 // C++17 case. The postfix-expression is the first argument of the 13701 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 13702 // are in the following arguments. 13703 // 13704 // Note that we intentionally do not visit the callee expression since 13705 // it is just a decayed reference to a function. 13706 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 13707 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 13708 SequenceTree::Seq OldRegion = Region; 13709 13710 assert(CXXOCE->getNumArgs() >= 1 && 13711 "An overloaded call operator must have at least one argument" 13712 " for the postfix-expression!"); 13713 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 13714 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 13715 CXXOCE->getNumArgs() - 1); 13716 13717 // Visit the postfix-expression first. 13718 { 13719 Region = PostfixExprRegion; 13720 SequencedSubexpression Sequenced(*this); 13721 Visit(PostfixExpr); 13722 } 13723 13724 // Then visit the argument expressions. 13725 Region = ArgsRegion; 13726 for (const Expr *Arg : Args) 13727 Visit(Arg); 13728 13729 Region = OldRegion; 13730 Tree.merge(PostfixExprRegion); 13731 Tree.merge(ArgsRegion); 13732 } else { 13733 assert(CXXOCE->getNumArgs() == 2 && 13734 "Should only have two arguments here!"); 13735 assert((SequencingKind == LHSBeforeRHS || 13736 SequencingKind == RHSBeforeLHS) && 13737 "Unexpected sequencing kind!"); 13738 13739 // We do not visit the callee expression since it is just a decayed 13740 // reference to a function. 13741 const Expr *E1 = CXXOCE->getArg(0); 13742 const Expr *E2 = CXXOCE->getArg(1); 13743 if (SequencingKind == RHSBeforeLHS) 13744 std::swap(E1, E2); 13745 13746 return VisitSequencedExpressions(E1, E2); 13747 } 13748 }); 13749 } 13750 13751 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 13752 // This is a call, so all subexpressions are sequenced before the result. 13753 SequencedSubexpression Sequenced(*this); 13754 13755 if (!CCE->isListInitialization()) 13756 return VisitExpr(CCE); 13757 13758 // In C++11, list initializations are sequenced. 13759 SmallVector<SequenceTree::Seq, 32> Elts; 13760 SequenceTree::Seq Parent = Region; 13761 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 13762 E = CCE->arg_end(); 13763 I != E; ++I) { 13764 Region = Tree.allocate(Parent); 13765 Elts.push_back(Region); 13766 Visit(*I); 13767 } 13768 13769 // Forget that the initializers are sequenced. 13770 Region = Parent; 13771 for (unsigned I = 0; I < Elts.size(); ++I) 13772 Tree.merge(Elts[I]); 13773 } 13774 13775 void VisitInitListExpr(const InitListExpr *ILE) { 13776 if (!SemaRef.getLangOpts().CPlusPlus11) 13777 return VisitExpr(ILE); 13778 13779 // In C++11, list initializations are sequenced. 
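// Illustrative note (an assumed example): because each element of a braced
// initializer list is sequenced before the next, "int a[] = {i++, i++};" is
// well defined in C++11 and is not diagnosed, unlike the call
// "f(i++, i++)".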
13780 SmallVector<SequenceTree::Seq, 32> Elts; 13781 SequenceTree::Seq Parent = Region; 13782 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 13783 const Expr *E = ILE->getInit(I); 13784 if (!E) 13785 continue; 13786 Region = Tree.allocate(Parent); 13787 Elts.push_back(Region); 13788 Visit(E); 13789 } 13790 13791 // Forget that the initializers are sequenced. 13792 Region = Parent; 13793 for (unsigned I = 0; I < Elts.size(); ++I) 13794 Tree.merge(Elts[I]); 13795 } 13796 }; 13797 13798 } // namespace 13799 13800 void Sema::CheckUnsequencedOperations(const Expr *E) { 13801 SmallVector<const Expr *, 8> WorkList; 13802 WorkList.push_back(E); 13803 while (!WorkList.empty()) { 13804 const Expr *Item = WorkList.pop_back_val(); 13805 SequenceChecker(*this, Item, WorkList); 13806 } 13807 } 13808 13809 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 13810 bool IsConstexpr) { 13811 llvm::SaveAndRestore<bool> ConstantContext( 13812 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 13813 CheckImplicitConversions(E, CheckLoc); 13814 if (!E->isInstantiationDependent()) 13815 CheckUnsequencedOperations(E); 13816 if (!IsConstexpr && !E->isValueDependent()) 13817 CheckForIntOverflow(E); 13818 DiagnoseMisalignedMembers(); 13819 } 13820 13821 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 13822 FieldDecl *BitField, 13823 Expr *Init) { 13824 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 13825 } 13826 13827 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 13828 SourceLocation Loc) { 13829 if (!PType->isVariablyModifiedType()) 13830 return; 13831 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 13832 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 13833 return; 13834 } 13835 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 13836 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 13837 return; 13838 } 13839 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 13840 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 13841 return; 13842 } 13843 13844 const ArrayType *AT = S.Context.getAsArrayType(PType); 13845 if (!AT) 13846 return; 13847 13848 if (AT->getSizeModifier() != ArrayType::Star) { 13849 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 13850 return; 13851 } 13852 13853 S.Diag(Loc, diag::err_array_star_in_function_definition); 13854 } 13855 13856 /// CheckParmsForFunctionDef - Check that the parameters of the given 13857 /// function are appropriate for the definition of a function. This 13858 /// takes care of any checks that cannot be performed on the 13859 /// declaration itself, e.g., that the types of each of the function 13860 /// parameters are complete. 13861 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 13862 bool CheckParameterNames) { 13863 bool HasInvalidParm = false; 13864 for (ParmVarDecl *Param : Parameters) { 13865 // C99 6.7.5.3p4: the parameters in a parameter type list in a 13866 // function declarator that is part of a function definition of 13867 // that function shall not have incomplete type. 13868 // 13869 // This is also C++ [dcl.fct]p6. 
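// Illustrative note (an assumed example): "struct S; void f(struct S s) {}"
// is rejected here because a parameter type must be complete in a function
// definition, even though the mere declaration "void f(struct S);" is fine.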
13870 if (!Param->isInvalidDecl() && 13871 RequireCompleteType(Param->getLocation(), Param->getType(), 13872 diag::err_typecheck_decl_incomplete_type)) { 13873 Param->setInvalidDecl(); 13874 HasInvalidParm = true; 13875 } 13876 13877 // C99 6.9.1p5: If the declarator includes a parameter type list, the 13878 // declaration of each parameter shall include an identifier. 13879 if (CheckParameterNames && Param->getIdentifier() == nullptr && 13880 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 13881 // Diagnose this as an extension in C17 and earlier. 13882 if (!getLangOpts().C2x) 13883 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 13884 } 13885 13886 // C99 6.7.5.3p12: 13887 // If the function declarator is not part of a definition of that 13888 // function, parameters may have incomplete type and may use the [*] 13889 // notation in their sequences of declarator specifiers to specify 13890 // variable length array types. 13891 QualType PType = Param->getOriginalType(); 13892 // FIXME: This diagnostic should point the '[*]' if source-location 13893 // information is added for it. 13894 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 13895 13896 // If the parameter is a c++ class type and it has to be destructed in the 13897 // callee function, declare the destructor so that it can be called by the 13898 // callee function. Do not perform any direct access check on the dtor here. 13899 if (!Param->isInvalidDecl()) { 13900 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 13901 if (!ClassDecl->isInvalidDecl() && 13902 !ClassDecl->hasIrrelevantDestructor() && 13903 !ClassDecl->isDependentContext() && 13904 ClassDecl->isParamDestroyedInCallee()) { 13905 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 13906 MarkFunctionReferenced(Param->getLocation(), Destructor); 13907 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 13908 } 13909 } 13910 } 13911 13912 // Parameters with the pass_object_size attribute only need to be marked 13913 // constant at function definitions. Because we lack information about 13914 // whether we're on a declaration or definition when we're instantiating the 13915 // attribute, we need to check for constness here. 13916 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 13917 if (!Param->getType().isConstQualified()) 13918 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 13919 << Attr->getSpelling() << 1; 13920 13921 // Check for parameter names shadowing fields from the class. 13922 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 13923 // The owning context for the parameter should be the function, but we 13924 // want to see if this function's declaration context is a record. 13925 DeclContext *DC = Param->getDeclContext(); 13926 if (DC && DC->isFunctionOrMethod()) { 13927 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 13928 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 13929 RD, /*DeclIsField*/ false); 13930 } 13931 } 13932 } 13933 13934 return HasInvalidParm; 13935 } 13936 13937 Optional<std::pair<CharUnits, CharUnits>> 13938 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 13939 13940 /// Compute the alignment and offset of the base class object given the 13941 /// derived-to-base cast expression and the alignment and offset of the derived 13942 /// class object. 
13943 static std::pair<CharUnits, CharUnits> 13944 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 13945 CharUnits BaseAlignment, CharUnits Offset, 13946 ASTContext &Ctx) { 13947 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 13948 ++PathI) { 13949 const CXXBaseSpecifier *Base = *PathI; 13950 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 13951 if (Base->isVirtual()) { 13952 // The complete object may have a lower alignment than the non-virtual 13953 // alignment of the base, in which case the base may be misaligned. Choose 13954 // the smaller of the non-virtual alignment and BaseAlignment, which is a 13955 // conservative lower bound of the complete object alignment. 13956 CharUnits NonVirtualAlignment = 13957 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 13958 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 13959 Offset = CharUnits::Zero(); 13960 } else { 13961 const ASTRecordLayout &RL = 13962 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 13963 Offset += RL.getBaseClassOffset(BaseDecl); 13964 } 13965 DerivedType = Base->getType(); 13966 } 13967 13968 return std::make_pair(BaseAlignment, Offset); 13969 } 13970 13971 /// Compute the alignment and offset of a binary additive operator. 13972 static Optional<std::pair<CharUnits, CharUnits>> 13973 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 13974 bool IsSub, ASTContext &Ctx) { 13975 QualType PointeeType = PtrE->getType()->getPointeeType(); 13976 13977 if (!PointeeType->isConstantSizeType()) 13978 return llvm::None; 13979 13980 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 13981 13982 if (!P) 13983 return llvm::None; 13984 13985 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 13986 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 13987 CharUnits Offset = EltSize * IdxRes->getExtValue(); 13988 if (IsSub) 13989 Offset = -Offset; 13990 return std::make_pair(P->first, P->second + Offset); 13991 } 13992 13993 // If the integer expression isn't a constant expression, compute the lower 13994 // bound of the alignment using the alignment and offset of the pointer 13995 // expression and the element size. 13996 return std::make_pair( 13997 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 13998 CharUnits::Zero()); 13999 } 14000 14001 /// This helper function takes an lvalue expression and returns the alignment of 14002 /// a VarDecl and a constant offset from the VarDecl. 
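/// For illustration (an assumed example): given "struct S { char c; int i; } s;",
/// the lvalue "s.i" yields the alignment of the variable 's' together with the
/// byte offset of the member 'i' within 'S'.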
14003 Optional<std::pair<CharUnits, CharUnits>> 14004 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 14005 E = E->IgnoreParens(); 14006 switch (E->getStmtClass()) { 14007 default: 14008 break; 14009 case Stmt::CStyleCastExprClass: 14010 case Stmt::CXXStaticCastExprClass: 14011 case Stmt::ImplicitCastExprClass: { 14012 auto *CE = cast<CastExpr>(E); 14013 const Expr *From = CE->getSubExpr(); 14014 switch (CE->getCastKind()) { 14015 default: 14016 break; 14017 case CK_NoOp: 14018 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14019 case CK_UncheckedDerivedToBase: 14020 case CK_DerivedToBase: { 14021 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14022 if (!P) 14023 break; 14024 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 14025 P->second, Ctx); 14026 } 14027 } 14028 break; 14029 } 14030 case Stmt::ArraySubscriptExprClass: { 14031 auto *ASE = cast<ArraySubscriptExpr>(E); 14032 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 14033 false, Ctx); 14034 } 14035 case Stmt::DeclRefExprClass: { 14036 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 14037 // FIXME: If VD is captured by copy or is an escaping __block variable, 14038 // use the alignment of VD's type. 14039 if (!VD->getType()->isReferenceType()) 14040 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 14041 if (VD->hasInit()) 14042 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 14043 } 14044 break; 14045 } 14046 case Stmt::MemberExprClass: { 14047 auto *ME = cast<MemberExpr>(E); 14048 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 14049 if (!FD || FD->getType()->isReferenceType()) 14050 break; 14051 Optional<std::pair<CharUnits, CharUnits>> P; 14052 if (ME->isArrow()) 14053 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 14054 else 14055 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 14056 if (!P) 14057 break; 14058 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 14059 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 14060 return std::make_pair(P->first, 14061 P->second + CharUnits::fromQuantity(Offset)); 14062 } 14063 case Stmt::UnaryOperatorClass: { 14064 auto *UO = cast<UnaryOperator>(E); 14065 switch (UO->getOpcode()) { 14066 default: 14067 break; 14068 case UO_Deref: 14069 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 14070 } 14071 break; 14072 } 14073 case Stmt::BinaryOperatorClass: { 14074 auto *BO = cast<BinaryOperator>(E); 14075 auto Opcode = BO->getOpcode(); 14076 switch (Opcode) { 14077 default: 14078 break; 14079 case BO_Comma: 14080 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 14081 } 14082 break; 14083 } 14084 } 14085 return llvm::None; 14086 } 14087 14088 /// This helper function takes a pointer expression and returns the alignment of 14089 /// a VarDecl and a constant offset from the VarDecl. 
14090 Optional<std::pair<CharUnits, CharUnits>> 14091 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 14092 E = E->IgnoreParens(); 14093 switch (E->getStmtClass()) { 14094 default: 14095 break; 14096 case Stmt::CStyleCastExprClass: 14097 case Stmt::CXXStaticCastExprClass: 14098 case Stmt::ImplicitCastExprClass: { 14099 auto *CE = cast<CastExpr>(E); 14100 const Expr *From = CE->getSubExpr(); 14101 switch (CE->getCastKind()) { 14102 default: 14103 break; 14104 case CK_NoOp: 14105 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 14106 case CK_ArrayToPointerDecay: 14107 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14108 case CK_UncheckedDerivedToBase: 14109 case CK_DerivedToBase: { 14110 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 14111 if (!P) 14112 break; 14113 return getDerivedToBaseAlignmentAndOffset( 14114 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 14115 } 14116 } 14117 break; 14118 } 14119 case Stmt::CXXThisExprClass: { 14120 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 14121 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 14122 return std::make_pair(Alignment, CharUnits::Zero()); 14123 } 14124 case Stmt::UnaryOperatorClass: { 14125 auto *UO = cast<UnaryOperator>(E); 14126 if (UO->getOpcode() == UO_AddrOf) 14127 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 14128 break; 14129 } 14130 case Stmt::BinaryOperatorClass: { 14131 auto *BO = cast<BinaryOperator>(E); 14132 auto Opcode = BO->getOpcode(); 14133 switch (Opcode) { 14134 default: 14135 break; 14136 case BO_Add: 14137 case BO_Sub: { 14138 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 14139 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 14140 std::swap(LHS, RHS); 14141 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 14142 Ctx); 14143 } 14144 case BO_Comma: 14145 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 14146 } 14147 break; 14148 } 14149 } 14150 return llvm::None; 14151 } 14152 14153 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 14154 // See if we can compute the alignment of a VarDecl and an offset from it. 14155 Optional<std::pair<CharUnits, CharUnits>> P = 14156 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 14157 14158 if (P) 14159 return P->first.alignmentAtOffset(P->second); 14160 14161 // If that failed, return the type's alignment. 14162 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 14163 } 14164 14165 /// CheckCastAlign - Implements -Wcast-align, which warns when a 14166 /// pointer cast increases the alignment requirements. 14167 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 14168 // This is actually a lot of work to potentially be doing on every 14169 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 14170 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 14171 return; 14172 14173 // Ignore dependent types. 14174 if (T->isDependentType() || Op->getType()->isDependentType()) 14175 return; 14176 14177 // Require that the destination be a pointer type. 14178 const PointerType *DestPtr = T->getAs<PointerType>(); 14179 if (!DestPtr) return; 14180 14181 // If the destination has alignment 1, we're done. 
14182 QualType DestPointee = DestPtr->getPointeeType(); 14183 if (DestPointee->isIncompleteType()) return; 14184 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 14185 if (DestAlign.isOne()) return; 14186 14187 // Require that the source be a pointer type. 14188 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 14189 if (!SrcPtr) return; 14190 QualType SrcPointee = SrcPtr->getPointeeType(); 14191 14192 // Explicitly allow casts from cv void*. We already implicitly 14193 // allowed casts to cv void*, since they have alignment 1. 14194 // Also allow casts involving incomplete types, which implicitly 14195 // includes 'void'. 14196 if (SrcPointee->isIncompleteType()) return; 14197 14198 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 14199 14200 if (SrcAlign >= DestAlign) return; 14201 14202 Diag(TRange.getBegin(), diag::warn_cast_align) 14203 << Op->getType() << T 14204 << static_cast<unsigned>(SrcAlign.getQuantity()) 14205 << static_cast<unsigned>(DestAlign.getQuantity()) 14206 << TRange << Op->getSourceRange(); 14207 } 14208 14209 /// Check whether this array fits the idiom of a size-one tail padded 14210 /// array member of a struct. 14211 /// 14212 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 14213 /// commonly used to emulate flexible arrays in C89 code. 14214 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 14215 const NamedDecl *ND) { 14216 if (Size != 1 || !ND) return false; 14217 14218 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 14219 if (!FD) return false; 14220 14221 // Don't consider sizes resulting from macro expansions or template argument 14222 // substitution to form C89 tail-padded arrays. 14223 14224 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 14225 while (TInfo) { 14226 TypeLoc TL = TInfo->getTypeLoc(); 14227 // Look through typedefs. 14228 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 14229 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 14230 TInfo = TDL->getTypeSourceInfo(); 14231 continue; 14232 } 14233 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 14234 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 14235 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 14236 return false; 14237 } 14238 break; 14239 } 14240 14241 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 14242 if (!RD) return false; 14243 if (RD->isUnion()) return false; 14244 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 14245 if (!CRD->isStandardLayout()) return false; 14246 } 14247 14248 // See if this is the last field decl in the record. 14249 const Decl *D = FD; 14250 while ((D = D->getNextDeclInContext())) 14251 if (isa<FieldDecl>(D)) 14252 return false; 14253 return true; 14254 } 14255 14256 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 14257 const ArraySubscriptExpr *ASE, 14258 bool AllowOnePastEnd, bool IndexNegated) { 14259 // Already diagnosed by the constant evaluator. 
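// (Illustrative note, an assumed example: in a constant-evaluated context
// such as "constexpr int a[4] = {}; constexpr int k = a[10];", the
// out-of-bounds access is already rejected by the constant evaluator, so
// there is no need to warn again here.)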
14260 if (isConstantEvaluated()) 14261 return; 14262 14263 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 14264 if (IndexExpr->isValueDependent()) 14265 return; 14266 14267 const Type *EffectiveType = 14268 BaseExpr->getType()->getPointeeOrArrayElementType(); 14269 BaseExpr = BaseExpr->IgnoreParenCasts(); 14270 const ConstantArrayType *ArrayTy = 14271 Context.getAsConstantArrayType(BaseExpr->getType()); 14272 14273 if (!ArrayTy) 14274 return; 14275 14276 const Type *BaseType = ArrayTy->getElementType().getTypePtr(); 14277 if (EffectiveType->isDependentType() || BaseType->isDependentType()) 14278 return; 14279 14280 Expr::EvalResult Result; 14281 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 14282 return; 14283 14284 llvm::APSInt index = Result.Val.getInt(); 14285 if (IndexNegated) 14286 index = -index; 14287 14288 const NamedDecl *ND = nullptr; 14289 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 14290 ND = DRE->getDecl(); 14291 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 14292 ND = ME->getMemberDecl(); 14293 14294 if (index.isUnsigned() || !index.isNegative()) { 14295 // It is possible that the type of the base expression after 14296 // IgnoreParenCasts is incomplete, even though the type of the base 14297 // expression before IgnoreParenCasts is complete (see PR39746 for an 14298 // example). In this case we have no information about whether the array 14299 // access exceeds the array bounds. However we can still diagnose an array 14300 // access which precedes the array bounds. 14301 if (BaseType->isIncompleteType()) 14302 return; 14303 14304 llvm::APInt size = ArrayTy->getSize(); 14305 if (!size.isStrictlyPositive()) 14306 return; 14307 14308 if (BaseType != EffectiveType) { 14309 // Make sure we're comparing apples to apples when comparing index to size 14310 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 14311 uint64_t array_typesize = Context.getTypeSize(BaseType); 14312 // Handle ptrarith_typesize being zero, such as when casting to void* 14313 if (!ptrarith_typesize) ptrarith_typesize = 1; 14314 if (ptrarith_typesize != array_typesize) { 14315 // There's a cast to a different size type involved 14316 uint64_t ratio = array_typesize / ptrarith_typesize; 14317 // TODO: Be smarter about handling cases where array_typesize is not a 14318 // multiple of ptrarith_typesize 14319 if (ptrarith_typesize * ratio == array_typesize) 14320 size *= llvm::APInt(size.getBitWidth(), ratio); 14321 } 14322 } 14323 14324 if (size.getBitWidth() > index.getBitWidth()) 14325 index = index.zext(size.getBitWidth()); 14326 else if (size.getBitWidth() < index.getBitWidth()) 14327 size = size.zext(index.getBitWidth()); 14328 14329 // For array subscripting the index must be less than size, but for pointer 14330 // arithmetic also allow the index (offset) to be equal to size since 14331 // computing the next address after the end of the array is legal and 14332 // commonly done e.g. in C++ iterators and range-based for loops. 14333 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 14334 return; 14335 14336 // Also don't warn for arrays of size 1 which are members of some 14337 // structure. These are often used to approximate flexible arrays in C89 14338 // code. 14339 if (IsTailPaddedMemberArray(*this, size, ND)) 14340 return; 14341 14342 // Suppress the warning if the subscript expression (as identified by the 14343 // ']' location) and the index expression are both from macro expansions 14344 // within a system header. 
14345 if (ASE) { 14346 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 14347 ASE->getRBracketLoc()); 14348 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 14349 SourceLocation IndexLoc = 14350 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 14351 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 14352 return; 14353 } 14354 } 14355 14356 unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds; 14357 if (ASE) 14358 DiagID = diag::warn_array_index_exceeds_bounds; 14359 14360 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 14361 PDiag(DiagID) << index.toString(10, true) 14362 << size.toString(10, true) 14363 << (unsigned)size.getLimitedValue(~0U) 14364 << IndexExpr->getSourceRange()); 14365 } else { 14366 unsigned DiagID = diag::warn_array_index_precedes_bounds; 14367 if (!ASE) { 14368 DiagID = diag::warn_ptr_arith_precedes_bounds; 14369 if (index.isNegative()) index = -index; 14370 } 14371 14372 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 14373 PDiag(DiagID) << index.toString(10, true) 14374 << IndexExpr->getSourceRange()); 14375 } 14376 14377 if (!ND) { 14378 // Try harder to find a NamedDecl to point at in the note. 14379 while (const ArraySubscriptExpr *ASE = 14380 dyn_cast<ArraySubscriptExpr>(BaseExpr)) 14381 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 14382 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 14383 ND = DRE->getDecl(); 14384 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 14385 ND = ME->getMemberDecl(); 14386 } 14387 14388 if (ND) 14389 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 14390 PDiag(diag::note_array_declared_here) << ND); 14391 } 14392 14393 void Sema::CheckArrayAccess(const Expr *expr) { 14394 int AllowOnePastEnd = 0; 14395 while (expr) { 14396 expr = expr->IgnoreParenImpCasts(); 14397 switch (expr->getStmtClass()) { 14398 case Stmt::ArraySubscriptExprClass: { 14399 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 14400 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 14401 AllowOnePastEnd > 0); 14402 expr = ASE->getBase(); 14403 break; 14404 } 14405 case Stmt::MemberExprClass: { 14406 expr = cast<MemberExpr>(expr)->getBase(); 14407 break; 14408 } 14409 case Stmt::OMPArraySectionExprClass: { 14410 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 14411 if (ASE->getLowerBound()) 14412 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 14413 /*ASE=*/nullptr, AllowOnePastEnd > 0); 14414 return; 14415 } 14416 case Stmt::UnaryOperatorClass: { 14417 // Only unwrap the * and & unary operators 14418 const UnaryOperator *UO = cast<UnaryOperator>(expr); 14419 expr = UO->getSubExpr(); 14420 switch (UO->getOpcode()) { 14421 case UO_AddrOf: 14422 AllowOnePastEnd++; 14423 break; 14424 case UO_Deref: 14425 AllowOnePastEnd--; 14426 break; 14427 default: 14428 return; 14429 } 14430 break; 14431 } 14432 case Stmt::ConditionalOperatorClass: { 14433 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 14434 if (const Expr *lhs = cond->getLHS()) 14435 CheckArrayAccess(lhs); 14436 if (const Expr *rhs = cond->getRHS()) 14437 CheckArrayAccess(rhs); 14438 return; 14439 } 14440 case Stmt::CXXOperatorCallExprClass: { 14441 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 14442 for (const auto *Arg : OCE->arguments()) 14443 CheckArrayAccess(Arg); 14444 return; 14445 } 14446 default: 14447 return; 14448 } 14449 } 14450 } 14451 14452 //===--- CHECK: Objective-C retain cycles ----------------------------------// 14453 14454 namespace { 14455 14456 struct RetainCycleOwner { 
14457 VarDecl *Variable = nullptr; 14458 SourceRange Range; 14459 SourceLocation Loc; 14460 bool Indirect = false; 14461 14462 RetainCycleOwner() = default; 14463 14464 void setLocsFrom(Expr *e) { 14465 Loc = e->getExprLoc(); 14466 Range = e->getSourceRange(); 14467 } 14468 }; 14469 14470 } // namespace 14471 14472 /// Consider whether capturing the given variable can possibly lead to 14473 /// a retain cycle. 14474 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 14475 // In ARC, it's captured strongly iff the variable has __strong 14476 // lifetime. In MRR, it's captured strongly if the variable is 14477 // __block and has an appropriate type. 14478 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 14479 return false; 14480 14481 owner.Variable = var; 14482 if (ref) 14483 owner.setLocsFrom(ref); 14484 return true; 14485 } 14486 14487 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 14488 while (true) { 14489 e = e->IgnoreParens(); 14490 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 14491 switch (cast->getCastKind()) { 14492 case CK_BitCast: 14493 case CK_LValueBitCast: 14494 case CK_LValueToRValue: 14495 case CK_ARCReclaimReturnedObject: 14496 e = cast->getSubExpr(); 14497 continue; 14498 14499 default: 14500 return false; 14501 } 14502 } 14503 14504 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 14505 ObjCIvarDecl *ivar = ref->getDecl(); 14506 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 14507 return false; 14508 14509 // Try to find a retain cycle in the base. 14510 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 14511 return false; 14512 14513 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 14514 owner.Indirect = true; 14515 return true; 14516 } 14517 14518 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 14519 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 14520 if (!var) return false; 14521 return considerVariable(var, ref, owner); 14522 } 14523 14524 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 14525 if (member->isArrow()) return false; 14526 14527 // Don't count this as an indirect ownership. 14528 e = member->getBase(); 14529 continue; 14530 } 14531 14532 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 14533 // Only pay attention to pseudo-objects on property references. 14534 ObjCPropertyRefExpr *pre 14535 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 14536 ->IgnoreParens()); 14537 if (!pre) return false; 14538 if (pre->isImplicitProperty()) return false; 14539 ObjCPropertyDecl *property = pre->getExplicitProperty(); 14540 if (!property->isRetaining() && 14541 !(property->getPropertyIvarDecl() && 14542 property->getPropertyIvarDecl()->getType() 14543 .getObjCLifetime() == Qualifiers::OCL_Strong)) 14544 return false; 14545 14546 owner.Indirect = true; 14547 if (pre->isSuperReceiver()) { 14548 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 14549 if (!owner.Variable) 14550 return false; 14551 owner.Loc = pre->getLocation(); 14552 owner.Range = pre->getSourceRange(); 14553 return true; 14554 } 14555 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 14556 ->getSourceExpr()); 14557 continue; 14558 } 14559 14560 // Array ivars? 
14561 14562 return false; 14563 } 14564 } 14565 14566 namespace { 14567 14568 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 14569 ASTContext &Context; 14570 VarDecl *Variable; 14571 Expr *Capturer = nullptr; 14572 bool VarWillBeReased = false; 14573 14574 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 14575 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 14576 Context(Context), Variable(variable) {} 14577 14578 void VisitDeclRefExpr(DeclRefExpr *ref) { 14579 if (ref->getDecl() == Variable && !Capturer) 14580 Capturer = ref; 14581 } 14582 14583 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 14584 if (Capturer) return; 14585 Visit(ref->getBase()); 14586 if (Capturer && ref->isFreeIvar()) 14587 Capturer = ref; 14588 } 14589 14590 void VisitBlockExpr(BlockExpr *block) { 14591 // Look inside nested blocks 14592 if (block->getBlockDecl()->capturesVariable(Variable)) 14593 Visit(block->getBlockDecl()->getBody()); 14594 } 14595 14596 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 14597 if (Capturer) return; 14598 if (OVE->getSourceExpr()) 14599 Visit(OVE->getSourceExpr()); 14600 } 14601 14602 void VisitBinaryOperator(BinaryOperator *BinOp) { 14603 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 14604 return; 14605 Expr *LHS = BinOp->getLHS(); 14606 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 14607 if (DRE->getDecl() != Variable) 14608 return; 14609 if (Expr *RHS = BinOp->getRHS()) { 14610 RHS = RHS->IgnoreParenCasts(); 14611 Optional<llvm::APSInt> Value; 14612 VarWillBeReased = 14613 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 14614 *Value == 0); 14615 } 14616 } 14617 } 14618 }; 14619 14620 } // namespace 14621 14622 /// Check whether the given argument is a block which captures a 14623 /// variable. 14624 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 14625 assert(owner.Variable && owner.Loc.isValid()); 14626 14627 e = e->IgnoreParenCasts(); 14628 14629 // Look through [^{...} copy] and Block_copy(^{...}). 14630 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 14631 Selector Cmd = ME->getSelector(); 14632 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 14633 e = ME->getInstanceReceiver(); 14634 if (!e) 14635 return nullptr; 14636 e = e->IgnoreParenCasts(); 14637 } 14638 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 14639 if (CE->getNumArgs() == 1) { 14640 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 14641 if (Fn) { 14642 const IdentifierInfo *FnI = Fn->getIdentifier(); 14643 if (FnI && FnI->isStr("_Block_copy")) { 14644 e = CE->getArg(0)->IgnoreParenCasts(); 14645 } 14646 } 14647 } 14648 } 14649 14650 BlockExpr *block = dyn_cast<BlockExpr>(e); 14651 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 14652 return nullptr; 14653 14654 FindCaptureVisitor visitor(S.Context, owner.Variable); 14655 visitor.Visit(block->getBlockDecl()->getBody()); 14656 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 14657 } 14658 14659 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 14660 RetainCycleOwner &owner) { 14661 assert(capturer); 14662 assert(owner.Variable && owner.Loc.isValid()); 14663 14664 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 14665 << owner.Variable << capturer->getSourceRange(); 14666 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 14667 << owner.Indirect << owner.Range; 14668 } 14669 14670 /// Check for a keyword selector that starts with the word 'add' or 14671 /// 'set'. 14672 static bool isSetterLikeSelector(Selector sel) { 14673 if (sel.isUnarySelector()) return false; 14674 14675 StringRef str = sel.getNameForSlot(0); 14676 while (!str.empty() && str.front() == '_') str = str.substr(1); 14677 if (str.startswith("set")) 14678 str = str.substr(3); 14679 else if (str.startswith("add")) { 14680 // Specially allow 'addOperationWithBlock:'. 14681 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 14682 return false; 14683 str = str.substr(3); 14684 } 14685 else 14686 return false; 14687 14688 if (str.empty()) return true; 14689 return !isLowercase(str.front()); 14690 } 14691 14692 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 14693 ObjCMessageExpr *Message) { 14694 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 14695 Message->getReceiverInterface(), 14696 NSAPI::ClassId_NSMutableArray); 14697 if (!IsMutableArray) { 14698 return None; 14699 } 14700 14701 Selector Sel = Message->getSelector(); 14702 14703 Optional<NSAPI::NSArrayMethodKind> MKOpt = 14704 S.NSAPIObj->getNSArrayMethodKind(Sel); 14705 if (!MKOpt) { 14706 return None; 14707 } 14708 14709 NSAPI::NSArrayMethodKind MK = *MKOpt; 14710 14711 switch (MK) { 14712 case NSAPI::NSMutableArr_addObject: 14713 case NSAPI::NSMutableArr_insertObjectAtIndex: 14714 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 14715 return 0; 14716 case NSAPI::NSMutableArr_replaceObjectAtIndex: 14717 return 1; 14718 14719 default: 14720 return None; 14721 } 14722 14723 return None; 14724 } 14725 14726 static 14727 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 14728 ObjCMessageExpr *Message) { 14729 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 14730 Message->getReceiverInterface(), 14731 NSAPI::ClassId_NSMutableDictionary); 14732 if (!IsMutableDictionary) { 14733 return None; 14734 } 14735 14736 Selector Sel = Message->getSelector(); 14737 14738 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 14739 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 14740 if (!MKOpt) { 14741 return None; 14742 } 14743 14744 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 14745 14746 switch (MK) { 14747 case NSAPI::NSMutableDict_setObjectForKey: 14748 case NSAPI::NSMutableDict_setValueForKey: 14749 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 14750 return 0; 14751 14752 default: 14753 return None; 14754 } 14755 14756 return None; 14757 } 14758 14759 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 14760 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 14761 Message->getReceiverInterface(), 14762 NSAPI::ClassId_NSMutableSet); 14763 14764 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 14765 Message->getReceiverInterface(), 14766 NSAPI::ClassId_NSMutableOrderedSet); 14767 if (!IsMutableSet && !IsMutableOrderedSet) { 14768 return None; 14769 } 14770 14771 Selector Sel = Message->getSelector(); 14772 14773 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 14774 if (!MKOpt) 
{ 14775 return None; 14776 } 14777 14778 NSAPI::NSSetMethodKind MK = *MKOpt; 14779 14780 switch (MK) { 14781 case NSAPI::NSMutableSet_addObject: 14782 case NSAPI::NSOrderedSet_setObjectAtIndex: 14783 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 14784 case NSAPI::NSOrderedSet_insertObjectAtIndex: 14785 return 0; 14786 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 14787 return 1; 14788 } 14789 14790 return None; 14791 } 14792 14793 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 14794 if (!Message->isInstanceMessage()) { 14795 return; 14796 } 14797 14798 Optional<int> ArgOpt; 14799 14800 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 14801 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 14802 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 14803 return; 14804 } 14805 14806 int ArgIndex = *ArgOpt; 14807 14808 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 14809 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 14810 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 14811 } 14812 14813 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 14814 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 14815 if (ArgRE->isObjCSelfExpr()) { 14816 Diag(Message->getSourceRange().getBegin(), 14817 diag::warn_objc_circular_container) 14818 << ArgRE->getDecl() << StringRef("'super'"); 14819 } 14820 } 14821 } else { 14822 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 14823 14824 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 14825 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 14826 } 14827 14828 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 14829 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 14830 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 14831 ValueDecl *Decl = ReceiverRE->getDecl(); 14832 Diag(Message->getSourceRange().getBegin(), 14833 diag::warn_objc_circular_container) 14834 << Decl << Decl; 14835 if (!ArgRE->isObjCSelfExpr()) { 14836 Diag(Decl->getLocation(), 14837 diag::note_objc_circular_container_declared_here) 14838 << Decl; 14839 } 14840 } 14841 } 14842 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 14843 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 14844 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 14845 ObjCIvarDecl *Decl = IvarRE->getDecl(); 14846 Diag(Message->getSourceRange().getBegin(), 14847 diag::warn_objc_circular_container) 14848 << Decl << Decl; 14849 Diag(Decl->getLocation(), 14850 diag::note_objc_circular_container_declared_here) 14851 << Decl; 14852 } 14853 } 14854 } 14855 } 14856 } 14857 14858 /// Check a message send to see if it's likely to cause a retain cycle. 14859 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 14860 // Only check instance methods whose selector looks like a setter. 14861 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 14862 return; 14863 14864 // Try to find a variable that the receiver is strongly owned by. 
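// Illustrative note (an assumed example): for a message send like
// "[self setCompletionHandler:^{ [self fire]; }];" the receiver 'self' is
// the strongly owned variable found below, and the block argument that
// captures it is what completes the cycle being diagnosed.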
14865 RetainCycleOwner owner; 14866 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 14867 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 14868 return; 14869 } else { 14870 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 14871 owner.Variable = getCurMethodDecl()->getSelfDecl(); 14872 owner.Loc = msg->getSuperLoc(); 14873 owner.Range = msg->getSuperLoc(); 14874 } 14875 14876 // Check whether the receiver is captured by any of the arguments. 14877 const ObjCMethodDecl *MD = msg->getMethodDecl(); 14878 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 14879 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 14880 // noescape blocks should not be retained by the method. 14881 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 14882 continue; 14883 return diagnoseRetainCycle(*this, capturer, owner); 14884 } 14885 } 14886 } 14887 14888 /// Check a property assign to see if it's likely to cause a retain cycle. 14889 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 14890 RetainCycleOwner owner; 14891 if (!findRetainCycleOwner(*this, receiver, owner)) 14892 return; 14893 14894 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 14895 diagnoseRetainCycle(*this, capturer, owner); 14896 } 14897 14898 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 14899 RetainCycleOwner Owner; 14900 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 14901 return; 14902 14903 // Because we don't have an expression for the variable, we have to set the 14904 // location explicitly here. 14905 Owner.Loc = Var->getLocation(); 14906 Owner.Range = Var->getSourceRange(); 14907 14908 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 14909 diagnoseRetainCycle(*this, Capturer, Owner); 14910 } 14911 14912 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 14913 Expr *RHS, bool isProperty) { 14914 // Check if RHS is an Objective-C object literal, which also can get 14915 // immediately zapped in a weak reference. Note that we explicitly 14916 // allow ObjCStringLiterals, since those are designed to never really die. 14917 RHS = RHS->IgnoreParenImpCasts(); 14918 14919 // This enum needs to match with the 'select' in 14920 // warn_objc_arc_literal_assign (off-by-1). 14921 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 14922 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 14923 return false; 14924 14925 S.Diag(Loc, diag::warn_arc_literal_assign) 14926 << (unsigned) Kind 14927 << (isProperty ? 0 : 1) 14928 << RHS->getSourceRange(); 14929 14930 return true; 14931 } 14932 14933 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 14934 Qualifiers::ObjCLifetime LT, 14935 Expr *RHS, bool isProperty) { 14936 // Strip off any implicit cast added to get to the one ARC-specific. 14937 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 14938 if (cast->getCastKind() == CK_ARCConsumeObject) { 14939 S.Diag(Loc, diag::warn_arc_retained_assign) 14940 << (LT == Qualifiers::OCL_ExplicitNone) 14941 << (isProperty ? 
0 : 1)
14942 << RHS->getSourceRange();
14943 return true;
14944 }
14945 RHS = cast->getSubExpr();
14946 }
14947
14948 if (LT == Qualifiers::OCL_Weak &&
14949 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty))
14950 return true;
14951
14952 return false;
14953 }
14954
14955 bool Sema::checkUnsafeAssigns(SourceLocation Loc,
14956 QualType LHS, Expr *RHS) {
14957 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();
14958
14959 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone)
14960 return false;
14961
14962 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false))
14963 return true;
14964
14965 return false;
14966 }
14967
14968 void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
14969 Expr *LHS, Expr *RHS) {
14970 QualType LHSType;
14971 // The type of a property reference on the LHS needs to be obtained directly
14972 // from its declaration, since the expression itself has a pseudo-object type.
14973 ObjCPropertyRefExpr *PRE
14974 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens());
14975 if (PRE && !PRE->isImplicitProperty()) {
14976 const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
14977 if (PD)
14978 LHSType = PD->getType();
14979 }
14980
14981 if (LHSType.isNull())
14982 LHSType = LHS->getType();
14983
14984 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();
14985
14986 if (LT == Qualifiers::OCL_Weak) {
14987 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
14988 getCurFunction()->markSafeWeakUse(LHS);
14989 }
14990
14991 if (checkUnsafeAssigns(Loc, LHSType, RHS))
14992 return;
14993
14994 // FIXME: Check for other lifetimes.
14995 if (LT != Qualifiers::OCL_None)
14996 return;
14997
14998 if (PRE) {
14999 if (PRE->isImplicitProperty())
15000 return;
15001 const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
15002 if (!PD)
15003 return;
15004
15005 unsigned Attributes = PD->getPropertyAttributes();
15006 if (Attributes & ObjCPropertyAttribute::kind_assign) {
15007 // When the 'assign' attribute was not explicitly specified
15008 // by the user, ignore it and rely on the property type itself
15009 // for lifetime info.
15010 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
15011 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) &&
15012 LHSType->isObjCRetainableType())
15013 return;
15014
15015 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
15016 if (cast->getCastKind() == CK_ARCConsumeObject) {
15017 Diag(Loc, diag::warn_arc_retained_property_assign)
15018 << RHS->getSourceRange();
15019 return;
15020 }
15021 RHS = cast->getSubExpr();
15022 }
15023 } else if (Attributes & ObjCPropertyAttribute::kind_weak) {
15024 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true))
15025 return;
15026 }
15027 }
15028 }
15029
15030 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===//
15031
15032 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr,
15033 SourceLocation StmtLoc,
15034 const NullStmt *Body) {
15035 // Do not warn if the body is a macro that expands to nothing, e.g.:
15036 //
15037 // #define CALL(x)
15038 // if (condition)
15039 // CALL(0);
15040 if (Body->hasLeadingEmptyMacro())
15041 return false;
15042
15043 // Get line numbers of statement and body.
15044 bool StmtLineInvalid; 15045 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 15046 &StmtLineInvalid); 15047 if (StmtLineInvalid) 15048 return false; 15049 15050 bool BodyLineInvalid; 15051 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 15052 &BodyLineInvalid); 15053 if (BodyLineInvalid) 15054 return false; 15055 15056 // Warn if null statement and body are on the same line. 15057 if (StmtLine != BodyLine) 15058 return false; 15059 15060 return true; 15061 } 15062 15063 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 15064 const Stmt *Body, 15065 unsigned DiagID) { 15066 // Since this is a syntactic check, don't emit diagnostic for template 15067 // instantiations, this just adds noise. 15068 if (CurrentInstantiationScope) 15069 return; 15070 15071 // The body should be a null statement. 15072 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 15073 if (!NBody) 15074 return; 15075 15076 // Do the usual checks. 15077 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 15078 return; 15079 15080 Diag(NBody->getSemiLoc(), DiagID); 15081 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 15082 } 15083 15084 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 15085 const Stmt *PossibleBody) { 15086 assert(!CurrentInstantiationScope); // Ensured by caller 15087 15088 SourceLocation StmtLoc; 15089 const Stmt *Body; 15090 unsigned DiagID; 15091 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 15092 StmtLoc = FS->getRParenLoc(); 15093 Body = FS->getBody(); 15094 DiagID = diag::warn_empty_for_body; 15095 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 15096 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 15097 Body = WS->getBody(); 15098 DiagID = diag::warn_empty_while_body; 15099 } else 15100 return; // Neither `for' nor `while'. 15101 15102 // The body should be a null statement. 15103 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 15104 if (!NBody) 15105 return; 15106 15107 // Skip expensive checks if diagnostic is disabled. 15108 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 15109 return; 15110 15111 // Do the usual checks. 15112 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 15113 return; 15114 15115 // `for(...);' and `while(...);' are popular idioms, so in order to keep 15116 // noise level low, emit diagnostics only if for/while is followed by a 15117 // CompoundStmt, e.g.: 15118 // for (int i = 0; i < n; i++); 15119 // { 15120 // a(i); 15121 // } 15122 // or if for/while is followed by a statement with more indentation 15123 // than for/while itself: 15124 // for (int i = 0; i < n; i++); 15125 // a(i); 15126 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 15127 if (!ProbableTypo) { 15128 bool BodyColInvalid; 15129 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 15130 PossibleBody->getBeginLoc(), &BodyColInvalid); 15131 if (BodyColInvalid) 15132 return; 15133 15134 bool StmtColInvalid; 15135 unsigned StmtCol = 15136 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 15137 if (StmtColInvalid) 15138 return; 15139 15140 if (BodyCol > StmtCol) 15141 ProbableTypo = true; 15142 } 15143 15144 if (ProbableTypo) { 15145 Diag(NBody->getSemiLoc(), DiagID); 15146 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 15147 } 15148 } 15149 15150 //===--- CHECK: Warn on self move with std::move. -------------------------===// 15151 15152 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
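/// For illustration (an assumed example): both "x = std::move(x);" and
/// "s.field = std::move(s.field);" are diagnosed under -Wself-move.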
15153 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
15154 SourceLocation OpLoc) {
15155 if (Diags.isIgnored(diag::warn_self_move, OpLoc))
15156 return;
15157
15158 if (inTemplateInstantiation())
15159 return;
15160
15161 // Strip parens and casts away.
15162 LHSExpr = LHSExpr->IgnoreParenImpCasts();
15163 RHSExpr = RHSExpr->IgnoreParenImpCasts();
15164
15165 // Check for a call expression.
15166 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
15167 if (!CE || CE->getNumArgs() != 1)
15168 return;
15169
15170 // Check for a call to std::move.
15171 if (!CE->isCallToStdMove())
15172 return;
15173
15174 // Get the argument from std::move.
15175 RHSExpr = CE->getArg(0);
15176
15177 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
15178 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
15179
15180 // Two DeclRefExprs: check that the decls are the same.
15181 if (LHSDeclRef && RHSDeclRef) {
15182 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
15183 return;
15184 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
15185 RHSDeclRef->getDecl()->getCanonicalDecl())
15186 return;
15187
15188 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
15189 << LHSExpr->getSourceRange()
15190 << RHSExpr->getSourceRange();
15191 return;
15192 }
15193
15194 // Member variables require a different approach to check for self moves.
15195 // MemberExprs are the same if every nested MemberExpr refers to the same
15196 // Decl and the base Exprs are either DeclRefExprs with the same Decl or
15197 // are both CXXThisExprs.
15198 const Expr *LHSBase = LHSExpr;
15199 const Expr *RHSBase = RHSExpr;
15200 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
15201 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
15202 if (!LHSME || !RHSME)
15203 return;
15204
15205 while (LHSME && RHSME) {
15206 if (LHSME->getMemberDecl()->getCanonicalDecl() !=
15207 RHSME->getMemberDecl()->getCanonicalDecl())
15208 return;
15209
15210 LHSBase = LHSME->getBase();
15211 RHSBase = RHSME->getBase();
15212 LHSME = dyn_cast<MemberExpr>(LHSBase);
15213 RHSME = dyn_cast<MemberExpr>(RHSBase);
15214 }
15215
15216 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
15217 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
15218 if (LHSDeclRef && RHSDeclRef) {
15219 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
15220 return;
15221 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
15222 RHSDeclRef->getDecl()->getCanonicalDecl())
15223 return;
15224
15225 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
15226 << LHSExpr->getSourceRange()
15227 << RHSExpr->getSourceRange();
15228 return;
15229 }
15230
15231 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
15232 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
15233 << LHSExpr->getSourceRange()
15234 << RHSExpr->getSourceRange();
15235 }
15236
15237 //===--- Layout compatibility ----------------------------------------------//
15238
15239 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
15240
15241 /// Check if two enumeration types are layout-compatible.
15242 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
15243 // C++11 [dcl.enum] p8:
15244 // Two enumeration types are layout-compatible if they have the same
15245 // underlying type.
15246 return ED1->isComplete() && ED2->isComplete() &&
15247 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
15248 }
15249
15250 /// Check if two fields are layout-compatible.
static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
                               FieldDecl *Field2) {
  if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
    return false;

  if (Field1->isBitField() != Field2->isBitField())
    return false;

  if (Field1->isBitField()) {
    // Make sure that the bit-fields are the same length.
    unsigned Bits1 = Field1->getBitWidthValue(C);
    unsigned Bits2 = Field2->getBitWidthValue(C);

    if (Bits1 != Bits2)
      return false;
  }

  return true;
}

/// Check if two standard-layout structs are layout-compatible.
/// (C++11 [class.mem] p17)
static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
                                     RecordDecl *RD2) {
  // If both records are C++ classes, check that base classes match.
  if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
    // If one of the records is a CXXRecordDecl we are in C++ mode,
    // thus the other one is a CXXRecordDecl, too.
    const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
    // Check the number of base classes.
    if (D1CXX->getNumBases() != D2CXX->getNumBases())
      return false;

    // Check the base classes.
    for (CXXRecordDecl::base_class_const_iterator
             Base1 = D1CXX->bases_begin(),
             BaseEnd1 = D1CXX->bases_end(),
             Base2 = D2CXX->bases_begin();
         Base1 != BaseEnd1;
         ++Base1, ++Base2) {
      if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
        return false;
    }
  } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
    // If only RD2 is a C++ class, it should have zero base classes.
    if (D2CXX->getNumBases() > 0)
      return false;
  }

  // Check the fields.
  RecordDecl::field_iterator Field2 = RD2->field_begin(),
                             Field2End = RD2->field_end(),
                             Field1 = RD1->field_begin(),
                             Field1End = RD1->field_end();
  for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
    if (!isLayoutCompatible(C, *Field1, *Field2))
      return false;
  }
  if (Field1 != Field1End || Field2 != Field2End)
    return false;

  return true;
}

/// Check if two standard-layout unions are layout-compatible.
/// (C++11 [class.mem] p18)
static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
                                    RecordDecl *RD2) {
  llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
  for (auto *Field2 : RD2->fields())
    UnmatchedFields.insert(Field2);

  for (auto *Field1 : RD1->fields()) {
    llvm::SmallPtrSet<FieldDecl *, 8>::iterator
        I = UnmatchedFields.begin(),
        E = UnmatchedFields.end();

    for ( ; I != E; ++I) {
      if (isLayoutCompatible(C, Field1, *I)) {
        bool Result = UnmatchedFields.erase(*I);
        (void) Result;
        assert(Result);
        break;
      }
    }
    if (I == E)
      return false;
  }

  return UnmatchedFields.empty();
}

static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
                               RecordDecl *RD2) {
  if (RD1->isUnion() != RD2->isUnion())
    return false;

  if (RD1->isUnion())
    return isLayoutCompatibleUnion(C, RD1, RD2);
  else
    return isLayoutCompatibleStruct(C, RD1, RD2);
}

/// Check if two types are layout-compatible in the C++11 sense.
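///
/// E.g. (illustrative only): `enum E1 : int {};` and `enum E2 : int {};` are
/// layout-compatible because they share the same underlying type, and two
/// standard-layout structs with the same field types in the same order are
/// layout-compatible regardless of field names.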
static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
  if (T1.isNull() || T2.isNull())
    return false;

  // C++11 [basic.types] p11:
  //   If two types T1 and T2 are the same type, then T1 and T2 are
  //   layout-compatible types.
  if (C.hasSameType(T1, T2))
    return true;

  T1 = T1.getCanonicalType().getUnqualifiedType();
  T2 = T2.getCanonicalType().getUnqualifiedType();

  const Type::TypeClass TC1 = T1->getTypeClass();
  const Type::TypeClass TC2 = T2->getTypeClass();

  if (TC1 != TC2)
    return false;

  if (TC1 == Type::Enum) {
    return isLayoutCompatible(C,
                              cast<EnumType>(T1)->getDecl(),
                              cast<EnumType>(T2)->getDecl());
  } else if (TC1 == Type::Record) {
    if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
      return false;

    return isLayoutCompatible(C,
                              cast<RecordType>(T1)->getDecl(),
                              cast<RecordType>(T2)->getDecl());
  }

  return false;
}

//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//

/// Given a type tag expression, find the type tag itself.
///
/// \param TypeExpr Type tag expression, as it appears in the user's code.
///
/// \param VD Declaration of an identifier that appears in a type tag.
///
/// \param MagicValue Type tag magic value.
///
/// \param isConstantEvaluated Whether the evaluation should be performed in
/// a constant context.
static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
                            const ValueDecl **VD, uint64_t *MagicValue,
                            bool isConstantEvaluated) {
  while (true) {
    if (!TypeExpr)
      return false;

    TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();

    switch (TypeExpr->getStmtClass()) {
    case Stmt::UnaryOperatorClass: {
      const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
      if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
        TypeExpr = UO->getSubExpr();
        continue;
      }
      return false;
    }

    case Stmt::DeclRefExprClass: {
      const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
      *VD = DRE->getDecl();
      return true;
    }

    case Stmt::IntegerLiteralClass: {
      const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
      llvm::APInt MagicValueAPInt = IL->getValue();
      if (MagicValueAPInt.getActiveBits() <= 64) {
        *MagicValue = MagicValueAPInt.getZExtValue();
        return true;
      } else
        return false;
    }

    case Stmt::BinaryConditionalOperatorClass:
    case Stmt::ConditionalOperatorClass: {
      const AbstractConditionalOperator *ACO =
          cast<AbstractConditionalOperator>(TypeExpr);
      bool Result;
      if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
                                                     isConstantEvaluated)) {
        if (Result)
          TypeExpr = ACO->getTrueExpr();
        else
          TypeExpr = ACO->getFalseExpr();
        continue;
      }
      return false;
    }

    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
      if (BO->getOpcode() == BO_Comma) {
        TypeExpr = BO->getRHS();
        continue;
      }
      return false;
    }

    default:
      return false;
    }
  }
}

/// Retrieve the C type corresponding to type tag TypeExpr.
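///
/// For instance (illustrative, following the type_tag_for_datatype attribute
/// documentation), a tag expression such as `MPI_INT`, declared as the
/// address of a variable carrying `type_tag_for_datatype(mpi, int)`, maps to
/// the C type `int`.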
///
/// \param TypeExpr Expression that specifies a type tag.
///
/// \param MagicValues Registered magic values.
///
/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
/// kind.
///
/// \param TypeInfo Information about the corresponding C type.
///
/// \param isConstantEvaluated Whether the evaluation should be performed in
/// a constant context.
///
/// \returns true if the corresponding C type was found.
static bool GetMatchingCType(
    const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
    const ASTContext &Ctx,
    const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
        *MagicValues,
    bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
    bool isConstantEvaluated) {
  FoundWrongKind = false;

  // Variable declaration that has the type_tag_for_datatype attribute.
  const ValueDecl *VD = nullptr;

  uint64_t MagicValue;

  if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
    return false;

  if (VD) {
    if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
      if (I->getArgumentKind() != ArgumentKind) {
        FoundWrongKind = true;
        return false;
      }
      TypeInfo.Type = I->getMatchingCType();
      TypeInfo.LayoutCompatible = I->getLayoutCompatible();
      TypeInfo.MustBeNull = I->getMustBeNull();
      return true;
    }
    return false;
  }

  if (!MagicValues)
    return false;

  llvm::DenseMap<Sema::TypeTagMagicValue,
                 Sema::TypeTagData>::const_iterator I =
      MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
  if (I == MagicValues->end())
    return false;

  TypeInfo = I->second;
  return true;
}

void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                      uint64_t MagicValue, QualType Type,
                                      bool LayoutCompatible,
                                      bool MustBeNull) {
  if (!TypeTagForDatatypeMagicValues)
    TypeTagForDatatypeMagicValues.reset(
        new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);

  TypeTagMagicValue Magic(ArgumentKind, MagicValue);
  (*TypeTagForDatatypeMagicValues)[Magic] =
      TypeTagData(Type, LayoutCompatible, MustBeNull);
}

static bool IsSameCharType(QualType T1, QualType T2) {
  const BuiltinType *BT1 = T1->getAs<BuiltinType>();
  if (!BT1)
    return false;

  const BuiltinType *BT2 = T2->getAs<BuiltinType>();
  if (!BT2)
    return false;

  BuiltinType::Kind T1Kind = BT1->getKind();
  BuiltinType::Kind T2Kind = BT2->getKind();

  return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
         (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
         (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
         (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
}

void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                    const ArrayRef<const Expr *> ExprArgs,
                                    SourceLocation CallSiteLoc) {
  const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
  bool IsPointerAttr = Attr->getIsPointer();

  // Retrieve the argument representing the 'type_tag'.
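  // (For a declaration like
  //    int MPI_Send(void *buf, int count, MPI_Datatype datatype)
  //        __attribute__((pointer_with_type_tag(mpi, 1, 3)));
  //  the example from the attribute documentation, the type-tag index selects
  //  the `datatype` argument; the indices used below have already been
  //  converted to AST indices.)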
  unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
  if (TypeTagIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 0 << Attr->getTypeTagIdx().getSourceIndex();
    return;
  }
  const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
  bool FoundWrongKind;
  TypeTagData TypeInfo;
  if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
                        TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
                        TypeInfo, isConstantEvaluated())) {
    if (FoundWrongKind)
      Diag(TypeTagExpr->getExprLoc(),
           diag::warn_type_tag_for_datatype_wrong_kind)
          << TypeTagExpr->getSourceRange();
    return;
  }

  // Retrieve the argument representing the 'arg_idx'.
  unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
  if (ArgumentIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 1 << Attr->getArgumentIdx().getSourceIndex();
    return;
  }
  const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
  if (IsPointerAttr) {
    // Skip implicit cast of pointer to `void *' (as a function argument).
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
      if (ICE->getType()->isVoidPointerType() &&
          ICE->getCastKind() == CK_BitCast)
        ArgumentExpr = ICE->getSubExpr();
  }
  QualType ArgumentType = ArgumentExpr->getType();

  // Passing a `void*' pointer shouldn't trigger a warning.
  if (IsPointerAttr && ArgumentType->isVoidPointerType())
    return;

  if (TypeInfo.MustBeNull) {
    // A type tag with matching void type requires a null pointer.
    if (!ArgumentExpr->isNullPointerConstant(
            Context, Expr::NPC_ValueDependentIsNotNull)) {
      Diag(ArgumentExpr->getExprLoc(),
           diag::warn_type_safety_null_pointer_required)
          << ArgumentKind->getName()
          << ArgumentExpr->getSourceRange()
          << TypeTagExpr->getSourceRange();
    }
    return;
  }

  QualType RequiredType = TypeInfo.Type;
  if (IsPointerAttr)
    RequiredType = Context.getPointerType(RequiredType);

  bool mismatch = false;
  if (!TypeInfo.LayoutCompatible) {
    mismatch = !Context.hasSameType(ArgumentType, RequiredType);

    // C++11 [basic.fundamental] p1:
    //   Plain char, signed char, and unsigned char are three distinct types.
    //
    // But we treat plain `char' as equivalent to `signed char' or `unsigned
    // char' depending on the current char signedness mode.
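    //
    // For example (illustrative only): with a tag registered for
    // `signed char *`, an argument of type `char *` is accepted on targets
    // where plain `char` is signed, because IsSameCharType treats Char_S and
    // SChar (and Char_U and UChar) as interchangeable here.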
    if (mismatch)
      if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
                                           RequiredType->getPointeeType())) ||
          (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
        mismatch = false;
  } else
    if (IsPointerAttr)
      mismatch = !isLayoutCompatible(Context,
                                     ArgumentType->getPointeeType(),
                                     RequiredType->getPointeeType());
    else
      mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);

  if (mismatch)
    Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
        << ArgumentType << ArgumentKind
        << TypeInfo.LayoutCompatible << RequiredType
        << ArgumentExpr->getSourceRange()
        << TypeTagExpr->getSourceRange();
}

void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                         CharUnits Alignment) {
  MisalignedMembers.emplace_back(E, RD, MD, Alignment);
}

void Sema::DiagnoseMisalignedMembers() {
  for (MisalignedMember &m : MisalignedMembers) {
    const NamedDecl *ND = m.RD;
    if (ND->getName().empty()) {
      if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
        ND = TD;
    }
    Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
        << m.MD << ND << m.E->getSourceRange();
  }
  MisalignedMembers.clear();
}

void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
  E = E->IgnoreParens();
  if (!T->isPointerType() && !T->isIntegerType())
    return;
  if (isa<UnaryOperator>(E) &&
      cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
    auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
    if (isa<MemberExpr>(Op)) {
      auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
      if (MA != MisalignedMembers.end() &&
          (T->isIntegerType() ||
           (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
                                   Context.getTypeAlignInChars(
                                       T->getPointeeType()) <= MA->Alignment))))
        MisalignedMembers.erase(MA);
    }
  }
}

void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecl's like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (BaseType->isDependentType())
      return;
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
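    // (For instance, a static data member or a member function named in the
    //  chain is not a FieldDecl and stops the walk here; illustrative note.)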
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize the offset of the whole access.
  CharUnits Offset;
  for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
       I++) {
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
  }

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // The offset may be fine, but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit for it.
    // Intuitively, walking the chain of member expressions from right to
    // left, we start with the alignment required by the field type, but some
    // packed attribute in that chain has reduced the alignment. It may happen
    // that another packed structure increases it again, but if we are here,
    // such an increase has not been enough. So pointing at the first
    // FieldDecl that either is packed, or whose RecordDecl is, seems
    // reasonable.
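    //
    // A sketch of the kind of code in question (illustrative only):
    //   struct __attribute__((packed)) Inner { char C; int I; };
    //   struct Outer { char C; Inner In; } O;
    //   int *P = &O.In.I; // -Waddress-of-packed-member: `I` may be misaligned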
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}

void Sema::CheckAddressOfPackedMember(Expr *rhs) {
  using namespace std::placeholders;

  RefersToMemberWithReducedAlignment(
      rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
                     _2, _3, _4));
}

ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
                                            ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 1))
    return ExprError();

  ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
  if (MatrixArg.isInvalid())
    return MatrixArg;
  Expr *Matrix = MatrixArg.get();

  auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
  if (!MType) {
    Diag(Matrix->getBeginLoc(), diag::err_builtin_matrix_arg);
    return ExprError();
  }

  // Create the returned matrix type by swapping the rows and columns of the
  // argument matrix type.
  QualType ResultType = Context.getConstantMatrixType(
      MType->getElementType(), MType->getNumColumns(), MType->getNumRows());

  // Change the return type to the type of the returned matrix.
  TheCall->setType(ResultType);

  // Update the call argument to use the possibly converted matrix argument.
  TheCall->setArg(0, Matrix);
  return CallResult;
}

// Get and verify the matrix dimensions.
static llvm::Optional<unsigned>
getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
  SourceLocation ErrorPos;
  Optional<llvm::APSInt> Value =
      Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
  if (!Value) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
        << Name;
    return {};
  }
  uint64_t Dim = Value->getZExtValue();
  if (!ConstantMatrixType::isDimensionValid(Dim)) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
        << Name << ConstantMatrixType::getMaxElementsPerDimension();
    return {};
  }
  return Dim;
}

ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
                                                  ExprResult CallResult) {
  if (!getLangOpts().MatrixTypes) {
    Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
    return ExprError();
  }

  if (checkArgCount(*this, TheCall, 4))
    return ExprError();

  unsigned PtrArgIdx = 0;
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *RowsExpr = TheCall->getArg(1);
  Expr *ColumnsExpr = TheCall->getArg(2);
  Expr *StrideExpr = TheCall->getArg(3);

  bool ArgError = false;

  // Check pointer argument.
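  // (A typical call being validated here, per the matrix-types extension,
  //  looks roughly like this; illustrative only:
  //    typedef double m4x4_t __attribute__((matrix_type(4, 4)));
  //    m4x4_t M = __builtin_matrix_column_major_load(Ptr, 4, 4, Stride);
  //  with the pointer as the first argument.)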
  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(0, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  QualType ElementTy;
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
        << PtrArgIdx + 1;
    ArgError = true;
  } else {
    ElementTy = PtrTy->getPointeeType().getUnqualifiedType();

    if (!ConstantMatrixType::isValidElementType(ElementTy)) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
          << PtrArgIdx + 1;
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the expression to size_t.
  auto ApplyArgumentConversions = [this](Expr *E) {
    ExprResult Conv = DefaultLvalueConversion(E);
    if (Conv.isInvalid())
      return Conv;

    return tryConvertExprToType(Conv.get(), Context.getSizeType());
  };

  // Apply conversions to the row and column expressions.
  ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
  if (!RowsConv.isInvalid()) {
    RowsExpr = RowsConv.get();
    TheCall->setArg(1, RowsExpr);
  } else
    RowsExpr = nullptr;

  ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
  if (!ColumnsConv.isInvalid()) {
    ColumnsExpr = ColumnsConv.get();
    TheCall->setArg(2, ColumnsExpr);
  } else
    ColumnsExpr = nullptr;

  // If any part of the result matrix type is still pending, just use
  // Context.DependentTy until all parts are resolved.
  if ((RowsExpr && RowsExpr->isTypeDependent()) ||
      (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
    TheCall->setType(Context.DependentTy);
    return CallResult;
  }

  // Check the row and column dimensions.
  llvm::Optional<unsigned> MaybeRows;
  if (RowsExpr)
    MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);

  llvm::Optional<unsigned> MaybeColumns;
  if (ColumnsExpr)
    MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);

  // Check stride argument.
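  // (The stride is measured in elements and must be at least the number of
  //  rows; e.g., loading a 4x4 matrix from a buffer laid out with 6 elements
  //  per column uses a stride of 6. Illustrative note based on the
  //  matrix-types documentation.)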
  ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
  if (StrideConv.isInvalid())
    return ExprError();
  StrideExpr = StrideConv.get();
  TheCall->setArg(3, StrideExpr);

  if (MaybeRows) {
    if (Optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < *MaybeRows) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError || !MaybeRows || !MaybeColumns)
    return ExprError();

  TheCall->setType(
      Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
  return CallResult;
}

ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                                   ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 3))
    return ExprError();

  unsigned PtrArgIdx = 1;
  Expr *MatrixExpr = TheCall->getArg(0);
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *StrideExpr = TheCall->getArg(2);

  bool ArgError = false;

  {
    ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
    if (MatrixConv.isInvalid())
      return MatrixConv;
    MatrixExpr = MatrixConv.get();
    TheCall->setArg(0, MatrixExpr);
  }
  if (MatrixExpr->isTypeDependent()) {
    TheCall->setType(Context.DependentTy);
    return TheCall;
  }

  auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
  if (!MatrixTy) {
    Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_matrix_arg) << 0;
    ArgError = true;
  }

  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(1, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  // Check pointer argument.
  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
        << PtrArgIdx + 1;
    ArgError = true;
  } else {
    QualType ElementTy = PtrTy->getPointeeType();
    if (ElementTy.isConstQualified()) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
      ArgError = true;
    }
    ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
    if (MatrixTy &&
        !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
      Diag(PtrExpr->getBeginLoc(),
           diag::err_builtin_matrix_pointer_arg_mismatch)
          << ElementTy << MatrixTy->getElementType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the stride expression to
  // size_t.
  {
    ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
    if (StrideConv.isInvalid())
      return StrideConv;

    StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
    if (StrideConv.isInvalid())
      return StrideConv;
    StrideExpr = StrideConv.get();
    TheCall->setArg(2, StrideExpr);
  }

  // Check stride argument.
  if (MatrixTy) {
    if (Optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < MatrixTy->getNumRows()) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError)
    return ExprError();

  return CallResult;
}

/// \brief Enforce the bounds of a TCB
/// CheckTCBEnforcement - Enforces that every function in a named TCB only
/// directly calls other functions in the same TCB as marked by the enforce_tcb
/// and enforce_tcb_leaf attributes.
void Sema::CheckTCBEnforcement(const CallExpr *TheCall,
                               const FunctionDecl *Callee) {
  const FunctionDecl *Caller = getCurFunctionDecl();

  // Calls to builtins are not enforced.
  if (!Caller || !Caller->hasAttr<EnforceTCBAttr>() ||
      Callee->getBuiltinID() != 0)
    return;

  // Search through the enforce_tcb and enforce_tcb_leaf attributes to find
  // all TCBs the callee is a part of.
  llvm::StringSet<> CalleeTCBs;
  for_each(Callee->specific_attrs<EnforceTCBAttr>(),
           [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
  for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(),
           [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });

  // Go through the TCBs the caller is a part of and emit warnings if Caller
  // is in a TCB that the Callee is not.
  for_each(
      Caller->specific_attrs<EnforceTCBAttr>(),
      [&](const auto *A) {
        StringRef CallerTCB = A->getTCBName();
        if (CalleeTCBs.count(CallerTCB) == 0) {
          this->Diag(TheCall->getExprLoc(),
                     diag::warn_tcb_enforcement_violation) << Callee
                                                           << CallerTCB;
        }
      });
}
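
// A sketch of the pattern CheckTCBEnforcement diagnoses (illustrative only;
// the function names are hypothetical):
//   void Helper();
//   __attribute__((enforce_tcb("net"))) void Parse();
//   __attribute__((enforce_tcb("net"))) void Handle() {
//     Parse();  // OK: callee is in the same TCB
//     Helper(); // warns: callee is not part of the "net" TCB
//   }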