//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
56 #include "clang/Sema/Initialization.h" 57 #include "clang/Sema/Lookup.h" 58 #include "clang/Sema/Ownership.h" 59 #include "clang/Sema/Scope.h" 60 #include "clang/Sema/ScopeInfo.h" 61 #include "clang/Sema/Sema.h" 62 #include "clang/Sema/SemaInternal.h" 63 #include "llvm/ADT/APFloat.h" 64 #include "llvm/ADT/APInt.h" 65 #include "llvm/ADT/APSInt.h" 66 #include "llvm/ADT/ArrayRef.h" 67 #include "llvm/ADT/DenseMap.h" 68 #include "llvm/ADT/FoldingSet.h" 69 #include "llvm/ADT/None.h" 70 #include "llvm/ADT/Optional.h" 71 #include "llvm/ADT/STLExtras.h" 72 #include "llvm/ADT/SmallBitVector.h" 73 #include "llvm/ADT/SmallPtrSet.h" 74 #include "llvm/ADT/SmallString.h" 75 #include "llvm/ADT/SmallVector.h" 76 #include "llvm/ADT/StringRef.h" 77 #include "llvm/ADT/StringSwitch.h" 78 #include "llvm/ADT/Triple.h" 79 #include "llvm/Support/AtomicOrdering.h" 80 #include "llvm/Support/Casting.h" 81 #include "llvm/Support/Compiler.h" 82 #include "llvm/Support/ConvertUTF.h" 83 #include "llvm/Support/ErrorHandling.h" 84 #include "llvm/Support/Format.h" 85 #include "llvm/Support/Locale.h" 86 #include "llvm/Support/MathExtras.h" 87 #include "llvm/Support/SaveAndRestore.h" 88 #include "llvm/Support/raw_ostream.h" 89 #include <algorithm> 90 #include <cassert> 91 #include <cstddef> 92 #include <cstdint> 93 #include <functional> 94 #include <limits> 95 #include <string> 96 #include <tuple> 97 #include <utility> 98 99 using namespace clang; 100 using namespace sema; 101 102 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 103 unsigned ByteNo) const { 104 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, 105 Context.getTargetInfo()); 106 } 107 108 /// Checks that a call expression's argument count is the desired number. 109 /// This is useful when doing custom type-checking. Returns true on error. 110 static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) { 111 unsigned argCount = call->getNumArgs(); 112 if (argCount == desiredArgCount) return false; 113 114 if (argCount < desiredArgCount) 115 return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args) 116 << 0 /*function call*/ << desiredArgCount << argCount 117 << call->getSourceRange(); 118 119 // Highlight all the excess arguments. 120 SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(), 121 call->getArg(argCount - 1)->getEndLoc()); 122 123 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args) 124 << 0 /*function call*/ << desiredArgCount << argCount 125 << call->getArg(1)->getSourceRange(); 126 } 127 128 /// Check that the first argument to __builtin_annotation is an integer 129 /// and the second argument is a non-wide string literal. 130 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 131 if (checkArgCount(S, TheCall, 2)) 132 return true; 133 134 // First argument should be an integer. 135 Expr *ValArg = TheCall->getArg(0); 136 QualType Ty = ValArg->getType(); 137 if (!Ty->isIntegerType()) { 138 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 139 << ValArg->getSourceRange(); 140 return true; 141 } 142 143 // Second argument should be a constant string. 
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check the number of arguments, and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = TheCall->getArg(I);
    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        S.getASTContext(), Ty, /*consume*/ false);
    Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(I, Arg.get());
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
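  // Illustrative note (not from the original source): for a call like
  //   bool ov = __builtin_add_overflow(a, b, &res);
  // 'res' must be a non-const integer object so that '&res' satisfies the
  // pointer check below.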
  {
    ExprResult Arg = TheCall->getArg(2);
    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
          !PtrTy->getPointeeType().isConstQualified())) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        S.getASTContext(), Ty, /*consume*/ false);
    Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(2, Arg.get());
  }
  return false;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
/// __builtin_*_chk function, then use the object size argument specified in the
/// source. Otherwise, infer the object size using __builtin_object_size.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // FIXME: There are some more useful checks we could be doing here:
  //  - Analyze the format string of sprintf to see how much of buffer is used.
  //  - Evaluate strlen of strcpy arguments, use as object size.
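
  // Illustrative case this check is intended to catch (not from the original
  // source):
  //   char Buf[4];
  //   __builtin___memcpy_chk(Buf, Src, 8, __builtin_object_size(Buf, 0));
  // Here the used size (8) exceeds the object size (4), so the _chk path
  // below emits warn_builtin_chk_overflow.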

  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  unsigned DiagID = 0;
  bool IsChkVariant = false;
  unsigned SizeIndex, ObjectIndex;
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = TheCall->getNumArgs() - 2;
    ObjectIndex = TheCall->getNumArgs() - 1;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = 1;
    ObjectIndex = 3;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    DiagID = diag::warn_fortify_source_overflow;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = 1;
    ObjectIndex = 0;
    break;
  }
  }

  llvm::APSInt ObjectSize;
  // For __builtin___*_chk, the object size is explicitly provided by the caller
  // (usually using __builtin_object_size). Use that value to check this call.
  if (IsChkVariant) {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(ObjectIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    ObjectSize = Result.Val.getInt();

    // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
  } else {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    Expr *ObjArg = TheCall->getArg(ObjectIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return;
    // Get the object size in the target's size_t width.
    const TargetInfo &TI = getASTContext().getTargetInfo();
    unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
    ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  }

  // Evaluate the number of bytes of the object that this call will use.
  Expr::EvalResult Result;
  Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
  if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
    return;
  llvm::APSInt UsedSize = Result.Val.getInt();

  if (UsedSize.ule(ObjectSize))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << ObjectSize.toString(/*Radix=*/10)
                          << UsedSize.toString(/*Radix=*/10));
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->getAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
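      // Illustrative example (not from the original source): a block such as
      //   ^(global int *p) { ... }
      // reaches this error path, since its parameter is not a 'local void *'.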
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
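  // Illustrative call shape (not from the original source):
  //   enqueue_kernel(q, flags, ndr,
  //                  ^(local void *p0, local void *p1){ ... },
  //                  (uint)sz0, (uint)sz1);
  // i.e. one trailing size argument per 'local void *' block parameter.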
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_few_args);
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // Check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // We have a block type; check the prototype.
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // We can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // The last two cases have either exactly 7 args, or 7 args and varargs.
  if (NumArgs >= 7) {
    // Check the common block argument.
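    // In the event-based overloads the block is the seventh argument, e.g.
    // (illustrative only, not from the original source):
    //   enqueue_kernel(q, flags, ndr, num_events, wait_list, &evt,
    //                  ^(void){ ... });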
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be an integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // Check the remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases was detected; give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns the OpenCL access qualifier attribute of the given declaration,
/// if any.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Checks that the first argument to a pipe builtin is a pipe and that its
/// access qualifier is compatible with the builtin (read vs. write).
/// Returns true if a semantic error has been found.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validate that the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
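  // Illustrative example (not from the original source):
  //   read_only pipe int p;
  //   write_pipe(p, &val);   // rejected below: requires a write_only pipe
  //   read_pipe(p, &val);    // accepted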
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
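    // e.g. read_pipe(p, rid, index, &val) -- 'index' selects the packet
    // within the reservation and must have an integer type (illustrative
    // note, not from the original source).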
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we use int as
  // the return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (Call->getNumArgs() != 1) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
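  // e.g. a call to _interlockedbittestandset_acq() while targeting x86 is
  // rejected by the target check below (illustrative note, not from the
  // original source).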
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
  case Builtin::BI__builtin_prefetch:
    if (SemaBuiltinPrefetch(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_alloca_with_align:
    if (SemaBuiltinAllocaWithAlign(TheCall))
      return ExprError();
    break;
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume:
    if (SemaBuiltinAssume(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_assume_aligned:
    if (SemaBuiltinAssumeAligned(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size:
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
      return ExprError();
    break;
  case Builtin::BI__builtin_longjmp:
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_setjmp:
    if (SemaBuiltinSetjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI_setjmp:
  case Builtin::BI_setjmpex:
    if (checkArgCount(*this, TheCall, 1))
      return true;
    break;
  case Builtin::BI__builtin_classify_type:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__builtin_constant_p: {
    if (checkArgCount(*this, TheCall, 1)) return true;
    ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(0, Arg.get());
    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_launder:
    return SemaBuiltinLaunder(*this, TheCall);
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return SemaBuiltinAtomicOverloaded(TheCallResult);
  case Builtin::BI__sync_synchronize:
    Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
        << TheCall->getCallee()->getSourceRange();
    break;
  case Builtin::BI__builtin_nontemporal_load:
  case Builtin::BI__builtin_nontemporal_store:
    return SemaBuiltinNontemporalOverloaded(TheCallResult);
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
  case Builtin::BI##ID: \
    return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
  case Builtin::BI__annotation:
    if (SemaBuiltinMSVCAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_annotation:
    if (SemaBuiltinAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_addressof:
    if (SemaBuiltinAddressof(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
    if (SemaBuiltinOverflow(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_operator_new:
  case Builtin::BI__builtin_operator_delete: {
    bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
    ExprResult Res =
        SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
    if (Res.isInvalid())
      CorrectDelayedTyposInExpr(TheCallResult.get());
    return Res;
  }
  case Builtin::BI__builtin_dump_struct: {
    // We first want to ensure we are called with 2 arguments.
    if (checkArgCount(*this, TheCall, 2))
      return ExprError();
    // Ensure that the first argument is of type 'struct XX *'.
    const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
    const QualType PtrArgType = PtrArg->getType();
    if (!PtrArgType->isPointerType() ||
        !PtrArgType->getPointeeType()->isRecordType()) {
      Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
          << "structure pointer";
      return ExprError();
    }

    // Ensure that the second argument is a pointer to a function of type
    // 'int (const char *, ...)'.
    const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
    const QualType FnPtrArgType = FnPtrArg->getType();
    if (!FnPtrArgType->isPointerType()) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    const auto *FuncType =
        FnPtrArgType->getPointeeType()->getAs<FunctionType>();

    if (!FuncType) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
      if (!FT->getNumParams()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
      QualType PT = FT->getParamType(0);
      if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
          !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
          !PT->getPointeeType().isConstQualified()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
    }

    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_preserve_access_index:
    if (SemaBuiltinPreserveAI(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_call_with_static_chain:
    if (SemaBuiltinCallWithStaticChain(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
                                 diag::err_seh___except_block))
      return ExprError();
    break;
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
                                 diag::err_seh___except_filter))
      return ExprError();
    break;
  case Builtin::BI__GetExceptionInfo:
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();

    if (CheckCXXThrowOperand(
            TheCall->getBeginLoc(),
            Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
            TheCall))
      return ExprError();

    TheCall->setType(Context.VoidPtrTy);
    break;
  // OpenCL v2.0, s6.13.16 - Pipe functions
  case Builtin::BIread_pipe:
  case Builtin::BIwrite_pipe:
    // Since those two functions are declared with var args, we need a semantic
    // check for the argument.
    if (SemaBuiltinRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
    if (SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
    if (SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_pipe_num_packets:
  case Builtin::BIget_pipe_max_packets:
    if (SemaBuiltinPipePackets(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIto_global:
  case Builtin::BIto_local:
  case Builtin::BIto_private:
    if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
      return ExprError();
    break;
  // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
  case Builtin::BIenqueue_kernel:
    if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_work_group_size:
  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
    if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
    if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_os_log_format:
  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (SemaBuiltinOSLogFormat(TheCall))
      return ExprError();
    break;
  }

  // Since the target specific builtins for each arch overlap, only check those
  // of the arch we are compiling for.
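  // e.g. an AArch64 NEON builtin is only range-checked below when the triple
  // is aarch64/aarch64_be (illustrative note, not from the original source).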
  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::arm:
    case llvm::Triple::armeb:
    case llvm::Triple::thumb:
    case llvm::Triple::thumbeb:
      if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::aarch64:
    case llvm::Triple::aarch64_be:
      if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::hexagon:
      if (CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::mips:
    case llvm::Triple::mipsel:
    case llvm::Triple::mips64:
    case llvm::Triple::mips64el:
      if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::systemz:
      if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::ppc:
    case llvm::Triple::ppc64:
    case llvm::Triple::ppc64le:
      if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    default:
      break;
    }
  }

  return TheCallResult;
}

// Get the valid immediate range for the specified NEON type code.
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
  NeonTypeFlags Type(t);
  int IsQuad = ForceQuad ? true : Type.isQuad();
  switch (Type.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return shift ? 7 : (8 << IsQuad) - 1;
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return shift ? 15 : (4 << IsQuad) - 1;
  case NeonTypeFlags::Int32:
    return shift ? 31 : (2 << IsQuad) - 1;
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return shift ? 63 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Poly128:
    return shift ? 127 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Float16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  case NeonTypeFlags::Float32:
    assert(!shift && "cannot shift float types!");
    return (2 << IsQuad) - 1;
  case NeonTypeFlags::Float64:
    assert(!shift && "cannot shift float types!");
    return (1 << IsQuad) - 1;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

/// getNeonEltType - Return the QualType corresponding to the elements of
/// the vector type specified by the NeonTypeFlags. This is used to check
/// the pointer arguments for Neon load/store intrinsics.
static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
                               bool IsPolyUnsigned, bool IsInt64Long) {
  switch (Flags.getEltType()) {
  case NeonTypeFlags::Int8:
    return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Int16:
    return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Int32:
    return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
  case NeonTypeFlags::Int64:
    if (IsInt64Long)
      return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
    else
      return Flags.isUnsigned() ? Context.UnsignedLongLongTy
                                : Context.LongLongTy;
  case NeonTypeFlags::Poly8:
    return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Poly16:
    return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Poly64:
    if (IsInt64Long)
      return Context.UnsignedLongTy;
    else
      return Context.UnsignedLongLongTy;
  case NeonTypeFlags::Poly128:
    break;
  case NeonTypeFlags::Float16:
    return Context.HalfTy;
  case NeonTypeFlags::Float32:
    return Context.FloatTy;
  case NeonTypeFlags::Float64:
    return Context.DoubleTy;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  llvm::APSInt Result;
  uint64_t mask = 0;
  unsigned TV = 0;
  int PtrArgNum = -1;
  bool HasConstPtr = false;
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  unsigned ImmArg = TheCall->getNumArgs()-1;
  if (mask) {
    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
      return true;

    TV = Result.getLimitedValue(64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
             << TheCall->getArg(ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long =
        Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
    QualType EltTy =
        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = Context.getPointerType(EltTy);
    AssignConvertType ConvTy;
    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
                                 RHS.get(), AA_Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
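  // A minimal user-level illustration of what this catches (hypothetical
  // example; the actual (i, l, u) triples come from arm_neon.inc):
  //
  //   int32x4_t v = vdupq_n_s32(7);
  //   int a = vgetq_lane_s32(v, 3);   // OK: lane index is a constant in [0, 3]
  //   int b = vgetq_lane_s32(v, 4);   // rejected by the range check below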
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
#define GET_NEON_IMMEDIATE_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_IMMEDIATE_CHECK
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}

bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
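  // For illustration: with the check below, __builtin_arm_ldrex accepts
  // pointees such as 'int', 'float' or 'int *', while something like a
  // pointer to a struct is rejected with
  // err_atomic_builtin_must_be_pointer_intfltptr.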
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}

bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
  }

  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
  }

  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);

  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
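  // For example, __builtin_arm_ssat(x, n) saturates x to an n-bit signed
  // value, so the encodable saturation positions are 1..32, while the
  // unsigned form __builtin_arm_usat uses bit positions 0..31; the cases
  // below enforce exactly those ranges on the immediate operand.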
  switch (BuiltinID) {
  default: return false;
  case ARM::BI__builtin_arm_ssat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
  case ARM::BI__builtin_arm_usat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
  }
}

bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);

  if (BuiltinID == AArch64::BI__getReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);

  if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
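  // For instance, the AArch64 barrier builtins below (dmb/dsb/isb) encode a
  // 4-bit option field, so a call such as __builtin_arm_dmb(11 /*ISH*/) is
  // accepted while any constant outside [0, 15] is diagnosed.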
1925 unsigned i = 0, l = 0, u = 0; 1926 switch (BuiltinID) { 1927 default: return false; 1928 case AArch64::BI__builtin_arm_dmb: 1929 case AArch64::BI__builtin_arm_dsb: 1930 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 1931 } 1932 1933 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 1934 } 1935 1936 bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) { 1937 struct BuiltinAndString { 1938 unsigned BuiltinID; 1939 const char *Str; 1940 }; 1941 1942 static BuiltinAndString ValidCPU[] = { 1943 { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, "v65,v66" }, 1944 { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, "v62,v65,v66" }, 1945 { Hexagon::BI__builtin_HEXAGON_F2_dfadd, "v66" }, 1946 { Hexagon::BI__builtin_HEXAGON_F2_dfsub, "v66" }, 1947 { Hexagon::BI__builtin_HEXAGON_M2_mnaci, "v66" }, 1948 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, "v62,v65,v66" }, 1949 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, "v62,v65,v66" }, 1950 { Hexagon::BI__builtin_HEXAGON_S2_mask, "v66" }, 1951 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, "v60,v62,v65,v66" }, 1952 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, "v60,v62,v65,v66" }, 1953 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, "v60,v62,v65,v66" }, 1954 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, "v60,v62,v65,v66" }, 1955 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, "v60,v62,v65,v66" }, 1956 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, "v60,v62,v65,v66" }, 1957 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, "v60,v62,v65,v66" }, 1958 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, "v60,v62,v65,v66" }, 1959 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, "v60,v62,v65,v66" }, 1960 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, "v60,v62,v65,v66" }, 1961 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, "v60,v62,v65,v66" }, 1962 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, "v60,v62,v65,v66" }, 1963 { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, "v62,v65,v66" }, 1964 { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, "v62,v65,v66" }, 1965 { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, "v62,v65,v66" }, 1966 }; 1967 1968 static BuiltinAndString ValidHVX[] = { 1969 { Hexagon::BI__builtin_HEXAGON_V6_hi, "v60,v62,v65,v66" }, 1970 { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, "v60,v62,v65,v66" }, 1971 { Hexagon::BI__builtin_HEXAGON_V6_lo, "v60,v62,v65,v66" }, 1972 { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, "v60,v62,v65,v66" }, 1973 { Hexagon::BI__builtin_HEXAGON_V6_extractw, "v60,v62,v65,v66" }, 1974 { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, "v60,v62,v65,v66" }, 1975 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, "v62,v65,v66" }, 1976 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, "v62,v65,v66" }, 1977 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, "v62,v65,v66" }, 1978 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, "v62,v65,v66" }, 1979 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, "v60,v62,v65,v66" }, 1980 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, "v60,v62,v65,v66" }, 1981 { Hexagon::BI__builtin_HEXAGON_V6_pred_and, "v60,v62,v65,v66" }, 1982 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, "v60,v62,v65,v66" }, 1983 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, "v60,v62,v65,v66" }, 1984 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, "v60,v62,v65,v66" }, 1985 { Hexagon::BI__builtin_HEXAGON_V6_pred_not, "v60,v62,v65,v66" }, 1986 { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, "v60,v62,v65,v66" }, 1987 { Hexagon::BI__builtin_HEXAGON_V6_pred_or, "v60,v62,v65,v66" }, 1988 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, 
"v60,v62,v65,v66" }, 1989 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, "v60,v62,v65,v66" }, 1990 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, "v60,v62,v65,v66" }, 1991 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, "v60,v62,v65,v66" }, 1992 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, "v60,v62,v65,v66" }, 1993 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, "v62,v65,v66" }, 1994 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, "v62,v65,v66" }, 1995 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, "v60,v62,v65,v66" }, 1996 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, "v60,v62,v65,v66" }, 1997 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, "v62,v65,v66" }, 1998 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, "v62,v65,v66" }, 1999 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, "v62,v65,v66" }, 2000 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, "v62,v65,v66" }, 2001 { Hexagon::BI__builtin_HEXAGON_V6_vabsb, "v65,v66" }, 2002 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, "v65,v66" }, 2003 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, "v65,v66" }, 2004 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, "v65,v66" }, 2005 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, "v60,v62,v65,v66" }, 2006 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, "v60,v62,v65,v66" }, 2007 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, "v60,v62,v65,v66" }, 2008 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, "v60,v62,v65,v66" }, 2009 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, "v60,v62,v65,v66" }, 2010 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, "v60,v62,v65,v66" }, 2011 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, "v60,v62,v65,v66" }, 2012 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, "v60,v62,v65,v66" }, 2013 { Hexagon::BI__builtin_HEXAGON_V6_vabsh, "v60,v62,v65,v66" }, 2014 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, "v60,v62,v65,v66" }, 2015 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, "v60,v62,v65,v66" }, 2016 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, "v60,v62,v65,v66" }, 2017 { Hexagon::BI__builtin_HEXAGON_V6_vabsw, "v60,v62,v65,v66" }, 2018 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, "v60,v62,v65,v66" }, 2019 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, "v60,v62,v65,v66" }, 2020 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, "v60,v62,v65,v66" }, 2021 { Hexagon::BI__builtin_HEXAGON_V6_vaddb, "v60,v62,v65,v66" }, 2022 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, "v60,v62,v65,v66" }, 2023 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, "v60,v62,v65,v66" }, 2024 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, "v60,v62,v65,v66" }, 2025 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, "v62,v65,v66" }, 2026 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, "v62,v65,v66" }, 2027 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, "v62,v65,v66" }, 2028 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, "v62,v65,v66" }, 2029 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, "v62,v65,v66" }, 2030 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, "v62,v65,v66" }, 2031 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat, "v66" }, 2032 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat_128B, "v66" }, 2033 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, "v62,v65,v66" }, 2034 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, "v62,v65,v66" }, 2035 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, "v62,v65,v66" }, 2036 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, "v62,v65,v66" }, 2037 { Hexagon::BI__builtin_HEXAGON_V6_vaddh, "v60,v62,v65,v66" }, 2038 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, "v60,v62,v65,v66" }, 
2039 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, "v60,v62,v65,v66" }, 2040 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, "v60,v62,v65,v66" }, 2041 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, "v60,v62,v65,v66" }, 2042 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, "v60,v62,v65,v66" }, 2043 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, "v60,v62,v65,v66" }, 2044 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, "v60,v62,v65,v66" }, 2045 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, "v60,v62,v65,v66" }, 2046 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, "v60,v62,v65,v66" }, 2047 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, "v62,v65,v66" }, 2048 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, "v62,v65,v66" }, 2049 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, "v60,v62,v65,v66" }, 2050 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, "v60,v62,v65,v66" }, 2051 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, "v62,v65,v66" }, 2052 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, "v62,v65,v66" }, 2053 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, "v60,v62,v65,v66" }, 2054 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, "v60,v62,v65,v66" }, 2055 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, "v60,v62,v65,v66" }, 2056 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, "v60,v62,v65,v66" }, 2057 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, "v62,v65,v66" }, 2058 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, "v62,v65,v66" }, 2059 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, "v60,v62,v65,v66" }, 2060 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, "v60,v62,v65,v66" }, 2061 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, "v60,v62,v65,v66" }, 2062 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, "v60,v62,v65,v66" }, 2063 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, "v60,v62,v65,v66" }, 2064 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, "v60,v62,v65,v66" }, 2065 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, "v62,v65,v66" }, 2066 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, "v62,v65,v66" }, 2067 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, "v62,v65,v66" }, 2068 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, "v62,v65,v66" }, 2069 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, "v62,v65,v66" }, 2070 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, "v62,v65,v66" }, 2071 { Hexagon::BI__builtin_HEXAGON_V6_vaddw, "v60,v62,v65,v66" }, 2072 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, "v60,v62,v65,v66" }, 2073 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, "v60,v62,v65,v66" }, 2074 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, "v60,v62,v65,v66" }, 2075 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, "v60,v62,v65,v66" }, 2076 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, "v60,v62,v65,v66" }, 2077 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, "v60,v62,v65,v66" }, 2078 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, "v60,v62,v65,v66" }, 2079 { Hexagon::BI__builtin_HEXAGON_V6_valignb, "v60,v62,v65,v66" }, 2080 { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, "v60,v62,v65,v66" }, 2081 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, "v60,v62,v65,v66" }, 2082 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, "v60,v62,v65,v66" }, 2083 { Hexagon::BI__builtin_HEXAGON_V6_vand, "v60,v62,v65,v66" }, 2084 { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, "v60,v62,v65,v66" }, 2085 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, "v62,v65,v66" }, 2086 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, "v62,v65,v66" }, 2087 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, "v62,v65,v66" }, 2088 { 
Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, "v62,v65,v66" }, 2089 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, "v60,v62,v65,v66" }, 2090 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, "v60,v62,v65,v66" }, 2091 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, "v60,v62,v65,v66" }, 2092 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, "v60,v62,v65,v66" }, 2093 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, "v62,v65,v66" }, 2094 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, "v62,v65,v66" }, 2095 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, "v62,v65,v66" }, 2096 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, "v62,v65,v66" }, 2097 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, "v60,v62,v65,v66" }, 2098 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, "v60,v62,v65,v66" }, 2099 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, "v60,v62,v65,v66" }, 2100 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, "v60,v62,v65,v66" }, 2101 { Hexagon::BI__builtin_HEXAGON_V6_vaslh, "v60,v62,v65,v66" }, 2102 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, "v60,v62,v65,v66" }, 2103 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, "v65,v66" }, 2104 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, "v65,v66" }, 2105 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, "v60,v62,v65,v66" }, 2106 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, "v60,v62,v65,v66" }, 2107 { Hexagon::BI__builtin_HEXAGON_V6_vaslw, "v60,v62,v65,v66" }, 2108 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, "v60,v62,v65,v66" }, 2109 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, "v60,v62,v65,v66" }, 2110 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, "v60,v62,v65,v66" }, 2111 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, "v60,v62,v65,v66" }, 2112 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, "v60,v62,v65,v66" }, 2113 { Hexagon::BI__builtin_HEXAGON_V6_vasrh, "v60,v62,v65,v66" }, 2114 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, "v60,v62,v65,v66" }, 2115 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, "v65,v66" }, 2116 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, "v65,v66" }, 2117 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, "v60,v62,v65,v66" }, 2118 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, "v60,v62,v65,v66" }, 2119 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, "v62,v65,v66" }, 2120 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, "v62,v65,v66" }, 2121 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, "v60,v62,v65,v66" }, 2122 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, "v60,v62,v65,v66" }, 2123 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, "v60,v62,v65,v66" }, 2124 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, "v60,v62,v65,v66" }, 2125 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, "v60,v62,v65,v66" }, 2126 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, "v60,v62,v65,v66" }, 2127 { Hexagon::BI__builtin_HEXAGON_V6_vasr_into, "v66" }, 2128 { Hexagon::BI__builtin_HEXAGON_V6_vasr_into_128B, "v66" }, 2129 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, "v65,v66" }, 2130 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, "v65,v66" }, 2131 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, "v65,v66" }, 2132 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, "v65,v66" }, 2133 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, "v62,v65,v66" }, 2134 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "v62,v65,v66" }, 2135 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, "v65,v66" }, 2136 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, "v65,v66" }, 2137 { Hexagon::BI__builtin_HEXAGON_V6_vasrw, "v60,v62,v65,v66" }, 2138 { 
Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, "v60,v62,v65,v66" }, 2139 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, "v60,v62,v65,v66" }, 2140 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, "v60,v62,v65,v66" }, 2141 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, "v60,v62,v65,v66" }, 2142 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, "v60,v62,v65,v66" }, 2143 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, "v60,v62,v65,v66" }, 2144 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, "v60,v62,v65,v66" }, 2145 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, "v60,v62,v65,v66" }, 2146 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, "v60,v62,v65,v66" }, 2147 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, "v62,v65,v66" }, 2148 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "v62,v65,v66" }, 2149 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, "v60,v62,v65,v66" }, 2150 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, "v60,v62,v65,v66" }, 2151 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, "v60,v62,v65,v66" }, 2152 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, "v60,v62,v65,v66" }, 2153 { Hexagon::BI__builtin_HEXAGON_V6_vassign, "v60,v62,v65,v66" }, 2154 { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, "v60,v62,v65,v66" }, 2155 { Hexagon::BI__builtin_HEXAGON_V6_vassignp, "v60,v62,v65,v66" }, 2156 { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, "v60,v62,v65,v66" }, 2157 { Hexagon::BI__builtin_HEXAGON_V6_vavgb, "v65,v66" }, 2158 { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, "v65,v66" }, 2159 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, "v65,v66" }, 2160 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, "v65,v66" }, 2161 { Hexagon::BI__builtin_HEXAGON_V6_vavgh, "v60,v62,v65,v66" }, 2162 { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, "v60,v62,v65,v66" }, 2163 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, "v60,v62,v65,v66" }, 2164 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, "v60,v62,v65,v66" }, 2165 { Hexagon::BI__builtin_HEXAGON_V6_vavgub, "v60,v62,v65,v66" }, 2166 { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, "v60,v62,v65,v66" }, 2167 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, "v60,v62,v65,v66" }, 2168 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, "v60,v62,v65,v66" }, 2169 { Hexagon::BI__builtin_HEXAGON_V6_vavguh, "v60,v62,v65,v66" }, 2170 { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, "v60,v62,v65,v66" }, 2171 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, "v60,v62,v65,v66" }, 2172 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, "v60,v62,v65,v66" }, 2173 { Hexagon::BI__builtin_HEXAGON_V6_vavguw, "v65,v66" }, 2174 { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, "v65,v66" }, 2175 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, "v65,v66" }, 2176 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, "v65,v66" }, 2177 { Hexagon::BI__builtin_HEXAGON_V6_vavgw, "v60,v62,v65,v66" }, 2178 { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, "v60,v62,v65,v66" }, 2179 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, "v60,v62,v65,v66" }, 2180 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, "v60,v62,v65,v66" }, 2181 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, "v60,v62,v65,v66" }, 2182 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, "v60,v62,v65,v66" }, 2183 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, "v60,v62,v65,v66" }, 2184 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, "v60,v62,v65,v66" }, 2185 { Hexagon::BI__builtin_HEXAGON_V6_vcombine, "v60,v62,v65,v66" }, 2186 { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, "v60,v62,v65,v66" }, 2187 { Hexagon::BI__builtin_HEXAGON_V6_vd0, "v60,v62,v65,v66" }, 2188 { 
Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, "v60,v62,v65,v66" }, 2189 { Hexagon::BI__builtin_HEXAGON_V6_vdd0, "v65,v66" }, 2190 { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, "v65,v66" }, 2191 { Hexagon::BI__builtin_HEXAGON_V6_vdealb, "v60,v62,v65,v66" }, 2192 { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, "v60,v62,v65,v66" }, 2193 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, "v60,v62,v65,v66" }, 2194 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, "v60,v62,v65,v66" }, 2195 { Hexagon::BI__builtin_HEXAGON_V6_vdealh, "v60,v62,v65,v66" }, 2196 { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, "v60,v62,v65,v66" }, 2197 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, "v60,v62,v65,v66" }, 2198 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, "v60,v62,v65,v66" }, 2199 { Hexagon::BI__builtin_HEXAGON_V6_vdelta, "v60,v62,v65,v66" }, 2200 { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, "v60,v62,v65,v66" }, 2201 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, "v60,v62,v65,v66" }, 2202 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, "v60,v62,v65,v66" }, 2203 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, "v60,v62,v65,v66" }, 2204 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, "v60,v62,v65,v66" }, 2205 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, "v60,v62,v65,v66" }, 2206 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, "v60,v62,v65,v66" }, 2207 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, "v60,v62,v65,v66" }, 2208 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "v60,v62,v65,v66" }, 2209 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, "v60,v62,v65,v66" }, 2210 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, "v60,v62,v65,v66" }, 2211 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, "v60,v62,v65,v66" }, 2212 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "v60,v62,v65,v66" }, 2213 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, "v60,v62,v65,v66" }, 2214 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "v60,v62,v65,v66" }, 2215 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "v60,v62,v65,v66" }, 2216 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "v60,v62,v65,v66" }, 2217 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, "v60,v62,v65,v66" }, 2218 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, "v60,v62,v65,v66" }, 2219 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, "v60,v62,v65,v66" }, 2220 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "v60,v62,v65,v66" }, 2221 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, "v60,v62,v65,v66" }, 2222 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, "v60,v62,v65,v66" }, 2223 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, "v60,v62,v65,v66" }, 2224 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "v60,v62,v65,v66" }, 2225 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, "v60,v62,v65,v66" }, 2226 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "v60,v62,v65,v66" }, 2227 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "v60,v62,v65,v66" }, 2228 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "v60,v62,v65,v66" }, 2229 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, "v60,v62,v65,v66" }, 2230 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, "v60,v62,v65,v66" }, 2231 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, "v60,v62,v65,v66" }, 2232 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "v60,v62,v65,v66" }, 2233 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, "v60,v62,v65,v66" }, 2234 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, "v60,v62,v65,v66" }, 2235 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, 
"v60,v62,v65,v66" }, 2236 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "v60,v62,v65,v66" }, 2237 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, "v60,v62,v65,v66" }, 2238 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, "v60,v62,v65,v66" }, 2239 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, "v60,v62,v65,v66" }, 2240 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, "v60,v62,v65,v66" }, 2241 { Hexagon::BI__builtin_HEXAGON_V6_veqb, "v60,v62,v65,v66" }, 2242 { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, "v60,v62,v65,v66" }, 2243 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, "v60,v62,v65,v66" }, 2244 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, "v60,v62,v65,v66" }, 2245 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, "v60,v62,v65,v66" }, 2246 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, "v60,v62,v65,v66" }, 2247 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, "v60,v62,v65,v66" }, 2248 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, "v60,v62,v65,v66" }, 2249 { Hexagon::BI__builtin_HEXAGON_V6_veqh, "v60,v62,v65,v66" }, 2250 { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, "v60,v62,v65,v66" }, 2251 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, "v60,v62,v65,v66" }, 2252 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, "v60,v62,v65,v66" }, 2253 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, "v60,v62,v65,v66" }, 2254 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, "v60,v62,v65,v66" }, 2255 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, "v60,v62,v65,v66" }, 2256 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, "v60,v62,v65,v66" }, 2257 { Hexagon::BI__builtin_HEXAGON_V6_veqw, "v60,v62,v65,v66" }, 2258 { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, "v60,v62,v65,v66" }, 2259 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, "v60,v62,v65,v66" }, 2260 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, "v60,v62,v65,v66" }, 2261 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, "v60,v62,v65,v66" }, 2262 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, "v60,v62,v65,v66" }, 2263 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, "v60,v62,v65,v66" }, 2264 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, "v60,v62,v65,v66" }, 2265 { Hexagon::BI__builtin_HEXAGON_V6_vgtb, "v60,v62,v65,v66" }, 2266 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, "v60,v62,v65,v66" }, 2267 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, "v60,v62,v65,v66" }, 2268 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, "v60,v62,v65,v66" }, 2269 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, "v60,v62,v65,v66" }, 2270 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, "v60,v62,v65,v66" }, 2271 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, "v60,v62,v65,v66" }, 2272 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, "v60,v62,v65,v66" }, 2273 { Hexagon::BI__builtin_HEXAGON_V6_vgth, "v60,v62,v65,v66" }, 2274 { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, "v60,v62,v65,v66" }, 2275 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, "v60,v62,v65,v66" }, 2276 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, "v60,v62,v65,v66" }, 2277 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, "v60,v62,v65,v66" }, 2278 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, "v60,v62,v65,v66" }, 2279 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, "v60,v62,v65,v66" }, 2280 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, "v60,v62,v65,v66" }, 2281 { Hexagon::BI__builtin_HEXAGON_V6_vgtub, "v60,v62,v65,v66" }, 2282 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, "v60,v62,v65,v66" }, 2283 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, "v60,v62,v65,v66" }, 2284 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, "v60,v62,v65,v66" }, 2285 { 
Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, "v60,v62,v65,v66" }, 2286 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, "v60,v62,v65,v66" }, 2287 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, "v60,v62,v65,v66" }, 2288 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, "v60,v62,v65,v66" }, 2289 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, "v60,v62,v65,v66" }, 2290 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, "v60,v62,v65,v66" }, 2291 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, "v60,v62,v65,v66" }, 2292 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, "v60,v62,v65,v66" }, 2293 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, "v60,v62,v65,v66" }, 2294 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, "v60,v62,v65,v66" }, 2295 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, "v60,v62,v65,v66" }, 2296 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, "v60,v62,v65,v66" }, 2297 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, "v60,v62,v65,v66" }, 2298 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, "v60,v62,v65,v66" }, 2299 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, "v60,v62,v65,v66" }, 2300 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, "v60,v62,v65,v66" }, 2301 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, "v60,v62,v65,v66" }, 2302 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, "v60,v62,v65,v66" }, 2303 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, "v60,v62,v65,v66" }, 2304 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, "v60,v62,v65,v66" }, 2305 { Hexagon::BI__builtin_HEXAGON_V6_vgtw, "v60,v62,v65,v66" }, 2306 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, "v60,v62,v65,v66" }, 2307 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, "v60,v62,v65,v66" }, 2308 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, "v60,v62,v65,v66" }, 2309 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, "v60,v62,v65,v66" }, 2310 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, "v60,v62,v65,v66" }, 2311 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, "v60,v62,v65,v66" }, 2312 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, "v60,v62,v65,v66" }, 2313 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, "v60,v62,v65,v66" }, 2314 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, "v60,v62,v65,v66" }, 2315 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, "v60,v62,v65,v66" }, 2316 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, "v60,v62,v65,v66" }, 2317 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, "v60,v62,v65,v66" }, 2318 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, "v60,v62,v65,v66" }, 2319 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, "v62,v65,v66" }, 2320 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, "v62,v65,v66" }, 2321 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, "v60,v62,v65,v66" }, 2322 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, "v60,v62,v65,v66" }, 2323 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, "v60,v62,v65,v66" }, 2324 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, "v60,v62,v65,v66" }, 2325 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, "v60,v62,v65,v66" }, 2326 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, "v60,v62,v65,v66" }, 2327 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, "v60,v62,v65,v66" }, 2328 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, "v60,v62,v65,v66" }, 2329 { Hexagon::BI__builtin_HEXAGON_V6_vlut4, "v65,v66" }, 2330 { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, "v65,v66" }, 2331 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, "v60,v62,v65,v66" }, 2332 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, "v60,v62,v65,v66" }, 2333 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, "v62,v65,v66" }, 2334 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, "v62,v65,v66" }, 2335 { 
Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, "v62,v65,v66" }, 2336 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, "v62,v65,v66" }, 2337 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, "v60,v62,v65,v66" }, 2338 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "v60,v62,v65,v66" }, 2339 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, "v62,v65,v66" }, 2340 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "v62,v65,v66" }, 2341 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, "v60,v62,v65,v66" }, 2342 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, "v60,v62,v65,v66" }, 2343 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, "v62,v65,v66" }, 2344 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, "v62,v65,v66" }, 2345 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, "v62,v65,v66" }, 2346 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, "v62,v65,v66" }, 2347 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, "v60,v62,v65,v66" }, 2348 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "v60,v62,v65,v66" }, 2349 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, "v62,v65,v66" }, 2350 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "v62,v65,v66" }, 2351 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, "v62,v65,v66" }, 2352 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, "v62,v65,v66" }, 2353 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, "v60,v62,v65,v66" }, 2354 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, "v60,v62,v65,v66" }, 2355 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, "v60,v62,v65,v66" }, 2356 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, "v60,v62,v65,v66" }, 2357 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, "v60,v62,v65,v66" }, 2358 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, "v60,v62,v65,v66" }, 2359 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, "v60,v62,v65,v66" }, 2360 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, "v60,v62,v65,v66" }, 2361 { Hexagon::BI__builtin_HEXAGON_V6_vminb, "v62,v65,v66" }, 2362 { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, "v62,v65,v66" }, 2363 { Hexagon::BI__builtin_HEXAGON_V6_vminh, "v60,v62,v65,v66" }, 2364 { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, "v60,v62,v65,v66" }, 2365 { Hexagon::BI__builtin_HEXAGON_V6_vminub, "v60,v62,v65,v66" }, 2366 { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, "v60,v62,v65,v66" }, 2367 { Hexagon::BI__builtin_HEXAGON_V6_vminuh, "v60,v62,v65,v66" }, 2368 { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, "v60,v62,v65,v66" }, 2369 { Hexagon::BI__builtin_HEXAGON_V6_vminw, "v60,v62,v65,v66" }, 2370 { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, "v60,v62,v65,v66" }, 2371 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, "v60,v62,v65,v66" }, 2372 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, "v60,v62,v65,v66" }, 2373 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, "v60,v62,v65,v66" }, 2374 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, "v60,v62,v65,v66" }, 2375 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, "v60,v62,v65,v66" }, 2376 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, "v60,v62,v65,v66" }, 2377 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, "v65,v66" }, 2378 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, "v65,v66" }, 2379 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, "v65,v66" }, 2380 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, "v65,v66" }, 2381 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, "v60,v62,v65,v66" }, 2382 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, "v60,v62,v65,v66" }, 2383 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, "v60,v62,v65,v66" }, 2384 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, "v60,v62,v65,v66" }, 2385 { 
Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, "v60,v62,v65,v66" }, 2386 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, "v60,v62,v65,v66" }, 2387 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, "v65,v66" }, 2388 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, "v65,v66" }, 2389 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, "v62,v65,v66" }, 2390 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, "v62,v65,v66" }, 2391 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, "v62,v65,v66" }, 2392 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, "v62,v65,v66" }, 2393 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, "v65,v66" }, 2394 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, "v65,v66" }, 2395 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, "v65,v66" }, 2396 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, "v65,v66" }, 2397 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, "v60,v62,v65,v66" }, 2398 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, "v60,v62,v65,v66" }, 2399 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, "v60,v62,v65,v66" }, 2400 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, "v60,v62,v65,v66" }, 2401 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, "v60,v62,v65,v66" }, 2402 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, "v60,v62,v65,v66" }, 2403 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, "v60,v62,v65,v66" }, 2404 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, "v60,v62,v65,v66" }, 2405 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, "v60,v62,v65,v66" }, 2406 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, "v60,v62,v65,v66" }, 2407 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, "v60,v62,v65,v66" }, 2408 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, "v60,v62,v65,v66" }, 2409 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, "v60,v62,v65,v66" }, 2410 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, "v60,v62,v65,v66" }, 2411 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, "v62,v65,v66" }, 2412 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, "v62,v65,v66" }, 2413 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, "v60,v62,v65,v66" }, 2414 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, "v60,v62,v65,v66" }, 2415 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, "v65,v66" }, 2416 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, "v65,v66" }, 2417 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, "v60,v62,v65,v66" }, 2418 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "v60,v62,v65,v66" }, 2419 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, "v60,v62,v65,v66" }, 2420 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, "v60,v62,v65,v66" }, 2421 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, "v60,v62,v65,v66" }, 2422 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, "v60,v62,v65,v66" }, 2423 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, "v60,v62,v65,v66" }, 2424 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, "v60,v62,v65,v66" }, 2425 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, "v60,v62,v65,v66" }, 2426 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, "v60,v62,v65,v66" }, 2427 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, "v60,v62,v65,v66" }, 2428 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, "v60,v62,v65,v66" }, 2429 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, "v60,v62,v65,v66" }, 2430 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, "v60,v62,v65,v66" }, 2431 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, "v60,v62,v65,v66" }, 2432 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, "v60,v62,v65,v66" }, 2433 { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, "v60,v62,v65,v66" }, 2434 { 
Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, "v60,v62,v65,v66" }, 2435 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, "v60,v62,v65,v66" }, 2436 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "v60,v62,v65,v66" }, 2437 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, "v60,v62,v65,v66" }, 2438 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, "v60,v62,v65,v66" }, 2439 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, "v60,v62,v65,v66" }, 2440 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "v60,v62,v65,v66" }, 2441 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, "v60,v62,v65,v66" }, 2442 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, "v60,v62,v65,v66" }, 2443 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, "v60,v62,v65,v66" }, 2444 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, "v60,v62,v65,v66" }, 2445 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, "v60,v62,v65,v66" }, 2446 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, "v60,v62,v65,v66" }, 2447 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, "v60,v62,v65,v66" }, 2448 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, "v60,v62,v65,v66" }, 2449 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, "v60,v62,v65,v66" }, 2450 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, "v60,v62,v65,v66" }, 2451 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, "v60,v62,v65,v66" }, 2452 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, "v60,v62,v65,v66" }, 2453 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, "v60,v62,v65,v66" }, 2454 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "v60,v62,v65,v66" }, 2455 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, "v60,v62,v65,v66" }, 2456 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, "v60,v62,v65,v66" }, 2457 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, "v60,v62,v65,v66" }, 2458 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "v60,v62,v65,v66" }, 2459 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, "v62,v65,v66" }, 2460 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, "v62,v65,v66" }, 2461 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, "v62,v65,v66" }, 2462 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "v62,v65,v66" }, 2463 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, "v60,v62,v65,v66" }, 2464 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, "v60,v62,v65,v66" }, 2465 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, "v62,v65,v66" }, 2466 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "v62,v65,v66" }, 2467 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, "v60,v62,v65,v66" }, 2468 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "v60,v62,v65,v66" }, 2469 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "v60,v62,v65,v66" }, 2470 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "v60,v62,v65,v66" }, 2471 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, "v60,v62,v65,v66" }, 2472 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "v60,v62,v65,v66" }, 2473 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, "v60,v62,v65,v66" }, 2474 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, "v60,v62,v65,v66" }, 2475 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, "v60,v62,v65,v66" }, 2476 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, "v60,v62,v65,v66" }, 2477 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, "v60,v62,v65,v66" }, 2478 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, "v60,v62,v65,v66" }, 2479 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, "v60,v62,v65,v66" }, 2480 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, "v60,v62,v65,v66" }, 2481 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, "v60,v62,v65,v66" }, 2482 { 
Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, "v60,v62,v65,v66" }, 2483 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, "v60,v62,v65,v66" }, 2484 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, "v60,v62,v65,v66" }, 2485 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, "v65,v66" }, 2486 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, "v65,v66" }, 2487 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, "v65,v66" }, 2488 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, "v65,v66" }, 2489 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, "v60,v62,v65,v66" }, 2490 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, "v60,v62,v65,v66" }, 2491 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, "v60,v62,v65,v66" }, 2492 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "v60,v62,v65,v66" }, 2493 { Hexagon::BI__builtin_HEXAGON_V6_vmux, "v60,v62,v65,v66" }, 2494 { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, "v60,v62,v65,v66" }, 2495 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, "v65,v66" }, 2496 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, "v65,v66" }, 2497 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, "v60,v62,v65,v66" }, 2498 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, "v60,v62,v65,v66" }, 2499 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, "v60,v62,v65,v66" }, 2500 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, "v60,v62,v65,v66" }, 2501 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, "v60,v62,v65,v66" }, 2502 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, "v60,v62,v65,v66" }, 2503 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, "v60,v62,v65,v66" }, 2504 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, "v60,v62,v65,v66" }, 2505 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, "v60,v62,v65,v66" }, 2506 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, "v60,v62,v65,v66" }, 2507 { Hexagon::BI__builtin_HEXAGON_V6_vnot, "v60,v62,v65,v66" }, 2508 { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, "v60,v62,v65,v66" }, 2509 { Hexagon::BI__builtin_HEXAGON_V6_vor, "v60,v62,v65,v66" }, 2510 { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, "v60,v62,v65,v66" }, 2511 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, "v60,v62,v65,v66" }, 2512 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, "v60,v62,v65,v66" }, 2513 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, "v60,v62,v65,v66" }, 2514 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, "v60,v62,v65,v66" }, 2515 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, "v60,v62,v65,v66" }, 2516 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, "v60,v62,v65,v66" }, 2517 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, "v60,v62,v65,v66" }, 2518 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, "v60,v62,v65,v66" }, 2519 { Hexagon::BI__builtin_HEXAGON_V6_vpackob, "v60,v62,v65,v66" }, 2520 { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, "v60,v62,v65,v66" }, 2521 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, "v60,v62,v65,v66" }, 2522 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, "v60,v62,v65,v66" }, 2523 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, "v60,v62,v65,v66" }, 2524 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, "v60,v62,v65,v66" }, 2525 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, "v60,v62,v65,v66" }, 2526 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, "v60,v62,v65,v66" }, 2527 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, "v60,v62,v65,v66" }, 2528 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, "v60,v62,v65,v66" }, 2529 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, "v65,v66" }, 2530 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, "v65,v66" }, 2531 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, "v65,v66" }, 2532 { 
Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, "v65,v66" }, 2533 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, "v65,v66" }, 2534 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, "v65,v66" }, 2535 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, "v60,v62,v65,v66" }, 2536 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, "v60,v62,v65,v66" }, 2537 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, "v65" }, 2538 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, "v65" }, 2539 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, "v65" }, 2540 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, "v65" }, 2541 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, "v60,v62,v65,v66" }, 2542 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, "v60,v62,v65,v66" }, 2543 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, "v60,v62,v65,v66" }, 2544 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, "v60,v62,v65,v66" }, 2545 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, "v60,v62,v65,v66" }, 2546 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, "v60,v62,v65,v66" }, 2547 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, "v60,v62,v65,v66" }, 2548 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "v60,v62,v65,v66" }, 2549 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, "v60,v62,v65,v66" }, 2550 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, "v60,v62,v65,v66" }, 2551 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, "v60,v62,v65,v66" }, 2552 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "v60,v62,v65,v66" }, 2553 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, "v60,v62,v65,v66" }, 2554 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, "v60,v62,v65,v66" }, 2555 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, "v60,v62,v65,v66" }, 2556 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, "v60,v62,v65,v66" }, 2557 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, "v60,v62,v65,v66" }, 2558 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, "v60,v62,v65,v66" }, 2559 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, "v60,v62,v65,v66" }, 2560 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, "v60,v62,v65,v66" }, 2561 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, "v60,v62,v65,v66" }, 2562 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, "v60,v62,v65,v66" }, 2563 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, "v60,v62,v65,v66" }, 2564 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "v60,v62,v65,v66" }, 2565 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, "v65" }, 2566 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, "v65" }, 2567 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, "v65" }, 2568 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, "v65" }, 2569 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, "v60,v62,v65,v66" }, 2570 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, "v60,v62,v65,v66" }, 2571 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, "v60,v62,v65,v66" }, 2572 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "v60,v62,v65,v66" }, 2573 { Hexagon::BI__builtin_HEXAGON_V6_vror, "v60,v62,v65,v66" }, 2574 { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, "v60,v62,v65,v66" }, 2575 { Hexagon::BI__builtin_HEXAGON_V6_vrotr, "v66" }, 2576 { Hexagon::BI__builtin_HEXAGON_V6_vrotr_128B, "v66" }, 2577 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, "v60,v62,v65,v66" }, 2578 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, "v60,v62,v65,v66" }, 2579 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, "v60,v62,v65,v66" }, 2580 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, "v60,v62,v65,v66" }, 2581 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, 
"v62,v65,v66" }, 2582 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, "v62,v65,v66" }, 2583 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, "v62,v65,v66" }, 2584 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, "v62,v65,v66" }, 2585 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, "v60,v62,v65,v66" }, 2586 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, "v60,v62,v65,v66" }, 2587 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, "v60,v62,v65,v66" }, 2588 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, "v60,v62,v65,v66" }, 2589 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, "v60,v62,v65,v66" }, 2590 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, "v60,v62,v65,v66" }, 2591 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, "v60,v62,v65,v66" }, 2592 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, "v60,v62,v65,v66" }, 2593 { Hexagon::BI__builtin_HEXAGON_V6_vsatdw, "v66" }, 2594 { Hexagon::BI__builtin_HEXAGON_V6_vsatdw_128B, "v66" }, 2595 { Hexagon::BI__builtin_HEXAGON_V6_vsathub, "v60,v62,v65,v66" }, 2596 { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, "v60,v62,v65,v66" }, 2597 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, "v62,v65,v66" }, 2598 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, "v62,v65,v66" }, 2599 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, "v60,v62,v65,v66" }, 2600 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, "v60,v62,v65,v66" }, 2601 { Hexagon::BI__builtin_HEXAGON_V6_vsb, "v60,v62,v65,v66" }, 2602 { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, "v60,v62,v65,v66" }, 2603 { Hexagon::BI__builtin_HEXAGON_V6_vsh, "v60,v62,v65,v66" }, 2604 { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, "v60,v62,v65,v66" }, 2605 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, "v60,v62,v65,v66" }, 2606 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, "v60,v62,v65,v66" }, 2607 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, "v60,v62,v65,v66" }, 2608 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, "v60,v62,v65,v66" }, 2609 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, "v60,v62,v65,v66" }, 2610 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, "v60,v62,v65,v66" }, 2611 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, "v60,v62,v65,v66" }, 2612 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, "v60,v62,v65,v66" }, 2613 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, "v60,v62,v65,v66" }, 2614 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, "v60,v62,v65,v66" }, 2615 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, "v60,v62,v65,v66" }, 2616 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, "v60,v62,v65,v66" }, 2617 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, "v60,v62,v65,v66" }, 2618 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, "v60,v62,v65,v66" }, 2619 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, "v60,v62,v65,v66" }, 2620 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, "v60,v62,v65,v66" }, 2621 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, "v60,v62,v65,v66" }, 2622 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, "v60,v62,v65,v66" }, 2623 { Hexagon::BI__builtin_HEXAGON_V6_vsubb, "v60,v62,v65,v66" }, 2624 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, "v60,v62,v65,v66" }, 2625 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, "v60,v62,v65,v66" }, 2626 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, "v60,v62,v65,v66" }, 2627 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, "v62,v65,v66" }, 2628 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, "v62,v65,v66" }, 2629 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, "v62,v65,v66" }, 2630 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, "v62,v65,v66" }, 2631 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, 
"v62,v65,v66" }, 2632 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, "v62,v65,v66" }, 2633 { Hexagon::BI__builtin_HEXAGON_V6_vsubh, "v60,v62,v65,v66" }, 2634 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, "v60,v62,v65,v66" }, 2635 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, "v60,v62,v65,v66" }, 2636 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, "v60,v62,v65,v66" }, 2637 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, "v60,v62,v65,v66" }, 2638 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, "v60,v62,v65,v66" }, 2639 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, "v60,v62,v65,v66" }, 2640 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, "v60,v62,v65,v66" }, 2641 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, "v60,v62,v65,v66" }, 2642 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, "v60,v62,v65,v66" }, 2643 { Hexagon::BI__builtin_HEXAGON_V6_vsububh, "v60,v62,v65,v66" }, 2644 { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, "v60,v62,v65,v66" }, 2645 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, "v60,v62,v65,v66" }, 2646 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, "v60,v62,v65,v66" }, 2647 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, "v60,v62,v65,v66" }, 2648 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, "v60,v62,v65,v66" }, 2649 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, "v62,v65,v66" }, 2650 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, "v62,v65,v66" }, 2651 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, "v60,v62,v65,v66" }, 2652 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, "v60,v62,v65,v66" }, 2653 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, "v60,v62,v65,v66" }, 2654 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "v60,v62,v65,v66" }, 2655 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, "v60,v62,v65,v66" }, 2656 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, "v60,v62,v65,v66" }, 2657 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, "v62,v65,v66" }, 2658 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, "v62,v65,v66" }, 2659 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, "v62,v65,v66" }, 2660 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "v62,v65,v66" }, 2661 { Hexagon::BI__builtin_HEXAGON_V6_vsubw, "v60,v62,v65,v66" }, 2662 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, "v60,v62,v65,v66" }, 2663 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, "v60,v62,v65,v66" }, 2664 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, "v60,v62,v65,v66" }, 2665 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, "v60,v62,v65,v66" }, 2666 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, "v60,v62,v65,v66" }, 2667 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, "v60,v62,v65,v66" }, 2668 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, "v60,v62,v65,v66" }, 2669 { Hexagon::BI__builtin_HEXAGON_V6_vswap, "v60,v62,v65,v66" }, 2670 { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, "v60,v62,v65,v66" }, 2671 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, "v60,v62,v65,v66" }, 2672 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, "v60,v62,v65,v66" }, 2673 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, "v60,v62,v65,v66" }, 2674 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, "v60,v62,v65,v66" }, 2675 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, "v60,v62,v65,v66" }, 2676 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, "v60,v62,v65,v66" }, 2677 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, "v60,v62,v65,v66" }, 2678 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, "v60,v62,v65,v66" }, 2679 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, "v60,v62,v65,v66" }, 2680 { 
Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, "v60,v62,v65,v66" }, 2681 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, "v60,v62,v65,v66" }, 2682 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "v60,v62,v65,v66" }, 2683 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, "v60,v62,v65,v66" }, 2684 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, "v60,v62,v65,v66" }, 2685 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, "v60,v62,v65,v66" }, 2686 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, "v60,v62,v65,v66" }, 2687 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, "v60,v62,v65,v66" }, 2688 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, "v60,v62,v65,v66" }, 2689 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, "v60,v62,v65,v66" }, 2690 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, "v60,v62,v65,v66" }, 2691 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, "v60,v62,v65,v66" }, 2692 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, "v60,v62,v65,v66" }, 2693 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, "v60,v62,v65,v66" }, 2694 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, "v60,v62,v65,v66" }, 2695 { Hexagon::BI__builtin_HEXAGON_V6_vxor, "v60,v62,v65,v66" }, 2696 { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, "v60,v62,v65,v66" }, 2697 { Hexagon::BI__builtin_HEXAGON_V6_vzb, "v60,v62,v65,v66" }, 2698 { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, "v60,v62,v65,v66" }, 2699 { Hexagon::BI__builtin_HEXAGON_V6_vzh, "v60,v62,v65,v66" }, 2700 { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, "v60,v62,v65,v66" }, 2701 }; 2702 2703 // Sort the tables on first execution so we can binary search them. 2704 auto SortCmp = [](const BuiltinAndString &LHS, const BuiltinAndString &RHS) { 2705 return LHS.BuiltinID < RHS.BuiltinID; 2706 }; 2707 static const bool SortOnce = 2708 (llvm::sort(ValidCPU, SortCmp), 2709 llvm::sort(ValidHVX, SortCmp), true); 2710 (void)SortOnce; 2711 auto LowerBoundCmp = [](const BuiltinAndString &BI, unsigned BuiltinID) { 2712 return BI.BuiltinID < BuiltinID; 2713 }; 2714 2715 const TargetInfo &TI = Context.getTargetInfo(); 2716 2717 const BuiltinAndString *FC = 2718 llvm::lower_bound(ValidCPU, BuiltinID, LowerBoundCmp); 2719 if (FC != std::end(ValidCPU) && FC->BuiltinID == BuiltinID) { 2720 const TargetOptions &Opts = TI.getTargetOpts(); 2721 StringRef CPU = Opts.CPU; 2722 if (!CPU.empty()) { 2723 assert(CPU.startswith("hexagon") && "Unexpected CPU name"); 2724 CPU.consume_front("hexagon"); 2725 SmallVector<StringRef, 3> CPUs; 2726 StringRef(FC->Str).split(CPUs, ','); 2727 if (llvm::none_of(CPUs, [CPU](StringRef S) { return S == CPU; })) 2728 return Diag(TheCall->getBeginLoc(), 2729 diag::err_hexagon_builtin_unsupported_cpu); 2730 } 2731 } 2732 2733 const BuiltinAndString *FH = 2734 llvm::lower_bound(ValidHVX, BuiltinID, LowerBoundCmp); 2735 if (FH != std::end(ValidHVX) && FH->BuiltinID == BuiltinID) { 2736 if (!TI.hasFeature("hvx")) 2737 return Diag(TheCall->getBeginLoc(), 2738 diag::err_hexagon_builtin_requires_hvx); 2739 2740 SmallVector<StringRef, 3> HVXs; 2741 StringRef(FH->Str).split(HVXs, ','); 2742 bool IsValid = llvm::any_of(HVXs, 2743 [&TI] (StringRef V) { 2744 std::string F = "hvx" + V.str(); 2745 return TI.hasFeature(F); 2746 }); 2747 if (!IsValid) 2748 return Diag(TheCall->getBeginLoc(), 2749 diag::err_hexagon_builtin_unsupported_hvx); 2750 } 2751 2752 return false; 2753 } 2754 2755 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2756 struct ArgInfo { 2757 uint8_t OpNum; 2758 bool IsSigned; 2759 uint8_t BitWidth; 2760 uint8_t Align; 2761 }; 2762 struct 
BuiltinInfo { 2763 unsigned BuiltinID; 2764 ArgInfo Infos[2]; 2765 }; 2766 2767 static BuiltinInfo Infos[] = { 2768 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2769 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2770 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2771 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 0 }} }, 2772 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2773 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2774 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2775 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2776 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2777 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2778 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2779 2780 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2781 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 2782 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 2783 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2784 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2785 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2786 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2787 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2788 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2789 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2790 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 2791 2792 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2793 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2794 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2795 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2796 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2797 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2798 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2799 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2800 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2801 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2802 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2803 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2804 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2805 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2806 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2807 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2808 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2809 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2810 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 2811 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2812 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 2813 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2814 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2815 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2816 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2817 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 2818 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 
0, false, 10, 0 }} }, 2819 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2820 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2821 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2822 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2823 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2824 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2825 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2826 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2827 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2828 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2829 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2830 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2831 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 2832 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2833 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2834 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2835 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2836 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2837 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2838 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2839 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 2840 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2841 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2842 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2843 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2844 {{ 1, false, 6, 0 }} }, 2845 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2846 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2847 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2848 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2849 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2850 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2851 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2852 {{ 1, false, 5, 0 }} }, 2853 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2854 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2855 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2856 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2857 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2858 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2859 { 2, false, 5, 0 }} }, 2860 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 2861 { 2, false, 6, 0 }} }, 2862 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 2863 { 3, false, 5, 0 }} }, 2864 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 2865 { 3, false, 6, 0 }} }, 2866 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 2867 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 2868 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 2869 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 2870 { 
Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 2871 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 2872 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 2873 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 2874 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 2875 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 2876 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 2877 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 2878 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 2879 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 2880 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 2881 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 2882 {{ 2, false, 4, 0 }, 2883 { 3, false, 5, 0 }} }, 2884 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 2885 {{ 2, false, 4, 0 }, 2886 { 3, false, 5, 0 }} }, 2887 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 2888 {{ 2, false, 4, 0 }, 2889 { 3, false, 5, 0 }} }, 2890 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 2891 {{ 2, false, 4, 0 }, 2892 { 3, false, 5, 0 }} }, 2893 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 2894 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 2895 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 2896 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 2897 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 2898 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 2899 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 2900 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 2901 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 2902 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 2903 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 2904 { 2, false, 5, 0 }} }, 2905 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 2906 { 2, false, 6, 0 }} }, 2907 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 2908 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 2909 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 2910 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 2911 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 2912 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 2913 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 2914 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 2915 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 2916 {{ 1, false, 4, 0 }} }, 2917 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 2918 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 2919 {{ 1, false, 4, 0 }} }, 2920 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 2921 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 2922 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 2923 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 2924 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 2925 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 2926 { 
Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 2927 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 2928 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 2929 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 2930 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 2931 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 2932 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 2933 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 2934 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 2935 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 2936 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 2937 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 2938 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 2939 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 2940 {{ 3, false, 1, 0 }} }, 2941 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 2942 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 2943 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 2944 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 2945 {{ 3, false, 1, 0 }} }, 2946 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 2947 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 2948 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 2949 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 2950 {{ 3, false, 1, 0 }} }, 2951 }; 2952 2953 // Use a dynamically initialized static to sort the table exactly once on 2954 // first run. 2955 static const bool SortOnce = 2956 (llvm::sort(Infos, 2957 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 2958 return LHS.BuiltinID < RHS.BuiltinID; 2959 }), 2960 true); 2961 (void)SortOnce; 2962 2963 const BuiltinInfo *F = llvm::partition_point( 2964 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 2965 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 2966 return false; 2967 2968 bool Error = false; 2969 2970 for (const ArgInfo &A : F->Infos) { 2971 // Ignore empty ArgInfo elements. 2972 if (A.BitWidth == 0) 2973 continue; 2974 2975 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 2976 int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1; 2977 if (!A.Align) { 2978 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 2979 } else { 2980 unsigned M = 1 << A.Align; 2981 Min *= M; 2982 Max *= M; 2983 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) | 2984 SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 2985 } 2986 } 2987 return Error; 2988 } 2989 2990 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 2991 CallExpr *TheCall) { 2992 return CheckHexagonBuiltinCpu(BuiltinID, TheCall) || 2993 CheckHexagonBuiltinArgument(BuiltinID, TheCall); 2994 } 2995 2996 2997 // CheckMipsBuiltinFunctionCall - Checks the constant value passed to the 2998 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 2999 // ordering for DSP is unspecified. MSA is ordered by the data format used 3000 // by the underlying instruction i.e., df/m, df/n and then by size. 3001 // 3002 // FIXME: The size tests here should instead be tablegen'd along with the 3003 // definitions from include/clang/Basic/BuiltinsMips.def. 
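//
// For example, a call whose immediate falls outside the table-driven range,
// e.g. (illustrative)
//   v16i8 r = __builtin_msa_slli_b(v, 9);   // slli_b only accepts 0..7
// is rejected through SemaBuiltinConstantArgRange below.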
// FIXME: GCC is strict on signedness for some of these intrinsics; we should
// be too.
bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics map to) that use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually has an unsigned 5 bit field,
  // not a df/n field.
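  // For instance (illustrative), __builtin_msa_ctcmsa(Imm, Val) and
  // __builtin_msa_cfcmsa(Imm) expect Imm to be a constant MSA control
  // register index in the range 0..31.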
3048 case Mips::BI__builtin_msa_cfcmsa: 3049 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3050 case Mips::BI__builtin_msa_clei_u_b: 3051 case Mips::BI__builtin_msa_clei_u_h: 3052 case Mips::BI__builtin_msa_clei_u_w: 3053 case Mips::BI__builtin_msa_clei_u_d: 3054 case Mips::BI__builtin_msa_clti_u_b: 3055 case Mips::BI__builtin_msa_clti_u_h: 3056 case Mips::BI__builtin_msa_clti_u_w: 3057 case Mips::BI__builtin_msa_clti_u_d: 3058 case Mips::BI__builtin_msa_maxi_u_b: 3059 case Mips::BI__builtin_msa_maxi_u_h: 3060 case Mips::BI__builtin_msa_maxi_u_w: 3061 case Mips::BI__builtin_msa_maxi_u_d: 3062 case Mips::BI__builtin_msa_mini_u_b: 3063 case Mips::BI__builtin_msa_mini_u_h: 3064 case Mips::BI__builtin_msa_mini_u_w: 3065 case Mips::BI__builtin_msa_mini_u_d: 3066 case Mips::BI__builtin_msa_addvi_b: 3067 case Mips::BI__builtin_msa_addvi_h: 3068 case Mips::BI__builtin_msa_addvi_w: 3069 case Mips::BI__builtin_msa_addvi_d: 3070 case Mips::BI__builtin_msa_bclri_w: 3071 case Mips::BI__builtin_msa_bnegi_w: 3072 case Mips::BI__builtin_msa_bseti_w: 3073 case Mips::BI__builtin_msa_sat_s_w: 3074 case Mips::BI__builtin_msa_sat_u_w: 3075 case Mips::BI__builtin_msa_slli_w: 3076 case Mips::BI__builtin_msa_srai_w: 3077 case Mips::BI__builtin_msa_srari_w: 3078 case Mips::BI__builtin_msa_srli_w: 3079 case Mips::BI__builtin_msa_srlri_w: 3080 case Mips::BI__builtin_msa_subvi_b: 3081 case Mips::BI__builtin_msa_subvi_h: 3082 case Mips::BI__builtin_msa_subvi_w: 3083 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3084 case Mips::BI__builtin_msa_binsli_w: 3085 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3086 // These intrinsics take an unsigned 6 bit immediate. 3087 case Mips::BI__builtin_msa_bclri_d: 3088 case Mips::BI__builtin_msa_bnegi_d: 3089 case Mips::BI__builtin_msa_bseti_d: 3090 case Mips::BI__builtin_msa_sat_s_d: 3091 case Mips::BI__builtin_msa_sat_u_d: 3092 case Mips::BI__builtin_msa_slli_d: 3093 case Mips::BI__builtin_msa_srai_d: 3094 case Mips::BI__builtin_msa_srari_d: 3095 case Mips::BI__builtin_msa_srli_d: 3096 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3097 case Mips::BI__builtin_msa_binsli_d: 3098 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3099 // These intrinsics take a signed 5 bit immediate. 3100 case Mips::BI__builtin_msa_ceqi_b: 3101 case Mips::BI__builtin_msa_ceqi_h: 3102 case Mips::BI__builtin_msa_ceqi_w: 3103 case Mips::BI__builtin_msa_ceqi_d: 3104 case Mips::BI__builtin_msa_clti_s_b: 3105 case Mips::BI__builtin_msa_clti_s_h: 3106 case Mips::BI__builtin_msa_clti_s_w: 3107 case Mips::BI__builtin_msa_clti_s_d: 3108 case Mips::BI__builtin_msa_clei_s_b: 3109 case Mips::BI__builtin_msa_clei_s_h: 3110 case Mips::BI__builtin_msa_clei_s_w: 3111 case Mips::BI__builtin_msa_clei_s_d: 3112 case Mips::BI__builtin_msa_maxi_s_b: 3113 case Mips::BI__builtin_msa_maxi_s_h: 3114 case Mips::BI__builtin_msa_maxi_s_w: 3115 case Mips::BI__builtin_msa_maxi_s_d: 3116 case Mips::BI__builtin_msa_mini_s_b: 3117 case Mips::BI__builtin_msa_mini_s_h: 3118 case Mips::BI__builtin_msa_mini_s_w: 3119 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3120 // These intrinsics take an unsigned 8 bit immediate. 
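  // For example (illustrative), __builtin_msa_andi_b(v, 0xFF) is accepted,
  // while an immediate outside 0..255 is rejected.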
3121 case Mips::BI__builtin_msa_andi_b: 3122 case Mips::BI__builtin_msa_nori_b: 3123 case Mips::BI__builtin_msa_ori_b: 3124 case Mips::BI__builtin_msa_shf_b: 3125 case Mips::BI__builtin_msa_shf_h: 3126 case Mips::BI__builtin_msa_shf_w: 3127 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3128 case Mips::BI__builtin_msa_bseli_b: 3129 case Mips::BI__builtin_msa_bmnzi_b: 3130 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3131 // df/n format 3132 // These intrinsics take an unsigned 4 bit immediate. 3133 case Mips::BI__builtin_msa_copy_s_b: 3134 case Mips::BI__builtin_msa_copy_u_b: 3135 case Mips::BI__builtin_msa_insve_b: 3136 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3137 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3138 // These intrinsics take an unsigned 3 bit immediate. 3139 case Mips::BI__builtin_msa_copy_s_h: 3140 case Mips::BI__builtin_msa_copy_u_h: 3141 case Mips::BI__builtin_msa_insve_h: 3142 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3143 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3144 // These intrinsics take an unsigned 2 bit immediate. 3145 case Mips::BI__builtin_msa_copy_s_w: 3146 case Mips::BI__builtin_msa_copy_u_w: 3147 case Mips::BI__builtin_msa_insve_w: 3148 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3149 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3150 // These intrinsics take an unsigned 1 bit immediate. 3151 case Mips::BI__builtin_msa_copy_s_d: 3152 case Mips::BI__builtin_msa_copy_u_d: 3153 case Mips::BI__builtin_msa_insve_d: 3154 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3155 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3156 // Memory offsets and immediate loads. 3157 // These intrinsics take a signed 10 bit immediate. 
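  // For example (illustrative), __builtin_msa_ldi_w(-512) is accepted; note
  // that ldi_b is special-cased below to also allow unsigned byte values up
  // to 255.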
3158 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3159 case Mips::BI__builtin_msa_ldi_h: 3160 case Mips::BI__builtin_msa_ldi_w: 3161 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3162 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3163 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3164 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3165 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3166 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3167 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3168 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3169 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3170 } 3171 3172 if (!m) 3173 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3174 3175 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3176 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3177 } 3178 3179 bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 3180 unsigned i = 0, l = 0, u = 0; 3181 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || 3182 BuiltinID == PPC::BI__builtin_divdeu || 3183 BuiltinID == PPC::BI__builtin_bpermd; 3184 bool IsTarget64Bit = Context.getTargetInfo() 3185 .getTypeWidth(Context 3186 .getTargetInfo() 3187 .getIntPtrType()) == 64; 3188 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || 3189 BuiltinID == PPC::BI__builtin_divweu || 3190 BuiltinID == PPC::BI__builtin_divde || 3191 BuiltinID == PPC::BI__builtin_divdeu; 3192 3193 if (Is64BitBltin && !IsTarget64Bit) 3194 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3195 << TheCall->getSourceRange(); 3196 3197 if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) || 3198 (BuiltinID == PPC::BI__builtin_bpermd && 3199 !Context.getTargetInfo().hasFeature("bpermd"))) 3200 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3201 << TheCall->getSourceRange(); 3202 3203 auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool { 3204 if (!Context.getTargetInfo().hasFeature("vsx")) 3205 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3206 << TheCall->getSourceRange(); 3207 return false; 3208 }; 3209 3210 switch (BuiltinID) { 3211 default: return false; 3212 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3213 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3214 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3215 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3216 case PPC::BI__builtin_tbegin: 3217 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3218 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3219 case PPC::BI__builtin_tabortwc: 3220 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3221 case PPC::BI__builtin_tabortwci: 3222 case PPC::BI__builtin_tabortdci: 3223 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3224 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3225 case PPC::BI__builtin_vsx_xxpermdi: 3226 case PPC::BI__builtin_vsx_xxsldwi: 3227 return SemaBuiltinVSX(TheCall); 3228 case PPC::BI__builtin_unpack_vector_int128: 3229 return SemaVSXCheck(TheCall) || 3230 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3231 case PPC::BI__builtin_pack_vector_int128: 3232 return SemaVSXCheck(TheCall); 3233 } 3234 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3235 } 3236 3237 bool 
Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 3238 CallExpr *TheCall) { 3239 if (BuiltinID == SystemZ::BI__builtin_tabort) { 3240 Expr *Arg = TheCall->getArg(0); 3241 llvm::APSInt AbortCode(32); 3242 if (Arg->isIntegerConstantExpr(AbortCode, Context) && 3243 AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256) 3244 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 3245 << Arg->getSourceRange(); 3246 } 3247 3248 // For intrinsics which take an immediate value as part of the instruction, 3249 // range check them here. 3250 unsigned i = 0, l = 0, u = 0; 3251 switch (BuiltinID) { 3252 default: return false; 3253 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3254 case SystemZ::BI__builtin_s390_verimb: 3255 case SystemZ::BI__builtin_s390_verimh: 3256 case SystemZ::BI__builtin_s390_verimf: 3257 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3258 case SystemZ::BI__builtin_s390_vfaeb: 3259 case SystemZ::BI__builtin_s390_vfaeh: 3260 case SystemZ::BI__builtin_s390_vfaef: 3261 case SystemZ::BI__builtin_s390_vfaebs: 3262 case SystemZ::BI__builtin_s390_vfaehs: 3263 case SystemZ::BI__builtin_s390_vfaefs: 3264 case SystemZ::BI__builtin_s390_vfaezb: 3265 case SystemZ::BI__builtin_s390_vfaezh: 3266 case SystemZ::BI__builtin_s390_vfaezf: 3267 case SystemZ::BI__builtin_s390_vfaezbs: 3268 case SystemZ::BI__builtin_s390_vfaezhs: 3269 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3270 case SystemZ::BI__builtin_s390_vfisb: 3271 case SystemZ::BI__builtin_s390_vfidb: 3272 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3273 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3274 case SystemZ::BI__builtin_s390_vftcisb: 3275 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3276 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3277 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3278 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3279 case SystemZ::BI__builtin_s390_vstrcb: 3280 case SystemZ::BI__builtin_s390_vstrch: 3281 case SystemZ::BI__builtin_s390_vstrcf: 3282 case SystemZ::BI__builtin_s390_vstrczb: 3283 case SystemZ::BI__builtin_s390_vstrczh: 3284 case SystemZ::BI__builtin_s390_vstrczf: 3285 case SystemZ::BI__builtin_s390_vstrcbs: 3286 case SystemZ::BI__builtin_s390_vstrchs: 3287 case SystemZ::BI__builtin_s390_vstrcfs: 3288 case SystemZ::BI__builtin_s390_vstrczbs: 3289 case SystemZ::BI__builtin_s390_vstrczhs: 3290 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3291 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3292 case SystemZ::BI__builtin_s390_vfminsb: 3293 case SystemZ::BI__builtin_s390_vfmaxsb: 3294 case SystemZ::BI__builtin_s390_vfmindb: 3295 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3296 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 3297 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 3298 } 3299 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3300 } 3301 3302 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 3303 /// This checks that the target supports __builtin_cpu_supports and 3304 /// that the string argument is constant and valid. 3305 static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) { 3306 Expr *Arg = TheCall->getArg(0); 3307 3308 // Check if the argument is a string literal. 
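  // A typical call this validates looks like (illustrative):
  //   if (__builtin_cpu_supports("sse4.2")) { ... }
  // Anything other than a plain string literal, or a feature name the target
  // does not recognize, is diagnosed below.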
3309 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3310 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3311 << Arg->getSourceRange(); 3312 3313 // Check the contents of the string. 3314 StringRef Feature = 3315 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3316 if (!S.Context.getTargetInfo().validateCpuSupports(Feature)) 3317 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 3318 << Arg->getSourceRange(); 3319 return false; 3320 } 3321 3322 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 3323 /// This checks that the target supports __builtin_cpu_is and 3324 /// that the string argument is constant and valid. 3325 static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) { 3326 Expr *Arg = TheCall->getArg(0); 3327 3328 // Check if the argument is a string literal. 3329 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3330 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3331 << Arg->getSourceRange(); 3332 3333 // Check the contents of the string. 3334 StringRef Feature = 3335 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3336 if (!S.Context.getTargetInfo().validateCpuIs(Feature)) 3337 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 3338 << Arg->getSourceRange(); 3339 return false; 3340 } 3341 3342 // Check if the rounding mode is legal. 3343 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 3344 // Indicates if this instruction has rounding control or just SAE. 3345 bool HasRC = false; 3346 3347 unsigned ArgNum = 0; 3348 switch (BuiltinID) { 3349 default: 3350 return false; 3351 case X86::BI__builtin_ia32_vcvttsd2si32: 3352 case X86::BI__builtin_ia32_vcvttsd2si64: 3353 case X86::BI__builtin_ia32_vcvttsd2usi32: 3354 case X86::BI__builtin_ia32_vcvttsd2usi64: 3355 case X86::BI__builtin_ia32_vcvttss2si32: 3356 case X86::BI__builtin_ia32_vcvttss2si64: 3357 case X86::BI__builtin_ia32_vcvttss2usi32: 3358 case X86::BI__builtin_ia32_vcvttss2usi64: 3359 ArgNum = 1; 3360 break; 3361 case X86::BI__builtin_ia32_maxpd512: 3362 case X86::BI__builtin_ia32_maxps512: 3363 case X86::BI__builtin_ia32_minpd512: 3364 case X86::BI__builtin_ia32_minps512: 3365 ArgNum = 2; 3366 break; 3367 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3368 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3369 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3370 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3371 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3372 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3373 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3374 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3375 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3376 case X86::BI__builtin_ia32_exp2pd_mask: 3377 case X86::BI__builtin_ia32_exp2ps_mask: 3378 case X86::BI__builtin_ia32_getexppd512_mask: 3379 case X86::BI__builtin_ia32_getexpps512_mask: 3380 case X86::BI__builtin_ia32_rcp28pd_mask: 3381 case X86::BI__builtin_ia32_rcp28ps_mask: 3382 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3383 case X86::BI__builtin_ia32_rsqrt28ps_mask: 3384 case X86::BI__builtin_ia32_vcomisd: 3385 case X86::BI__builtin_ia32_vcomiss: 3386 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3387 ArgNum = 3; 3388 break; 3389 case X86::BI__builtin_ia32_cmppd512_mask: 3390 case X86::BI__builtin_ia32_cmpps512_mask: 3391 case X86::BI__builtin_ia32_cmpsd_mask: 3392 case X86::BI__builtin_ia32_cmpss_mask: 3393 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3394 case 
X86::BI__builtin_ia32_getexpsd128_round_mask: 3395 case X86::BI__builtin_ia32_getexpss128_round_mask: 3396 case X86::BI__builtin_ia32_getmantpd512_mask: 3397 case X86::BI__builtin_ia32_getmantps512_mask: 3398 case X86::BI__builtin_ia32_maxsd_round_mask: 3399 case X86::BI__builtin_ia32_maxss_round_mask: 3400 case X86::BI__builtin_ia32_minsd_round_mask: 3401 case X86::BI__builtin_ia32_minss_round_mask: 3402 case X86::BI__builtin_ia32_rcp28sd_round_mask: 3403 case X86::BI__builtin_ia32_rcp28ss_round_mask: 3404 case X86::BI__builtin_ia32_reducepd512_mask: 3405 case X86::BI__builtin_ia32_reduceps512_mask: 3406 case X86::BI__builtin_ia32_rndscalepd_mask: 3407 case X86::BI__builtin_ia32_rndscaleps_mask: 3408 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 3409 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 3410 ArgNum = 4; 3411 break; 3412 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3413 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3414 case X86::BI__builtin_ia32_fixupimmps512_mask: 3415 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3416 case X86::BI__builtin_ia32_fixupimmsd_mask: 3417 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3418 case X86::BI__builtin_ia32_fixupimmss_mask: 3419 case X86::BI__builtin_ia32_fixupimmss_maskz: 3420 case X86::BI__builtin_ia32_getmantsd_round_mask: 3421 case X86::BI__builtin_ia32_getmantss_round_mask: 3422 case X86::BI__builtin_ia32_rangepd512_mask: 3423 case X86::BI__builtin_ia32_rangeps512_mask: 3424 case X86::BI__builtin_ia32_rangesd128_round_mask: 3425 case X86::BI__builtin_ia32_rangess128_round_mask: 3426 case X86::BI__builtin_ia32_reducesd_mask: 3427 case X86::BI__builtin_ia32_reducess_mask: 3428 case X86::BI__builtin_ia32_rndscalesd_round_mask: 3429 case X86::BI__builtin_ia32_rndscaless_round_mask: 3430 ArgNum = 5; 3431 break; 3432 case X86::BI__builtin_ia32_vcvtsd2si64: 3433 case X86::BI__builtin_ia32_vcvtsd2si32: 3434 case X86::BI__builtin_ia32_vcvtsd2usi32: 3435 case X86::BI__builtin_ia32_vcvtsd2usi64: 3436 case X86::BI__builtin_ia32_vcvtss2si32: 3437 case X86::BI__builtin_ia32_vcvtss2si64: 3438 case X86::BI__builtin_ia32_vcvtss2usi32: 3439 case X86::BI__builtin_ia32_vcvtss2usi64: 3440 case X86::BI__builtin_ia32_sqrtpd512: 3441 case X86::BI__builtin_ia32_sqrtps512: 3442 ArgNum = 1; 3443 HasRC = true; 3444 break; 3445 case X86::BI__builtin_ia32_addpd512: 3446 case X86::BI__builtin_ia32_addps512: 3447 case X86::BI__builtin_ia32_divpd512: 3448 case X86::BI__builtin_ia32_divps512: 3449 case X86::BI__builtin_ia32_mulpd512: 3450 case X86::BI__builtin_ia32_mulps512: 3451 case X86::BI__builtin_ia32_subpd512: 3452 case X86::BI__builtin_ia32_subps512: 3453 case X86::BI__builtin_ia32_cvtsi2sd64: 3454 case X86::BI__builtin_ia32_cvtsi2ss32: 3455 case X86::BI__builtin_ia32_cvtsi2ss64: 3456 case X86::BI__builtin_ia32_cvtusi2sd64: 3457 case X86::BI__builtin_ia32_cvtusi2ss32: 3458 case X86::BI__builtin_ia32_cvtusi2ss64: 3459 ArgNum = 2; 3460 HasRC = true; 3461 break; 3462 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 3463 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 3464 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 3465 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 3466 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 3467 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 3468 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 3469 case X86::BI__builtin_ia32_cvtps2dq512_mask: 3470 case X86::BI__builtin_ia32_cvtps2qq512_mask: 3471 case X86::BI__builtin_ia32_cvtps2udq512_mask: 3472 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 3473 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 
3474 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 3475 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 3476 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 3477 ArgNum = 3; 3478 HasRC = true; 3479 break; 3480 case X86::BI__builtin_ia32_addss_round_mask: 3481 case X86::BI__builtin_ia32_addsd_round_mask: 3482 case X86::BI__builtin_ia32_divss_round_mask: 3483 case X86::BI__builtin_ia32_divsd_round_mask: 3484 case X86::BI__builtin_ia32_mulss_round_mask: 3485 case X86::BI__builtin_ia32_mulsd_round_mask: 3486 case X86::BI__builtin_ia32_subss_round_mask: 3487 case X86::BI__builtin_ia32_subsd_round_mask: 3488 case X86::BI__builtin_ia32_scalefpd512_mask: 3489 case X86::BI__builtin_ia32_scalefps512_mask: 3490 case X86::BI__builtin_ia32_scalefsd_round_mask: 3491 case X86::BI__builtin_ia32_scalefss_round_mask: 3492 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 3493 case X86::BI__builtin_ia32_sqrtsd_round_mask: 3494 case X86::BI__builtin_ia32_sqrtss_round_mask: 3495 case X86::BI__builtin_ia32_vfmaddsd3_mask: 3496 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 3497 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 3498 case X86::BI__builtin_ia32_vfmaddss3_mask: 3499 case X86::BI__builtin_ia32_vfmaddss3_maskz: 3500 case X86::BI__builtin_ia32_vfmaddss3_mask3: 3501 case X86::BI__builtin_ia32_vfmaddpd512_mask: 3502 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 3503 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 3504 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 3505 case X86::BI__builtin_ia32_vfmaddps512_mask: 3506 case X86::BI__builtin_ia32_vfmaddps512_maskz: 3507 case X86::BI__builtin_ia32_vfmaddps512_mask3: 3508 case X86::BI__builtin_ia32_vfmsubps512_mask3: 3509 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 3510 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 3511 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 3512 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 3513 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 3514 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 3515 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 3516 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 3517 ArgNum = 4; 3518 HasRC = true; 3519 break; 3520 } 3521 3522 llvm::APSInt Result; 3523 3524 // We can't check the value of a dependent argument. 3525 Expr *Arg = TheCall->getArg(ArgNum); 3526 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3527 return false; 3528 3529 // Check constant-ness first. 3530 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3531 return true; 3532 3533 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 3534 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 3535 // combined with ROUND_NO_EXC. 3536 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 3537 Result == 8/*ROUND_NO_EXC*/ || 3538 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 3539 return false; 3540 3541 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 3542 << Arg->getSourceRange(); 3543 } 3544 3545 // Check if the gather/scatter scale is legal. 
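// The scale immediate of these builtins is a byte stride and may only be
// 1, 2, 4 or 8. For example (illustrative), the trailing scale operand in
//   _mm512_i32gather_pd(idx, base, 8)
// is accepted; any other value is diagnosed with
// err_x86_builtin_invalid_scale.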
3546 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 3547 CallExpr *TheCall) { 3548 unsigned ArgNum = 0; 3549 switch (BuiltinID) { 3550 default: 3551 return false; 3552 case X86::BI__builtin_ia32_gatherpfdpd: 3553 case X86::BI__builtin_ia32_gatherpfdps: 3554 case X86::BI__builtin_ia32_gatherpfqpd: 3555 case X86::BI__builtin_ia32_gatherpfqps: 3556 case X86::BI__builtin_ia32_scatterpfdpd: 3557 case X86::BI__builtin_ia32_scatterpfdps: 3558 case X86::BI__builtin_ia32_scatterpfqpd: 3559 case X86::BI__builtin_ia32_scatterpfqps: 3560 ArgNum = 3; 3561 break; 3562 case X86::BI__builtin_ia32_gatherd_pd: 3563 case X86::BI__builtin_ia32_gatherd_pd256: 3564 case X86::BI__builtin_ia32_gatherq_pd: 3565 case X86::BI__builtin_ia32_gatherq_pd256: 3566 case X86::BI__builtin_ia32_gatherd_ps: 3567 case X86::BI__builtin_ia32_gatherd_ps256: 3568 case X86::BI__builtin_ia32_gatherq_ps: 3569 case X86::BI__builtin_ia32_gatherq_ps256: 3570 case X86::BI__builtin_ia32_gatherd_q: 3571 case X86::BI__builtin_ia32_gatherd_q256: 3572 case X86::BI__builtin_ia32_gatherq_q: 3573 case X86::BI__builtin_ia32_gatherq_q256: 3574 case X86::BI__builtin_ia32_gatherd_d: 3575 case X86::BI__builtin_ia32_gatherd_d256: 3576 case X86::BI__builtin_ia32_gatherq_d: 3577 case X86::BI__builtin_ia32_gatherq_d256: 3578 case X86::BI__builtin_ia32_gather3div2df: 3579 case X86::BI__builtin_ia32_gather3div2di: 3580 case X86::BI__builtin_ia32_gather3div4df: 3581 case X86::BI__builtin_ia32_gather3div4di: 3582 case X86::BI__builtin_ia32_gather3div4sf: 3583 case X86::BI__builtin_ia32_gather3div4si: 3584 case X86::BI__builtin_ia32_gather3div8sf: 3585 case X86::BI__builtin_ia32_gather3div8si: 3586 case X86::BI__builtin_ia32_gather3siv2df: 3587 case X86::BI__builtin_ia32_gather3siv2di: 3588 case X86::BI__builtin_ia32_gather3siv4df: 3589 case X86::BI__builtin_ia32_gather3siv4di: 3590 case X86::BI__builtin_ia32_gather3siv4sf: 3591 case X86::BI__builtin_ia32_gather3siv4si: 3592 case X86::BI__builtin_ia32_gather3siv8sf: 3593 case X86::BI__builtin_ia32_gather3siv8si: 3594 case X86::BI__builtin_ia32_gathersiv8df: 3595 case X86::BI__builtin_ia32_gathersiv16sf: 3596 case X86::BI__builtin_ia32_gatherdiv8df: 3597 case X86::BI__builtin_ia32_gatherdiv16sf: 3598 case X86::BI__builtin_ia32_gathersiv8di: 3599 case X86::BI__builtin_ia32_gathersiv16si: 3600 case X86::BI__builtin_ia32_gatherdiv8di: 3601 case X86::BI__builtin_ia32_gatherdiv16si: 3602 case X86::BI__builtin_ia32_scatterdiv2df: 3603 case X86::BI__builtin_ia32_scatterdiv2di: 3604 case X86::BI__builtin_ia32_scatterdiv4df: 3605 case X86::BI__builtin_ia32_scatterdiv4di: 3606 case X86::BI__builtin_ia32_scatterdiv4sf: 3607 case X86::BI__builtin_ia32_scatterdiv4si: 3608 case X86::BI__builtin_ia32_scatterdiv8sf: 3609 case X86::BI__builtin_ia32_scatterdiv8si: 3610 case X86::BI__builtin_ia32_scattersiv2df: 3611 case X86::BI__builtin_ia32_scattersiv2di: 3612 case X86::BI__builtin_ia32_scattersiv4df: 3613 case X86::BI__builtin_ia32_scattersiv4di: 3614 case X86::BI__builtin_ia32_scattersiv4sf: 3615 case X86::BI__builtin_ia32_scattersiv4si: 3616 case X86::BI__builtin_ia32_scattersiv8sf: 3617 case X86::BI__builtin_ia32_scattersiv8si: 3618 case X86::BI__builtin_ia32_scattersiv8df: 3619 case X86::BI__builtin_ia32_scattersiv16sf: 3620 case X86::BI__builtin_ia32_scatterdiv8df: 3621 case X86::BI__builtin_ia32_scatterdiv16sf: 3622 case X86::BI__builtin_ia32_scattersiv8di: 3623 case X86::BI__builtin_ia32_scattersiv16si: 3624 case X86::BI__builtin_ia32_scatterdiv8di: 3625 case X86::BI__builtin_ia32_scatterdiv16si: 3626 
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TheCall);

  // Check for 32-bit-only builtins used when not targeting x86-32.
  const llvm::Triple &TT = Context.getTargetInfo().getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE, make sure it's valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate, make sure it's
  // valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // For intrinsics that take an immediate value as part of the instruction,
  // range check them here.
3682 int i = 0, l = 0, u = 0; 3683 switch (BuiltinID) { 3684 default: 3685 return false; 3686 case X86::BI__builtin_ia32_vec_ext_v2si: 3687 case X86::BI__builtin_ia32_vec_ext_v2di: 3688 case X86::BI__builtin_ia32_vextractf128_pd256: 3689 case X86::BI__builtin_ia32_vextractf128_ps256: 3690 case X86::BI__builtin_ia32_vextractf128_si256: 3691 case X86::BI__builtin_ia32_extract128i256: 3692 case X86::BI__builtin_ia32_extractf64x4_mask: 3693 case X86::BI__builtin_ia32_extracti64x4_mask: 3694 case X86::BI__builtin_ia32_extractf32x8_mask: 3695 case X86::BI__builtin_ia32_extracti32x8_mask: 3696 case X86::BI__builtin_ia32_extractf64x2_256_mask: 3697 case X86::BI__builtin_ia32_extracti64x2_256_mask: 3698 case X86::BI__builtin_ia32_extractf32x4_256_mask: 3699 case X86::BI__builtin_ia32_extracti32x4_256_mask: 3700 i = 1; l = 0; u = 1; 3701 break; 3702 case X86::BI__builtin_ia32_vec_set_v2di: 3703 case X86::BI__builtin_ia32_vinsertf128_pd256: 3704 case X86::BI__builtin_ia32_vinsertf128_ps256: 3705 case X86::BI__builtin_ia32_vinsertf128_si256: 3706 case X86::BI__builtin_ia32_insert128i256: 3707 case X86::BI__builtin_ia32_insertf32x8: 3708 case X86::BI__builtin_ia32_inserti32x8: 3709 case X86::BI__builtin_ia32_insertf64x4: 3710 case X86::BI__builtin_ia32_inserti64x4: 3711 case X86::BI__builtin_ia32_insertf64x2_256: 3712 case X86::BI__builtin_ia32_inserti64x2_256: 3713 case X86::BI__builtin_ia32_insertf32x4_256: 3714 case X86::BI__builtin_ia32_inserti32x4_256: 3715 i = 2; l = 0; u = 1; 3716 break; 3717 case X86::BI__builtin_ia32_vpermilpd: 3718 case X86::BI__builtin_ia32_vec_ext_v4hi: 3719 case X86::BI__builtin_ia32_vec_ext_v4si: 3720 case X86::BI__builtin_ia32_vec_ext_v4sf: 3721 case X86::BI__builtin_ia32_vec_ext_v4di: 3722 case X86::BI__builtin_ia32_extractf32x4_mask: 3723 case X86::BI__builtin_ia32_extracti32x4_mask: 3724 case X86::BI__builtin_ia32_extractf64x2_512_mask: 3725 case X86::BI__builtin_ia32_extracti64x2_512_mask: 3726 i = 1; l = 0; u = 3; 3727 break; 3728 case X86::BI_mm_prefetch: 3729 case X86::BI__builtin_ia32_vec_ext_v8hi: 3730 case X86::BI__builtin_ia32_vec_ext_v8si: 3731 i = 1; l = 0; u = 7; 3732 break; 3733 case X86::BI__builtin_ia32_sha1rnds4: 3734 case X86::BI__builtin_ia32_blendpd: 3735 case X86::BI__builtin_ia32_shufpd: 3736 case X86::BI__builtin_ia32_vec_set_v4hi: 3737 case X86::BI__builtin_ia32_vec_set_v4si: 3738 case X86::BI__builtin_ia32_vec_set_v4di: 3739 case X86::BI__builtin_ia32_shuf_f32x4_256: 3740 case X86::BI__builtin_ia32_shuf_f64x2_256: 3741 case X86::BI__builtin_ia32_shuf_i32x4_256: 3742 case X86::BI__builtin_ia32_shuf_i64x2_256: 3743 case X86::BI__builtin_ia32_insertf64x2_512: 3744 case X86::BI__builtin_ia32_inserti64x2_512: 3745 case X86::BI__builtin_ia32_insertf32x4: 3746 case X86::BI__builtin_ia32_inserti32x4: 3747 i = 2; l = 0; u = 3; 3748 break; 3749 case X86::BI__builtin_ia32_vpermil2pd: 3750 case X86::BI__builtin_ia32_vpermil2pd256: 3751 case X86::BI__builtin_ia32_vpermil2ps: 3752 case X86::BI__builtin_ia32_vpermil2ps256: 3753 i = 3; l = 0; u = 3; 3754 break; 3755 case X86::BI__builtin_ia32_cmpb128_mask: 3756 case X86::BI__builtin_ia32_cmpw128_mask: 3757 case X86::BI__builtin_ia32_cmpd128_mask: 3758 case X86::BI__builtin_ia32_cmpq128_mask: 3759 case X86::BI__builtin_ia32_cmpb256_mask: 3760 case X86::BI__builtin_ia32_cmpw256_mask: 3761 case X86::BI__builtin_ia32_cmpd256_mask: 3762 case X86::BI__builtin_ia32_cmpq256_mask: 3763 case X86::BI__builtin_ia32_cmpb512_mask: 3764 case X86::BI__builtin_ia32_cmpw512_mask: 3765 case X86::BI__builtin_ia32_cmpd512_mask: 
3766 case X86::BI__builtin_ia32_cmpq512_mask: 3767 case X86::BI__builtin_ia32_ucmpb128_mask: 3768 case X86::BI__builtin_ia32_ucmpw128_mask: 3769 case X86::BI__builtin_ia32_ucmpd128_mask: 3770 case X86::BI__builtin_ia32_ucmpq128_mask: 3771 case X86::BI__builtin_ia32_ucmpb256_mask: 3772 case X86::BI__builtin_ia32_ucmpw256_mask: 3773 case X86::BI__builtin_ia32_ucmpd256_mask: 3774 case X86::BI__builtin_ia32_ucmpq256_mask: 3775 case X86::BI__builtin_ia32_ucmpb512_mask: 3776 case X86::BI__builtin_ia32_ucmpw512_mask: 3777 case X86::BI__builtin_ia32_ucmpd512_mask: 3778 case X86::BI__builtin_ia32_ucmpq512_mask: 3779 case X86::BI__builtin_ia32_vpcomub: 3780 case X86::BI__builtin_ia32_vpcomuw: 3781 case X86::BI__builtin_ia32_vpcomud: 3782 case X86::BI__builtin_ia32_vpcomuq: 3783 case X86::BI__builtin_ia32_vpcomb: 3784 case X86::BI__builtin_ia32_vpcomw: 3785 case X86::BI__builtin_ia32_vpcomd: 3786 case X86::BI__builtin_ia32_vpcomq: 3787 case X86::BI__builtin_ia32_vec_set_v8hi: 3788 case X86::BI__builtin_ia32_vec_set_v8si: 3789 i = 2; l = 0; u = 7; 3790 break; 3791 case X86::BI__builtin_ia32_vpermilpd256: 3792 case X86::BI__builtin_ia32_roundps: 3793 case X86::BI__builtin_ia32_roundpd: 3794 case X86::BI__builtin_ia32_roundps256: 3795 case X86::BI__builtin_ia32_roundpd256: 3796 case X86::BI__builtin_ia32_getmantpd128_mask: 3797 case X86::BI__builtin_ia32_getmantpd256_mask: 3798 case X86::BI__builtin_ia32_getmantps128_mask: 3799 case X86::BI__builtin_ia32_getmantps256_mask: 3800 case X86::BI__builtin_ia32_getmantpd512_mask: 3801 case X86::BI__builtin_ia32_getmantps512_mask: 3802 case X86::BI__builtin_ia32_vec_ext_v16qi: 3803 case X86::BI__builtin_ia32_vec_ext_v16hi: 3804 i = 1; l = 0; u = 15; 3805 break; 3806 case X86::BI__builtin_ia32_pblendd128: 3807 case X86::BI__builtin_ia32_blendps: 3808 case X86::BI__builtin_ia32_blendpd256: 3809 case X86::BI__builtin_ia32_shufpd256: 3810 case X86::BI__builtin_ia32_roundss: 3811 case X86::BI__builtin_ia32_roundsd: 3812 case X86::BI__builtin_ia32_rangepd128_mask: 3813 case X86::BI__builtin_ia32_rangepd256_mask: 3814 case X86::BI__builtin_ia32_rangepd512_mask: 3815 case X86::BI__builtin_ia32_rangeps128_mask: 3816 case X86::BI__builtin_ia32_rangeps256_mask: 3817 case X86::BI__builtin_ia32_rangeps512_mask: 3818 case X86::BI__builtin_ia32_getmantsd_round_mask: 3819 case X86::BI__builtin_ia32_getmantss_round_mask: 3820 case X86::BI__builtin_ia32_vec_set_v16qi: 3821 case X86::BI__builtin_ia32_vec_set_v16hi: 3822 i = 2; l = 0; u = 15; 3823 break; 3824 case X86::BI__builtin_ia32_vec_ext_v32qi: 3825 i = 1; l = 0; u = 31; 3826 break; 3827 case X86::BI__builtin_ia32_cmpps: 3828 case X86::BI__builtin_ia32_cmpss: 3829 case X86::BI__builtin_ia32_cmppd: 3830 case X86::BI__builtin_ia32_cmpsd: 3831 case X86::BI__builtin_ia32_cmpps256: 3832 case X86::BI__builtin_ia32_cmppd256: 3833 case X86::BI__builtin_ia32_cmpps128_mask: 3834 case X86::BI__builtin_ia32_cmppd128_mask: 3835 case X86::BI__builtin_ia32_cmpps256_mask: 3836 case X86::BI__builtin_ia32_cmppd256_mask: 3837 case X86::BI__builtin_ia32_cmpps512_mask: 3838 case X86::BI__builtin_ia32_cmppd512_mask: 3839 case X86::BI__builtin_ia32_cmpsd_mask: 3840 case X86::BI__builtin_ia32_cmpss_mask: 3841 case X86::BI__builtin_ia32_vec_set_v32qi: 3842 i = 2; l = 0; u = 31; 3843 break; 3844 case X86::BI__builtin_ia32_permdf256: 3845 case X86::BI__builtin_ia32_permdi256: 3846 case X86::BI__builtin_ia32_permdf512: 3847 case X86::BI__builtin_ia32_permdi512: 3848 case X86::BI__builtin_ia32_vpermilps: 3849 case X86::BI__builtin_ia32_vpermilps256: 
3850 case X86::BI__builtin_ia32_vpermilpd512: 3851 case X86::BI__builtin_ia32_vpermilps512: 3852 case X86::BI__builtin_ia32_pshufd: 3853 case X86::BI__builtin_ia32_pshufd256: 3854 case X86::BI__builtin_ia32_pshufd512: 3855 case X86::BI__builtin_ia32_pshufhw: 3856 case X86::BI__builtin_ia32_pshufhw256: 3857 case X86::BI__builtin_ia32_pshufhw512: 3858 case X86::BI__builtin_ia32_pshuflw: 3859 case X86::BI__builtin_ia32_pshuflw256: 3860 case X86::BI__builtin_ia32_pshuflw512: 3861 case X86::BI__builtin_ia32_vcvtps2ph: 3862 case X86::BI__builtin_ia32_vcvtps2ph_mask: 3863 case X86::BI__builtin_ia32_vcvtps2ph256: 3864 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 3865 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 3866 case X86::BI__builtin_ia32_rndscaleps_128_mask: 3867 case X86::BI__builtin_ia32_rndscalepd_128_mask: 3868 case X86::BI__builtin_ia32_rndscaleps_256_mask: 3869 case X86::BI__builtin_ia32_rndscalepd_256_mask: 3870 case X86::BI__builtin_ia32_rndscaleps_mask: 3871 case X86::BI__builtin_ia32_rndscalepd_mask: 3872 case X86::BI__builtin_ia32_reducepd128_mask: 3873 case X86::BI__builtin_ia32_reducepd256_mask: 3874 case X86::BI__builtin_ia32_reducepd512_mask: 3875 case X86::BI__builtin_ia32_reduceps128_mask: 3876 case X86::BI__builtin_ia32_reduceps256_mask: 3877 case X86::BI__builtin_ia32_reduceps512_mask: 3878 case X86::BI__builtin_ia32_prold512: 3879 case X86::BI__builtin_ia32_prolq512: 3880 case X86::BI__builtin_ia32_prold128: 3881 case X86::BI__builtin_ia32_prold256: 3882 case X86::BI__builtin_ia32_prolq128: 3883 case X86::BI__builtin_ia32_prolq256: 3884 case X86::BI__builtin_ia32_prord512: 3885 case X86::BI__builtin_ia32_prorq512: 3886 case X86::BI__builtin_ia32_prord128: 3887 case X86::BI__builtin_ia32_prord256: 3888 case X86::BI__builtin_ia32_prorq128: 3889 case X86::BI__builtin_ia32_prorq256: 3890 case X86::BI__builtin_ia32_fpclasspd128_mask: 3891 case X86::BI__builtin_ia32_fpclasspd256_mask: 3892 case X86::BI__builtin_ia32_fpclassps128_mask: 3893 case X86::BI__builtin_ia32_fpclassps256_mask: 3894 case X86::BI__builtin_ia32_fpclassps512_mask: 3895 case X86::BI__builtin_ia32_fpclasspd512_mask: 3896 case X86::BI__builtin_ia32_fpclasssd_mask: 3897 case X86::BI__builtin_ia32_fpclassss_mask: 3898 case X86::BI__builtin_ia32_pslldqi128_byteshift: 3899 case X86::BI__builtin_ia32_pslldqi256_byteshift: 3900 case X86::BI__builtin_ia32_pslldqi512_byteshift: 3901 case X86::BI__builtin_ia32_psrldqi128_byteshift: 3902 case X86::BI__builtin_ia32_psrldqi256_byteshift: 3903 case X86::BI__builtin_ia32_psrldqi512_byteshift: 3904 case X86::BI__builtin_ia32_kshiftliqi: 3905 case X86::BI__builtin_ia32_kshiftlihi: 3906 case X86::BI__builtin_ia32_kshiftlisi: 3907 case X86::BI__builtin_ia32_kshiftlidi: 3908 case X86::BI__builtin_ia32_kshiftriqi: 3909 case X86::BI__builtin_ia32_kshiftrihi: 3910 case X86::BI__builtin_ia32_kshiftrisi: 3911 case X86::BI__builtin_ia32_kshiftridi: 3912 i = 1; l = 0; u = 255; 3913 break; 3914 case X86::BI__builtin_ia32_vperm2f128_pd256: 3915 case X86::BI__builtin_ia32_vperm2f128_ps256: 3916 case X86::BI__builtin_ia32_vperm2f128_si256: 3917 case X86::BI__builtin_ia32_permti256: 3918 case X86::BI__builtin_ia32_pblendw128: 3919 case X86::BI__builtin_ia32_pblendw256: 3920 case X86::BI__builtin_ia32_blendps256: 3921 case X86::BI__builtin_ia32_pblendd256: 3922 case X86::BI__builtin_ia32_palignr128: 3923 case X86::BI__builtin_ia32_palignr256: 3924 case X86::BI__builtin_ia32_palignr512: 3925 case X86::BI__builtin_ia32_alignq512: 3926 case X86::BI__builtin_ia32_alignd512: 3927 case 
X86::BI__builtin_ia32_alignd128: 3928 case X86::BI__builtin_ia32_alignd256: 3929 case X86::BI__builtin_ia32_alignq128: 3930 case X86::BI__builtin_ia32_alignq256: 3931 case X86::BI__builtin_ia32_vcomisd: 3932 case X86::BI__builtin_ia32_vcomiss: 3933 case X86::BI__builtin_ia32_shuf_f32x4: 3934 case X86::BI__builtin_ia32_shuf_f64x2: 3935 case X86::BI__builtin_ia32_shuf_i32x4: 3936 case X86::BI__builtin_ia32_shuf_i64x2: 3937 case X86::BI__builtin_ia32_shufpd512: 3938 case X86::BI__builtin_ia32_shufps: 3939 case X86::BI__builtin_ia32_shufps256: 3940 case X86::BI__builtin_ia32_shufps512: 3941 case X86::BI__builtin_ia32_dbpsadbw128: 3942 case X86::BI__builtin_ia32_dbpsadbw256: 3943 case X86::BI__builtin_ia32_dbpsadbw512: 3944 case X86::BI__builtin_ia32_vpshldd128: 3945 case X86::BI__builtin_ia32_vpshldd256: 3946 case X86::BI__builtin_ia32_vpshldd512: 3947 case X86::BI__builtin_ia32_vpshldq128: 3948 case X86::BI__builtin_ia32_vpshldq256: 3949 case X86::BI__builtin_ia32_vpshldq512: 3950 case X86::BI__builtin_ia32_vpshldw128: 3951 case X86::BI__builtin_ia32_vpshldw256: 3952 case X86::BI__builtin_ia32_vpshldw512: 3953 case X86::BI__builtin_ia32_vpshrdd128: 3954 case X86::BI__builtin_ia32_vpshrdd256: 3955 case X86::BI__builtin_ia32_vpshrdd512: 3956 case X86::BI__builtin_ia32_vpshrdq128: 3957 case X86::BI__builtin_ia32_vpshrdq256: 3958 case X86::BI__builtin_ia32_vpshrdq512: 3959 case X86::BI__builtin_ia32_vpshrdw128: 3960 case X86::BI__builtin_ia32_vpshrdw256: 3961 case X86::BI__builtin_ia32_vpshrdw512: 3962 i = 2; l = 0; u = 255; 3963 break; 3964 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3965 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3966 case X86::BI__builtin_ia32_fixupimmps512_mask: 3967 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3968 case X86::BI__builtin_ia32_fixupimmsd_mask: 3969 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3970 case X86::BI__builtin_ia32_fixupimmss_mask: 3971 case X86::BI__builtin_ia32_fixupimmss_maskz: 3972 case X86::BI__builtin_ia32_fixupimmpd128_mask: 3973 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 3974 case X86::BI__builtin_ia32_fixupimmpd256_mask: 3975 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 3976 case X86::BI__builtin_ia32_fixupimmps128_mask: 3977 case X86::BI__builtin_ia32_fixupimmps128_maskz: 3978 case X86::BI__builtin_ia32_fixupimmps256_mask: 3979 case X86::BI__builtin_ia32_fixupimmps256_maskz: 3980 case X86::BI__builtin_ia32_pternlogd512_mask: 3981 case X86::BI__builtin_ia32_pternlogd512_maskz: 3982 case X86::BI__builtin_ia32_pternlogq512_mask: 3983 case X86::BI__builtin_ia32_pternlogq512_maskz: 3984 case X86::BI__builtin_ia32_pternlogd128_mask: 3985 case X86::BI__builtin_ia32_pternlogd128_maskz: 3986 case X86::BI__builtin_ia32_pternlogd256_mask: 3987 case X86::BI__builtin_ia32_pternlogd256_maskz: 3988 case X86::BI__builtin_ia32_pternlogq128_mask: 3989 case X86::BI__builtin_ia32_pternlogq128_maskz: 3990 case X86::BI__builtin_ia32_pternlogq256_mask: 3991 case X86::BI__builtin_ia32_pternlogq256_maskz: 3992 i = 3; l = 0; u = 255; 3993 break; 3994 case X86::BI__builtin_ia32_gatherpfdpd: 3995 case X86::BI__builtin_ia32_gatherpfdps: 3996 case X86::BI__builtin_ia32_gatherpfqpd: 3997 case X86::BI__builtin_ia32_gatherpfqps: 3998 case X86::BI__builtin_ia32_scatterpfdpd: 3999 case X86::BI__builtin_ia32_scatterpfdps: 4000 case X86::BI__builtin_ia32_scatterpfqpd: 4001 case X86::BI__builtin_ia32_scatterpfqps: 4002 i = 4; l = 2; u = 3; 4003 break; 4004 case X86::BI__builtin_ia32_reducesd_mask: 4005 case X86::BI__builtin_ia32_reducess_mask: 4006 case 
X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
    i = 4; l = 0; u = 255;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have out-of-
  // range values. These need to code generate, but don't necessarily need to
  // make any sense. We use a warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
/// parameter with the FormatAttr's correct format_idx and firstDataArg.
/// Returns true when the format fits the function and the FormatStringInfo has
/// been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               FormatStringInfo *FSI) {
  FSI->HasVAListArg = Format->getFirstArg() == 0;
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    if (FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability =
          Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
              dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
                dyn_cast<InitListExpr>(CLE->getInitializer()))
          Expr = ILE->getInit(0);
  }

  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}

static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                              << ArgExpr->getSourceRange());
}

bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, &FSI)) {
    Idx = FSI.FormatIdx;
    return true;
  }
  return false;
}

/// Diagnose use of %s directive in an NSString which is being passed
/// as a format string to a formatting method.
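/// Illustrative example (assumed, not taken from the original source): a call
/// such as
///   CFStringCreateWithFormat(NULL, NULL, (CFStringRef)@"name: %s", name);
/// is diagnosed here, since %s directives in CFString/NSString format strings
/// are discouraged.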
static void
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
                                      const NamedDecl *FDecl,
                                      Expr **Args,
                                      unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
    Idx = 2;
    Format = true;
  }
  else
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  if (!Format || NumArgs <= Idx)
    return;
  const Expr *FormatExpr = Args[Idx];
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
          dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
  else
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  if (!FormatString)
    return;
  if (S.FormatStringHasSArg(FormatString)) {
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
        << "%s" << 1 << 1;
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
        << FDecl->getDeclName();
  }
}

/// Determine whether the given type has a non-null nullability annotation.
static bool isNonNullType(ASTContext &ctx, QualType type) {
  if (auto nullability = type->getNullability(ctx))
    return *nullability == NullabilityKind::NonNull;

  return false;
}

static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Already checked by the constant evaluator.
  if (S.isConstantEvaluated())
    return;
  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
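    // Illustrative sketch, not from the original source: either of these forms
    // marks the parameter, so passing a null argument at a call site is
    // diagnosed:
    //   void f(__attribute__((nonnull)) int *p);
    //   void g(int *_Nonnull q);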
4176 ArrayRef<ParmVarDecl*> parms; 4177 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 4178 parms = FD->parameters(); 4179 else 4180 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 4181 4182 unsigned ParamIndex = 0; 4183 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 4184 I != E; ++I, ++ParamIndex) { 4185 const ParmVarDecl *PVD = *I; 4186 if (PVD->hasAttr<NonNullAttr>() || 4187 isNonNullType(S.Context, PVD->getType())) { 4188 if (NonNullArgs.empty()) 4189 NonNullArgs.resize(Args.size()); 4190 4191 NonNullArgs.set(ParamIndex); 4192 } 4193 } 4194 } else { 4195 // If we have a non-function, non-method declaration but no 4196 // function prototype, try to dig out the function prototype. 4197 if (!Proto) { 4198 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 4199 QualType type = VD->getType().getNonReferenceType(); 4200 if (auto pointerType = type->getAs<PointerType>()) 4201 type = pointerType->getPointeeType(); 4202 else if (auto blockType = type->getAs<BlockPointerType>()) 4203 type = blockType->getPointeeType(); 4204 // FIXME: data member pointers? 4205 4206 // Dig out the function prototype, if there is one. 4207 Proto = type->getAs<FunctionProtoType>(); 4208 } 4209 } 4210 4211 // Fill in non-null argument information from the nullability 4212 // information on the parameter types (if we have them). 4213 if (Proto) { 4214 unsigned Index = 0; 4215 for (auto paramType : Proto->getParamTypes()) { 4216 if (isNonNullType(S.Context, paramType)) { 4217 if (NonNullArgs.empty()) 4218 NonNullArgs.resize(Args.size()); 4219 4220 NonNullArgs.set(Index); 4221 } 4222 4223 ++Index; 4224 } 4225 } 4226 } 4227 4228 // Check for non-null arguments. 4229 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 4230 ArgIndex != ArgIndexEnd; ++ArgIndex) { 4231 if (NonNullArgs[ArgIndex]) 4232 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 4233 } 4234 } 4235 4236 /// Handles the checks for format strings, non-POD arguments to vararg 4237 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 4238 /// attributes. 4239 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 4240 const Expr *ThisArg, ArrayRef<const Expr *> Args, 4241 bool IsMemberFunction, SourceLocation Loc, 4242 SourceRange Range, VariadicCallType CallType) { 4243 // FIXME: We should check as much as we can in the template definition. 4244 if (CurContext->isDependentContext()) 4245 return; 4246 4247 // Printf and scanf checking. 4248 llvm::SmallBitVector CheckedVarArgs; 4249 if (FDecl) { 4250 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4251 // Only create vector if there are format attributes. 4252 CheckedVarArgs.resize(Args.size()); 4253 4254 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 4255 CheckedVarArgs); 4256 } 4257 } 4258 4259 // Refuse POD arguments that weren't caught by the format string 4260 // checks above. 4261 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 4262 if (CallType != VariadicDoesNotApply && 4263 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 4264 unsigned NumParams = Proto ? Proto->getNumParams() 4265 : FDecl && isa<FunctionDecl>(FDecl) 4266 ? cast<FunctionDecl>(FDecl)->getNumParams() 4267 : FDecl && isa<ObjCMethodDecl>(FDecl) 4268 ? cast<ObjCMethodDecl>(FDecl)->param_size() 4269 : 0; 4270 4271 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 4272 // Args[ArgIdx] can be null in malformed code. 
4273 if (const Expr *Arg = Args[ArgIdx]) { 4274 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 4275 checkVariadicArgument(Arg, CallType); 4276 } 4277 } 4278 } 4279 4280 if (FDecl || Proto) { 4281 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 4282 4283 // Type safety checking. 4284 if (FDecl) { 4285 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 4286 CheckArgumentWithTypeTag(I, Args, Loc); 4287 } 4288 } 4289 4290 if (FD) 4291 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 4292 } 4293 4294 /// CheckConstructorCall - Check a constructor call for correctness and safety 4295 /// properties not enforced by the C type system. 4296 void Sema::CheckConstructorCall(FunctionDecl *FDecl, 4297 ArrayRef<const Expr *> Args, 4298 const FunctionProtoType *Proto, 4299 SourceLocation Loc) { 4300 VariadicCallType CallType = 4301 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 4302 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 4303 Loc, SourceRange(), CallType); 4304 } 4305 4306 /// CheckFunctionCall - Check a direct function call for various correctness 4307 /// and safety properties not strictly enforced by the C type system. 4308 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 4309 const FunctionProtoType *Proto) { 4310 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 4311 isa<CXXMethodDecl>(FDecl); 4312 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 4313 IsMemberOperatorCall; 4314 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 4315 TheCall->getCallee()); 4316 Expr** Args = TheCall->getArgs(); 4317 unsigned NumArgs = TheCall->getNumArgs(); 4318 4319 Expr *ImplicitThis = nullptr; 4320 if (IsMemberOperatorCall) { 4321 // If this is a call to a member operator, hide the first argument 4322 // from checkCall. 4323 // FIXME: Our choice of AST representation here is less than ideal. 4324 ImplicitThis = Args[0]; 4325 ++Args; 4326 --NumArgs; 4327 } else if (IsMemberFunction) 4328 ImplicitThis = 4329 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 4330 4331 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 4332 IsMemberFunction, TheCall->getRParenLoc(), 4333 TheCall->getCallee()->getSourceRange(), CallType); 4334 4335 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 4336 // None of the checks below are needed for functions that don't have 4337 // simple names (e.g., C++ conversion functions). 4338 if (!FnInfo) 4339 return false; 4340 4341 CheckAbsoluteValueFunction(TheCall, FDecl); 4342 CheckMaxUnsignedZero(TheCall, FDecl); 4343 4344 if (getLangOpts().ObjC) 4345 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 4346 4347 unsigned CMId = FDecl->getMemoryFunctionKind(); 4348 if (CMId == 0) 4349 return false; 4350 4351 // Handle memory setting and copying functions. 4352 if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat) 4353 CheckStrlcpycatArguments(TheCall, FnInfo); 4354 else if (CMId == Builtin::BIstrncat) 4355 CheckStrncatArguments(TheCall, FnInfo); 4356 else 4357 CheckMemaccessArguments(TheCall, CMId, FnInfo); 4358 4359 return false; 4360 } 4361 4362 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 4363 ArrayRef<const Expr *> Args) { 4364 VariadicCallType CallType = 4365 Method->isVariadic() ? 
VariadicMethod : VariadicDoesNotApply; 4366 4367 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 4368 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 4369 CallType); 4370 4371 return false; 4372 } 4373 4374 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 4375 const FunctionProtoType *Proto) { 4376 QualType Ty; 4377 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 4378 Ty = V->getType().getNonReferenceType(); 4379 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 4380 Ty = F->getType().getNonReferenceType(); 4381 else 4382 return false; 4383 4384 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 4385 !Ty->isFunctionProtoType()) 4386 return false; 4387 4388 VariadicCallType CallType; 4389 if (!Proto || !Proto->isVariadic()) { 4390 CallType = VariadicDoesNotApply; 4391 } else if (Ty->isBlockPointerType()) { 4392 CallType = VariadicBlock; 4393 } else { // Ty->isFunctionPointerType() 4394 CallType = VariadicFunction; 4395 } 4396 4397 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 4398 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4399 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4400 TheCall->getCallee()->getSourceRange(), CallType); 4401 4402 return false; 4403 } 4404 4405 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 4406 /// such as function pointers returned from functions. 4407 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 4408 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 4409 TheCall->getCallee()); 4410 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 4411 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4412 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4413 TheCall->getCallee()->getSourceRange(), CallType); 4414 4415 return false; 4416 } 4417 4418 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 4419 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 4420 return false; 4421 4422 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 4423 switch (Op) { 4424 case AtomicExpr::AO__c11_atomic_init: 4425 case AtomicExpr::AO__opencl_atomic_init: 4426 llvm_unreachable("There is no ordering argument for an init"); 4427 4428 case AtomicExpr::AO__c11_atomic_load: 4429 case AtomicExpr::AO__opencl_atomic_load: 4430 case AtomicExpr::AO__atomic_load_n: 4431 case AtomicExpr::AO__atomic_load: 4432 return OrderingCABI != llvm::AtomicOrderingCABI::release && 4433 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4434 4435 case AtomicExpr::AO__c11_atomic_store: 4436 case AtomicExpr::AO__opencl_atomic_store: 4437 case AtomicExpr::AO__atomic_store: 4438 case AtomicExpr::AO__atomic_store_n: 4439 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 4440 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 4441 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4442 4443 default: 4444 return true; 4445 } 4446 } 4447 4448 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 4449 AtomicExpr::AtomicOp Op) { 4450 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 4451 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 4452 4453 // All the non-OpenCL operations take one of the following forms. 4454 // The OpenCL operations take the __c11 forms with one extra argument for 4455 // synchronization scope. 
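  // Illustrative sketch, not from the original source: the C11 form
  //   C __c11_atomic_load(A *, int order)
  // becomes, for OpenCL,
  //   C __opencl_atomic_load(A *, int order, int scope)
  // which is why one extra argument is expected for the OpenCL builtins below.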
4456 enum { 4457 // C __c11_atomic_init(A *, C) 4458 Init, 4459 4460 // C __c11_atomic_load(A *, int) 4461 Load, 4462 4463 // void __atomic_load(A *, CP, int) 4464 LoadCopy, 4465 4466 // void __atomic_store(A *, CP, int) 4467 Copy, 4468 4469 // C __c11_atomic_add(A *, M, int) 4470 Arithmetic, 4471 4472 // C __atomic_exchange_n(A *, CP, int) 4473 Xchg, 4474 4475 // void __atomic_exchange(A *, C *, CP, int) 4476 GNUXchg, 4477 4478 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 4479 C11CmpXchg, 4480 4481 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 4482 GNUCmpXchg 4483 } Form = Init; 4484 4485 const unsigned NumForm = GNUCmpXchg + 1; 4486 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 4487 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 4488 // where: 4489 // C is an appropriate type, 4490 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 4491 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 4492 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 4493 // the int parameters are for orderings. 4494 4495 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 4496 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 4497 "need to update code for modified forms"); 4498 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 4499 AtomicExpr::AO__c11_atomic_fetch_xor + 1 == 4500 AtomicExpr::AO__atomic_load, 4501 "need to update code for modified C11 atomics"); 4502 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 4503 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 4504 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 4505 Op <= AtomicExpr::AO__c11_atomic_fetch_xor) || 4506 IsOpenCL; 4507 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 4508 Op == AtomicExpr::AO__atomic_store_n || 4509 Op == AtomicExpr::AO__atomic_exchange_n || 4510 Op == AtomicExpr::AO__atomic_compare_exchange_n; 4511 bool IsAddSub = false; 4512 bool IsMinMax = false; 4513 4514 switch (Op) { 4515 case AtomicExpr::AO__c11_atomic_init: 4516 case AtomicExpr::AO__opencl_atomic_init: 4517 Form = Init; 4518 break; 4519 4520 case AtomicExpr::AO__c11_atomic_load: 4521 case AtomicExpr::AO__opencl_atomic_load: 4522 case AtomicExpr::AO__atomic_load_n: 4523 Form = Load; 4524 break; 4525 4526 case AtomicExpr::AO__atomic_load: 4527 Form = LoadCopy; 4528 break; 4529 4530 case AtomicExpr::AO__c11_atomic_store: 4531 case AtomicExpr::AO__opencl_atomic_store: 4532 case AtomicExpr::AO__atomic_store: 4533 case AtomicExpr::AO__atomic_store_n: 4534 Form = Copy; 4535 break; 4536 4537 case AtomicExpr::AO__c11_atomic_fetch_add: 4538 case AtomicExpr::AO__c11_atomic_fetch_sub: 4539 case AtomicExpr::AO__opencl_atomic_fetch_add: 4540 case AtomicExpr::AO__opencl_atomic_fetch_sub: 4541 case AtomicExpr::AO__opencl_atomic_fetch_min: 4542 case AtomicExpr::AO__opencl_atomic_fetch_max: 4543 case AtomicExpr::AO__atomic_fetch_add: 4544 case AtomicExpr::AO__atomic_fetch_sub: 4545 case AtomicExpr::AO__atomic_add_fetch: 4546 case AtomicExpr::AO__atomic_sub_fetch: 4547 IsAddSub = true; 4548 LLVM_FALLTHROUGH; 4549 case AtomicExpr::AO__c11_atomic_fetch_and: 4550 case AtomicExpr::AO__c11_atomic_fetch_or: 4551 case AtomicExpr::AO__c11_atomic_fetch_xor: 4552 case AtomicExpr::AO__opencl_atomic_fetch_and: 4553 case AtomicExpr::AO__opencl_atomic_fetch_or: 4554 case AtomicExpr::AO__opencl_atomic_fetch_xor: 4555 case AtomicExpr::AO__atomic_fetch_and: 4556 case AtomicExpr::AO__atomic_fetch_or: 4557 case AtomicExpr::AO__atomic_fetch_xor: 4558 
case AtomicExpr::AO__atomic_fetch_nand: 4559 case AtomicExpr::AO__atomic_and_fetch: 4560 case AtomicExpr::AO__atomic_or_fetch: 4561 case AtomicExpr::AO__atomic_xor_fetch: 4562 case AtomicExpr::AO__atomic_nand_fetch: 4563 Form = Arithmetic; 4564 break; 4565 4566 case AtomicExpr::AO__atomic_fetch_min: 4567 case AtomicExpr::AO__atomic_fetch_max: 4568 IsMinMax = true; 4569 Form = Arithmetic; 4570 break; 4571 4572 case AtomicExpr::AO__c11_atomic_exchange: 4573 case AtomicExpr::AO__opencl_atomic_exchange: 4574 case AtomicExpr::AO__atomic_exchange_n: 4575 Form = Xchg; 4576 break; 4577 4578 case AtomicExpr::AO__atomic_exchange: 4579 Form = GNUXchg; 4580 break; 4581 4582 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 4583 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 4584 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 4585 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 4586 Form = C11CmpXchg; 4587 break; 4588 4589 case AtomicExpr::AO__atomic_compare_exchange: 4590 case AtomicExpr::AO__atomic_compare_exchange_n: 4591 Form = GNUCmpXchg; 4592 break; 4593 } 4594 4595 unsigned AdjustedNumArgs = NumArgs[Form]; 4596 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init) 4597 ++AdjustedNumArgs; 4598 // Check we have the right number of arguments. 4599 if (TheCall->getNumArgs() < AdjustedNumArgs) { 4600 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 4601 << 0 << AdjustedNumArgs << TheCall->getNumArgs() 4602 << TheCall->getCallee()->getSourceRange(); 4603 return ExprError(); 4604 } else if (TheCall->getNumArgs() > AdjustedNumArgs) { 4605 Diag(TheCall->getArg(AdjustedNumArgs)->getBeginLoc(), 4606 diag::err_typecheck_call_too_many_args) 4607 << 0 << AdjustedNumArgs << TheCall->getNumArgs() 4608 << TheCall->getCallee()->getSourceRange(); 4609 return ExprError(); 4610 } 4611 4612 // Inspect the first argument of the atomic operation. 4613 Expr *Ptr = TheCall->getArg(0); 4614 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 4615 if (ConvertedPtr.isInvalid()) 4616 return ExprError(); 4617 4618 Ptr = ConvertedPtr.get(); 4619 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 4620 if (!pointerType) { 4621 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 4622 << Ptr->getType() << Ptr->getSourceRange(); 4623 return ExprError(); 4624 } 4625 4626 // For a __c11 builtin, this should be a pointer to an _Atomic type. 4627 QualType AtomTy = pointerType->getPointeeType(); // 'A' 4628 QualType ValType = AtomTy; // 'C' 4629 if (IsC11) { 4630 if (!AtomTy->isAtomicType()) { 4631 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic) 4632 << Ptr->getType() << Ptr->getSourceRange(); 4633 return ExprError(); 4634 } 4635 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 4636 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 4637 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_atomic) 4638 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 4639 << Ptr->getSourceRange(); 4640 return ExprError(); 4641 } 4642 ValType = AtomTy->getAs<AtomicType>()->getValueType(); 4643 } else if (Form != Load && Form != LoadCopy) { 4644 if (ValType.isConstQualified()) { 4645 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_pointer) 4646 << Ptr->getType() << Ptr->getSourceRange(); 4647 return ExprError(); 4648 } 4649 } 4650 4651 // For an arithmetic operation, the implied arithmetic must be well-formed. 
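  // Illustrative sketch, not from the original source, of what these rules
  // accept and reject:
  //   int *p;  __atomic_fetch_add(&p, 4, __ATOMIC_SEQ_CST); // OK: add/sub on a pointer
  //   float f; __atomic_fetch_or(&f, 1, __ATOMIC_SEQ_CST);  // rejected: bitwise ops need an integer
  //   long l;  __atomic_fetch_min(&l, 1, __ATOMIC_SEQ_CST); // rejected: min/max is limited to int/unsigned here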
4652 if (Form == Arithmetic) { 4653 // gcc does not enforce these rules for GNU atomics, but we do so for sanity. 4654 if (IsAddSub && !ValType->isIntegerType() 4655 && !ValType->isPointerType()) { 4656 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4657 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4658 return ExprError(); 4659 } 4660 if (IsMinMax) { 4661 const BuiltinType *BT = ValType->getAs<BuiltinType>(); 4662 if (!BT || (BT->getKind() != BuiltinType::Int && 4663 BT->getKind() != BuiltinType::UInt)) { 4664 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_int32_or_ptr); 4665 return ExprError(); 4666 } 4667 } 4668 if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) { 4669 Diag(DRE->getBeginLoc(), diag::err_atomic_op_bitwise_needs_atomic_int) 4670 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4671 return ExprError(); 4672 } 4673 if (IsC11 && ValType->isPointerType() && 4674 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 4675 diag::err_incomplete_type)) { 4676 return ExprError(); 4677 } 4678 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 4679 // For __atomic_*_n operations, the value type must be a scalar integral or 4680 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 4681 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4682 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4683 return ExprError(); 4684 } 4685 4686 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 4687 !AtomTy->isScalarType()) { 4688 // For GNU atomics, require a trivially-copyable type. This is not part of 4689 // the GNU atomics specification, but we enforce it for sanity. 4690 Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_trivial_copy) 4691 << Ptr->getType() << Ptr->getSourceRange(); 4692 return ExprError(); 4693 } 4694 4695 switch (ValType.getObjCLifetime()) { 4696 case Qualifiers::OCL_None: 4697 case Qualifiers::OCL_ExplicitNone: 4698 // okay 4699 break; 4700 4701 case Qualifiers::OCL_Weak: 4702 case Qualifiers::OCL_Strong: 4703 case Qualifiers::OCL_Autoreleasing: 4704 // FIXME: Can this happen? By this point, ValType should be known 4705 // to be trivially copyable. 4706 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 4707 << ValType << Ptr->getSourceRange(); 4708 return ExprError(); 4709 } 4710 4711 // All atomic operations have an overload which takes a pointer to a volatile 4712 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 4713 // into the result or the other operands. Similarly atomic_load takes a 4714 // pointer to a const 'A'. 4715 ValType.removeLocalVolatile(); 4716 ValType.removeLocalConst(); 4717 QualType ResultType = ValType; 4718 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 4719 Form == Init) 4720 ResultType = Context.VoidTy; 4721 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 4722 ResultType = Context.BoolTy; 4723 4724 // The type of a parameter passed 'by value'. In the GNU atomics, such 4725 // arguments are actually passed as pointers. 
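  // Illustrative sketch, not from the original source: compare
  //   __c11_atomic_store(&a, 1, __ATOMIC_RELEASE);    // value operand passed by value
  //   __atomic_store(&x, &desired, __ATOMIC_RELEASE); // value operand passed by address
  // The second form is why, below, 'CP' becomes a pointer type for GNU
  // builtins that are not '_n' variants.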
4726 QualType ByValType = ValType; // 'CP' 4727 bool IsPassedByAddress = false; 4728 if (!IsC11 && !IsN) { 4729 ByValType = Ptr->getType(); 4730 IsPassedByAddress = true; 4731 } 4732 4733 // The first argument's non-CV pointer type is used to deduce the type of 4734 // subsequent arguments, except for: 4735 // - weak flag (always converted to bool) 4736 // - memory order (always converted to int) 4737 // - scope (always converted to int) 4738 for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) { 4739 QualType Ty; 4740 if (i < NumVals[Form] + 1) { 4741 switch (i) { 4742 case 0: 4743 // The first argument is always a pointer. It has a fixed type. 4744 // It is always dereferenced, a nullptr is undefined. 4745 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc()); 4746 // Nothing else to do: we already know all we want about this pointer. 4747 continue; 4748 case 1: 4749 // The second argument is the non-atomic operand. For arithmetic, this 4750 // is always passed by value, and for a compare_exchange it is always 4751 // passed by address. For the rest, GNU uses by-address and C11 uses 4752 // by-value. 4753 assert(Form != Load); 4754 if (Form == Init || (Form == Arithmetic && ValType->isIntegerType())) 4755 Ty = ValType; 4756 else if (Form == Copy || Form == Xchg) { 4757 if (IsPassedByAddress) 4758 // The value pointer is always dereferenced, a nullptr is undefined. 4759 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc()); 4760 Ty = ByValType; 4761 } else if (Form == Arithmetic) 4762 Ty = Context.getPointerDiffType(); 4763 else { 4764 Expr *ValArg = TheCall->getArg(i); 4765 // The value pointer is always dereferenced, a nullptr is undefined. 4766 CheckNonNullArgument(*this, ValArg, DRE->getBeginLoc()); 4767 LangAS AS = LangAS::Default; 4768 // Keep address space of non-atomic pointer type. 4769 if (const PointerType *PtrTy = 4770 ValArg->getType()->getAs<PointerType>()) { 4771 AS = PtrTy->getPointeeType().getAddressSpace(); 4772 } 4773 Ty = Context.getPointerType( 4774 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 4775 } 4776 break; 4777 case 2: 4778 // The third argument to compare_exchange / GNU exchange is the desired 4779 // value, either by-value (for the C11 and *_n variant) or as a pointer. 4780 if (IsPassedByAddress) 4781 CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc()); 4782 Ty = ByValType; 4783 break; 4784 case 3: 4785 // The fourth argument to GNU compare_exchange is a 'weak' flag. 4786 Ty = Context.BoolTy; 4787 break; 4788 } 4789 } else { 4790 // The order(s) and scope are always converted to int. 4791 Ty = Context.IntTy; 4792 } 4793 4794 InitializedEntity Entity = 4795 InitializedEntity::InitializeParameter(Context, Ty, false); 4796 ExprResult Arg = TheCall->getArg(i); 4797 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 4798 if (Arg.isInvalid()) 4799 return true; 4800 TheCall->setArg(i, Arg.get()); 4801 } 4802 4803 // Permute the arguments into a 'consistent' order. 4804 SmallVector<Expr*, 5> SubExprs; 4805 SubExprs.push_back(Ptr); 4806 switch (Form) { 4807 case Init: 4808 // Note, AtomicExpr::getVal1() has a special case for this atomic. 
4809 SubExprs.push_back(TheCall->getArg(1)); // Val1 4810 break; 4811 case Load: 4812 SubExprs.push_back(TheCall->getArg(1)); // Order 4813 break; 4814 case LoadCopy: 4815 case Copy: 4816 case Arithmetic: 4817 case Xchg: 4818 SubExprs.push_back(TheCall->getArg(2)); // Order 4819 SubExprs.push_back(TheCall->getArg(1)); // Val1 4820 break; 4821 case GNUXchg: 4822 // Note, AtomicExpr::getVal2() has a special case for this atomic. 4823 SubExprs.push_back(TheCall->getArg(3)); // Order 4824 SubExprs.push_back(TheCall->getArg(1)); // Val1 4825 SubExprs.push_back(TheCall->getArg(2)); // Val2 4826 break; 4827 case C11CmpXchg: 4828 SubExprs.push_back(TheCall->getArg(3)); // Order 4829 SubExprs.push_back(TheCall->getArg(1)); // Val1 4830 SubExprs.push_back(TheCall->getArg(4)); // OrderFail 4831 SubExprs.push_back(TheCall->getArg(2)); // Val2 4832 break; 4833 case GNUCmpXchg: 4834 SubExprs.push_back(TheCall->getArg(4)); // Order 4835 SubExprs.push_back(TheCall->getArg(1)); // Val1 4836 SubExprs.push_back(TheCall->getArg(5)); // OrderFail 4837 SubExprs.push_back(TheCall->getArg(2)); // Val2 4838 SubExprs.push_back(TheCall->getArg(3)); // Weak 4839 break; 4840 } 4841 4842 if (SubExprs.size() >= 2 && Form != Init) { 4843 llvm::APSInt Result(32); 4844 if (SubExprs[1]->isIntegerConstantExpr(Result, Context) && 4845 !isValidOrderingForOp(Result.getSExtValue(), Op)) 4846 Diag(SubExprs[1]->getBeginLoc(), 4847 diag::warn_atomic_op_has_invalid_memory_order) 4848 << SubExprs[1]->getSourceRange(); 4849 } 4850 4851 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 4852 auto *Scope = TheCall->getArg(TheCall->getNumArgs() - 1); 4853 llvm::APSInt Result(32); 4854 if (Scope->isIntegerConstantExpr(Result, Context) && 4855 !ScopeModel->isValid(Result.getZExtValue())) { 4856 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 4857 << Scope->getSourceRange(); 4858 } 4859 SubExprs.push_back(Scope); 4860 } 4861 4862 AtomicExpr *AE = 4863 new (Context) AtomicExpr(TheCall->getCallee()->getBeginLoc(), SubExprs, 4864 ResultType, Op, TheCall->getRParenLoc()); 4865 4866 if ((Op == AtomicExpr::AO__c11_atomic_load || 4867 Op == AtomicExpr::AO__c11_atomic_store || 4868 Op == AtomicExpr::AO__opencl_atomic_load || 4869 Op == AtomicExpr::AO__opencl_atomic_store ) && 4870 Context.AtomicUsesUnsupportedLibcall(AE)) 4871 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 4872 << ((Op == AtomicExpr::AO__c11_atomic_load || 4873 Op == AtomicExpr::AO__opencl_atomic_load) 4874 ? 0 4875 : 1); 4876 4877 return AE; 4878 } 4879 4880 /// checkBuiltinArgument - Given a call to a builtin function, perform 4881 /// normal type-checking on the given argument, updating the call in 4882 /// place. This is useful when a builtin function requires custom 4883 /// type-checking for some of its arguments but not necessarily all of 4884 /// them. 4885 /// 4886 /// Returns true on error. 
4887 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 4888 FunctionDecl *Fn = E->getDirectCallee(); 4889 assert(Fn && "builtin call without direct callee!"); 4890 4891 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 4892 InitializedEntity Entity = 4893 InitializedEntity::InitializeParameter(S.Context, Param); 4894 4895 ExprResult Arg = E->getArg(0); 4896 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 4897 if (Arg.isInvalid()) 4898 return true; 4899 4900 E->setArg(ArgIndex, Arg.get()); 4901 return false; 4902 } 4903 4904 /// We have a call to a function like __sync_fetch_and_add, which is an 4905 /// overloaded function based on the pointer type of its first argument. 4906 /// The main BuildCallExpr routines have already promoted the types of 4907 /// arguments because all of these calls are prototyped as void(...). 4908 /// 4909 /// This function goes through and does final semantic checking for these 4910 /// builtins, as well as generating any warnings. 4911 ExprResult 4912 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 4913 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 4914 Expr *Callee = TheCall->getCallee(); 4915 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 4916 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 4917 4918 // Ensure that we have at least one argument to do type inference from. 4919 if (TheCall->getNumArgs() < 1) { 4920 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 4921 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 4922 return ExprError(); 4923 } 4924 4925 // Inspect the first argument of the atomic builtin. This should always be 4926 // a pointer type, whose element is an integral scalar or pointer type. 4927 // Because it is a pointer type, we don't have to worry about any implicit 4928 // casts here. 4929 // FIXME: We don't allow floating point scalars as input. 4930 Expr *FirstArg = TheCall->getArg(0); 4931 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 4932 if (FirstArgResult.isInvalid()) 4933 return ExprError(); 4934 FirstArg = FirstArgResult.get(); 4935 TheCall->setArg(0, FirstArg); 4936 4937 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 4938 if (!pointerType) { 4939 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 4940 << FirstArg->getType() << FirstArg->getSourceRange(); 4941 return ExprError(); 4942 } 4943 4944 QualType ValType = pointerType->getPointeeType(); 4945 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 4946 !ValType->isBlockPointerType()) { 4947 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 4948 << FirstArg->getType() << FirstArg->getSourceRange(); 4949 return ExprError(); 4950 } 4951 4952 if (ValType.isConstQualified()) { 4953 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 4954 << FirstArg->getType() << FirstArg->getSourceRange(); 4955 return ExprError(); 4956 } 4957 4958 switch (ValType.getObjCLifetime()) { 4959 case Qualifiers::OCL_None: 4960 case Qualifiers::OCL_ExplicitNone: 4961 // okay 4962 break; 4963 4964 case Qualifiers::OCL_Weak: 4965 case Qualifiers::OCL_Strong: 4966 case Qualifiers::OCL_Autoreleasing: 4967 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 4968 << ValType << FirstArg->getSourceRange(); 4969 return ExprError(); 4970 } 4971 4972 // Strip any qualifiers off ValType. 
  ValType = ValType.getUnqualifiedType();

  // The majority of builtins return a value, but a few have special return
  // types, so allow them to override appropriately below.
  QualType ResultType = ValType;

  // We need to figure out which concrete builtin this maps onto. For example,
  // __sync_fetch_and_add with a 2 byte object turns into
  // __sync_fetch_and_add_2.
#define BUILTIN_ROW(x) \
  { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
    Builtin::BI##x##_8, Builtin::BI##x##_16 }

  static const unsigned BuiltinIndices[][5] = {
    BUILTIN_ROW(__sync_fetch_and_add),
    BUILTIN_ROW(__sync_fetch_and_sub),
    BUILTIN_ROW(__sync_fetch_and_or),
    BUILTIN_ROW(__sync_fetch_and_and),
    BUILTIN_ROW(__sync_fetch_and_xor),
    BUILTIN_ROW(__sync_fetch_and_nand),

    BUILTIN_ROW(__sync_add_and_fetch),
    BUILTIN_ROW(__sync_sub_and_fetch),
    BUILTIN_ROW(__sync_and_and_fetch),
    BUILTIN_ROW(__sync_or_and_fetch),
    BUILTIN_ROW(__sync_xor_and_fetch),
    BUILTIN_ROW(__sync_nand_and_fetch),

    BUILTIN_ROW(__sync_val_compare_and_swap),
    BUILTIN_ROW(__sync_bool_compare_and_swap),
    BUILTIN_ROW(__sync_lock_test_and_set),
    BUILTIN_ROW(__sync_lock_release),
    BUILTIN_ROW(__sync_swap)
  };
#undef BUILTIN_ROW

  // Determine the index of the size.
  unsigned SizeIndex;
  switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
  case 1: SizeIndex = 0; break;
  case 2: SizeIndex = 1; break;
  case 4: SizeIndex = 2; break;
  case 8: SizeIndex = 3; break;
  case 16: SizeIndex = 4; break;
  default:
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Each of these builtins has one pointer argument, followed by some number of
  // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
  // that we ignore. Find out which row of BuiltinIndices to read from as well
  // as the number of fixed args.
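  // Illustrative sketch, not from the original source: for
  //   short s; __sync_val_compare_and_swap(&s, oldval, newval);
  // the switch below selects the __sync_val_compare_and_swap row with
  // NumFixed = 2 (the expected and desired values), and SizeIndex 1 (2 bytes)
  // then yields the concrete builtin __sync_val_compare_and_swap_2.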
5027 unsigned BuiltinID = FDecl->getBuiltinID(); 5028 unsigned BuiltinIndex, NumFixed = 1; 5029 bool WarnAboutSemanticsChange = false; 5030 switch (BuiltinID) { 5031 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 5032 case Builtin::BI__sync_fetch_and_add: 5033 case Builtin::BI__sync_fetch_and_add_1: 5034 case Builtin::BI__sync_fetch_and_add_2: 5035 case Builtin::BI__sync_fetch_and_add_4: 5036 case Builtin::BI__sync_fetch_and_add_8: 5037 case Builtin::BI__sync_fetch_and_add_16: 5038 BuiltinIndex = 0; 5039 break; 5040 5041 case Builtin::BI__sync_fetch_and_sub: 5042 case Builtin::BI__sync_fetch_and_sub_1: 5043 case Builtin::BI__sync_fetch_and_sub_2: 5044 case Builtin::BI__sync_fetch_and_sub_4: 5045 case Builtin::BI__sync_fetch_and_sub_8: 5046 case Builtin::BI__sync_fetch_and_sub_16: 5047 BuiltinIndex = 1; 5048 break; 5049 5050 case Builtin::BI__sync_fetch_and_or: 5051 case Builtin::BI__sync_fetch_and_or_1: 5052 case Builtin::BI__sync_fetch_and_or_2: 5053 case Builtin::BI__sync_fetch_and_or_4: 5054 case Builtin::BI__sync_fetch_and_or_8: 5055 case Builtin::BI__sync_fetch_and_or_16: 5056 BuiltinIndex = 2; 5057 break; 5058 5059 case Builtin::BI__sync_fetch_and_and: 5060 case Builtin::BI__sync_fetch_and_and_1: 5061 case Builtin::BI__sync_fetch_and_and_2: 5062 case Builtin::BI__sync_fetch_and_and_4: 5063 case Builtin::BI__sync_fetch_and_and_8: 5064 case Builtin::BI__sync_fetch_and_and_16: 5065 BuiltinIndex = 3; 5066 break; 5067 5068 case Builtin::BI__sync_fetch_and_xor: 5069 case Builtin::BI__sync_fetch_and_xor_1: 5070 case Builtin::BI__sync_fetch_and_xor_2: 5071 case Builtin::BI__sync_fetch_and_xor_4: 5072 case Builtin::BI__sync_fetch_and_xor_8: 5073 case Builtin::BI__sync_fetch_and_xor_16: 5074 BuiltinIndex = 4; 5075 break; 5076 5077 case Builtin::BI__sync_fetch_and_nand: 5078 case Builtin::BI__sync_fetch_and_nand_1: 5079 case Builtin::BI__sync_fetch_and_nand_2: 5080 case Builtin::BI__sync_fetch_and_nand_4: 5081 case Builtin::BI__sync_fetch_and_nand_8: 5082 case Builtin::BI__sync_fetch_and_nand_16: 5083 BuiltinIndex = 5; 5084 WarnAboutSemanticsChange = true; 5085 break; 5086 5087 case Builtin::BI__sync_add_and_fetch: 5088 case Builtin::BI__sync_add_and_fetch_1: 5089 case Builtin::BI__sync_add_and_fetch_2: 5090 case Builtin::BI__sync_add_and_fetch_4: 5091 case Builtin::BI__sync_add_and_fetch_8: 5092 case Builtin::BI__sync_add_and_fetch_16: 5093 BuiltinIndex = 6; 5094 break; 5095 5096 case Builtin::BI__sync_sub_and_fetch: 5097 case Builtin::BI__sync_sub_and_fetch_1: 5098 case Builtin::BI__sync_sub_and_fetch_2: 5099 case Builtin::BI__sync_sub_and_fetch_4: 5100 case Builtin::BI__sync_sub_and_fetch_8: 5101 case Builtin::BI__sync_sub_and_fetch_16: 5102 BuiltinIndex = 7; 5103 break; 5104 5105 case Builtin::BI__sync_and_and_fetch: 5106 case Builtin::BI__sync_and_and_fetch_1: 5107 case Builtin::BI__sync_and_and_fetch_2: 5108 case Builtin::BI__sync_and_and_fetch_4: 5109 case Builtin::BI__sync_and_and_fetch_8: 5110 case Builtin::BI__sync_and_and_fetch_16: 5111 BuiltinIndex = 8; 5112 break; 5113 5114 case Builtin::BI__sync_or_and_fetch: 5115 case Builtin::BI__sync_or_and_fetch_1: 5116 case Builtin::BI__sync_or_and_fetch_2: 5117 case Builtin::BI__sync_or_and_fetch_4: 5118 case Builtin::BI__sync_or_and_fetch_8: 5119 case Builtin::BI__sync_or_and_fetch_16: 5120 BuiltinIndex = 9; 5121 break; 5122 5123 case Builtin::BI__sync_xor_and_fetch: 5124 case Builtin::BI__sync_xor_and_fetch_1: 5125 case Builtin::BI__sync_xor_and_fetch_2: 5126 case Builtin::BI__sync_xor_and_fetch_4: 5127 case 
Builtin::BI__sync_xor_and_fetch_8: 5128 case Builtin::BI__sync_xor_and_fetch_16: 5129 BuiltinIndex = 10; 5130 break; 5131 5132 case Builtin::BI__sync_nand_and_fetch: 5133 case Builtin::BI__sync_nand_and_fetch_1: 5134 case Builtin::BI__sync_nand_and_fetch_2: 5135 case Builtin::BI__sync_nand_and_fetch_4: 5136 case Builtin::BI__sync_nand_and_fetch_8: 5137 case Builtin::BI__sync_nand_and_fetch_16: 5138 BuiltinIndex = 11; 5139 WarnAboutSemanticsChange = true; 5140 break; 5141 5142 case Builtin::BI__sync_val_compare_and_swap: 5143 case Builtin::BI__sync_val_compare_and_swap_1: 5144 case Builtin::BI__sync_val_compare_and_swap_2: 5145 case Builtin::BI__sync_val_compare_and_swap_4: 5146 case Builtin::BI__sync_val_compare_and_swap_8: 5147 case Builtin::BI__sync_val_compare_and_swap_16: 5148 BuiltinIndex = 12; 5149 NumFixed = 2; 5150 break; 5151 5152 case Builtin::BI__sync_bool_compare_and_swap: 5153 case Builtin::BI__sync_bool_compare_and_swap_1: 5154 case Builtin::BI__sync_bool_compare_and_swap_2: 5155 case Builtin::BI__sync_bool_compare_and_swap_4: 5156 case Builtin::BI__sync_bool_compare_and_swap_8: 5157 case Builtin::BI__sync_bool_compare_and_swap_16: 5158 BuiltinIndex = 13; 5159 NumFixed = 2; 5160 ResultType = Context.BoolTy; 5161 break; 5162 5163 case Builtin::BI__sync_lock_test_and_set: 5164 case Builtin::BI__sync_lock_test_and_set_1: 5165 case Builtin::BI__sync_lock_test_and_set_2: 5166 case Builtin::BI__sync_lock_test_and_set_4: 5167 case Builtin::BI__sync_lock_test_and_set_8: 5168 case Builtin::BI__sync_lock_test_and_set_16: 5169 BuiltinIndex = 14; 5170 break; 5171 5172 case Builtin::BI__sync_lock_release: 5173 case Builtin::BI__sync_lock_release_1: 5174 case Builtin::BI__sync_lock_release_2: 5175 case Builtin::BI__sync_lock_release_4: 5176 case Builtin::BI__sync_lock_release_8: 5177 case Builtin::BI__sync_lock_release_16: 5178 BuiltinIndex = 15; 5179 NumFixed = 0; 5180 ResultType = Context.VoidTy; 5181 break; 5182 5183 case Builtin::BI__sync_swap: 5184 case Builtin::BI__sync_swap_1: 5185 case Builtin::BI__sync_swap_2: 5186 case Builtin::BI__sync_swap_4: 5187 case Builtin::BI__sync_swap_8: 5188 case Builtin::BI__sync_swap_16: 5189 BuiltinIndex = 16; 5190 break; 5191 } 5192 5193 // Now that we know how many fixed arguments we expect, first check that we 5194 // have at least that many. 5195 if (TheCall->getNumArgs() < 1+NumFixed) { 5196 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5197 << 0 << 1 + NumFixed << TheCall->getNumArgs() 5198 << Callee->getSourceRange(); 5199 return ExprError(); 5200 } 5201 5202 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 5203 << Callee->getSourceRange(); 5204 5205 if (WarnAboutSemanticsChange) { 5206 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 5207 << Callee->getSourceRange(); 5208 } 5209 5210 // Get the decl for the concrete builtin from this, we can tell what the 5211 // concrete integer type we should convert to is. 5212 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 5213 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 5214 FunctionDecl *NewBuiltinDecl; 5215 if (NewBuiltinID == BuiltinID) 5216 NewBuiltinDecl = FDecl; 5217 else { 5218 // Perform builtin lookup to avoid redeclaring it. 
    DeclarationName DN(&Context.Idents.get(NewBuiltinName));
    LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
    LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
    assert(Res.getFoundDecl());
    NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
    if (!NewBuiltinDecl)
      return ExprError();
  }

  // The first argument --- the pointer --- has a fixed type; we
  // deduce the types of the rest of the arguments accordingly. Walk
  // the remaining arguments, converting them to the deduced value type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i+1);

    // GCC does an implicit conversion to the pointer or integer ValType. This
    // can fail in some cases (1i -> int**), check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type. Check
    // to see if there is a potentially weird extension going on here. This can
    // happen when you do an atomic operation on something like a char* and
    // pass in 42. The 42 gets converted to char. This is even more strange
    // for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i+1, Arg.get());
  }

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr *NewDRE = DeclRefExpr::Create(
      Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
      /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
      DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  return TheCallResult;
}

/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins.
ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
  unsigned BuiltinID = FDecl->getBuiltinID();
  assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
          BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
         "Unexpected nontemporal load/store builtin!");
  bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
  unsigned numArgs = isStore ? 2 : 1;

  // Ensure that we have the proper number of arguments.
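  // Illustrative usage (not in the original source):
  //
  //   float *P = ...;
  //   __builtin_nontemporal_store(1.0f, P);     // store form: (value, pointer)
  //   float V = __builtin_nontemporal_load(P);  // load form:  (pointer)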
5292 if (checkArgCount(*this, TheCall, numArgs)) 5293 return ExprError(); 5294 5295 // Inspect the last argument of the nontemporal builtin. This should always 5296 // be a pointer type, from which we imply the type of the memory access. 5297 // Because it is a pointer type, we don't have to worry about any implicit 5298 // casts here. 5299 Expr *PointerArg = TheCall->getArg(numArgs - 1); 5300 ExprResult PointerArgResult = 5301 DefaultFunctionArrayLvalueConversion(PointerArg); 5302 5303 if (PointerArgResult.isInvalid()) 5304 return ExprError(); 5305 PointerArg = PointerArgResult.get(); 5306 TheCall->setArg(numArgs - 1, PointerArg); 5307 5308 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 5309 if (!pointerType) { 5310 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 5311 << PointerArg->getType() << PointerArg->getSourceRange(); 5312 return ExprError(); 5313 } 5314 5315 QualType ValType = pointerType->getPointeeType(); 5316 5317 // Strip any qualifiers off ValType. 5318 ValType = ValType.getUnqualifiedType(); 5319 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5320 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 5321 !ValType->isVectorType()) { 5322 Diag(DRE->getBeginLoc(), 5323 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 5324 << PointerArg->getType() << PointerArg->getSourceRange(); 5325 return ExprError(); 5326 } 5327 5328 if (!isStore) { 5329 TheCall->setType(ValType); 5330 return TheCallResult; 5331 } 5332 5333 ExprResult ValArg = TheCall->getArg(0); 5334 InitializedEntity Entity = InitializedEntity::InitializeParameter( 5335 Context, ValType, /*consume*/ false); 5336 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 5337 if (ValArg.isInvalid()) 5338 return ExprError(); 5339 5340 TheCall->setArg(0, ValArg.get()); 5341 TheCall->setType(Context.VoidTy); 5342 return TheCallResult; 5343 } 5344 5345 /// CheckObjCString - Checks that the argument to the builtin 5346 /// CFString constructor is correct 5347 /// Note: It might also make sense to do the UTF-16 conversion here (would 5348 /// simplify the backend). 5349 bool Sema::CheckObjCString(Expr *Arg) { 5350 Arg = Arg->IgnoreParenCasts(); 5351 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 5352 5353 if (!Literal || !Literal->isAscii()) { 5354 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 5355 << Arg->getSourceRange(); 5356 return true; 5357 } 5358 5359 if (Literal->containsNonAsciiOrNull()) { 5360 StringRef String = Literal->getString(); 5361 unsigned NumBytes = String.size(); 5362 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 5363 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 5364 llvm::UTF16 *ToPtr = &ToBuf[0]; 5365 5366 llvm::ConversionResult Result = 5367 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 5368 ToPtr + NumBytes, llvm::strictConversion); 5369 // Check for conversion failure. 5370 if (Result != llvm::conversionOK) 5371 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 5372 << Arg->getSourceRange(); 5373 } 5374 return false; 5375 } 5376 5377 /// CheckObjCString - Checks that the format string argument to the os_log() 5378 /// and os_trace() functions is correct, and converts it to const char *. 
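///
/// For example (illustrative): a plain literal such as "%d packets" or an
/// Objective-C literal such as @"%d packets" is accepted and converted below;
/// anything that is not an ASCII or UTF-8 string literal is rejected.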
5379 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 5380 Arg = Arg->IgnoreParenCasts(); 5381 auto *Literal = dyn_cast<StringLiteral>(Arg); 5382 if (!Literal) { 5383 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 5384 Literal = ObjcLiteral->getString(); 5385 } 5386 } 5387 5388 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 5389 return ExprError( 5390 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 5391 << Arg->getSourceRange()); 5392 } 5393 5394 ExprResult Result(Literal); 5395 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 5396 InitializedEntity Entity = 5397 InitializedEntity::InitializeParameter(Context, ResultTy, false); 5398 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 5399 return Result; 5400 } 5401 5402 /// Check that the user is calling the appropriate va_start builtin for the 5403 /// target and calling convention. 5404 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 5405 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 5406 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 5407 bool IsAArch64 = TT.getArch() == llvm::Triple::aarch64; 5408 bool IsWindows = TT.isOSWindows(); 5409 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 5410 if (IsX64 || IsAArch64) { 5411 CallingConv CC = CC_C; 5412 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 5413 CC = FD->getType()->getAs<FunctionType>()->getCallConv(); 5414 if (IsMSVAStart) { 5415 // Don't allow this in System V ABI functions. 5416 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 5417 return S.Diag(Fn->getBeginLoc(), 5418 diag::err_ms_va_start_used_in_sysv_function); 5419 } else { 5420 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 5421 // On x64 Windows, don't allow this in System V ABI functions. 5422 // (Yes, that means there's no corresponding way to support variadic 5423 // System V ABI functions on Windows.) 5424 if ((IsWindows && CC == CC_X86_64SysV) || 5425 (!IsWindows && CC == CC_Win64)) 5426 return S.Diag(Fn->getBeginLoc(), 5427 diag::err_va_start_used_in_wrong_abi_function) 5428 << !IsWindows; 5429 } 5430 return false; 5431 } 5432 5433 if (IsMSVAStart) 5434 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 5435 return false; 5436 } 5437 5438 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 5439 ParmVarDecl **LastParam = nullptr) { 5440 // Determine whether the current function, block, or obj-c method is variadic 5441 // and get its parameter list. 5442 bool IsVariadic = false; 5443 ArrayRef<ParmVarDecl *> Params; 5444 DeclContext *Caller = S.CurContext; 5445 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 5446 IsVariadic = Block->isVariadic(); 5447 Params = Block->parameters(); 5448 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 5449 IsVariadic = FD->isVariadic(); 5450 Params = FD->parameters(); 5451 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 5452 IsVariadic = MD->isVariadic(); 5453 // FIXME: This isn't correct for methods (results in bogus warning). 5454 Params = MD->parameters(); 5455 } else if (isa<CapturedDecl>(Caller)) { 5456 // We don't support va_start in a CapturedDecl. 5457 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 5458 return true; 5459 } else { 5460 // This must be some other declcontext that parses exprs. 
5461 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 5462 return true; 5463 } 5464 5465 if (!IsVariadic) { 5466 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 5467 return true; 5468 } 5469 5470 if (LastParam) 5471 *LastParam = Params.empty() ? nullptr : Params.back(); 5472 5473 return false; 5474 } 5475 5476 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 5477 /// for validity. Emit an error and return true on failure; return false 5478 /// on success. 5479 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 5480 Expr *Fn = TheCall->getCallee(); 5481 5482 if (checkVAStartABI(*this, BuiltinID, Fn)) 5483 return true; 5484 5485 if (TheCall->getNumArgs() > 2) { 5486 Diag(TheCall->getArg(2)->getBeginLoc(), 5487 diag::err_typecheck_call_too_many_args) 5488 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5489 << Fn->getSourceRange() 5490 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5491 (*(TheCall->arg_end() - 1))->getEndLoc()); 5492 return true; 5493 } 5494 5495 if (TheCall->getNumArgs() < 2) { 5496 return Diag(TheCall->getEndLoc(), 5497 diag::err_typecheck_call_too_few_args_at_least) 5498 << 0 /*function call*/ << 2 << TheCall->getNumArgs(); 5499 } 5500 5501 // Type-check the first argument normally. 5502 if (checkBuiltinArgument(*this, TheCall, 0)) 5503 return true; 5504 5505 // Check that the current function is variadic, and get its last parameter. 5506 ParmVarDecl *LastParam; 5507 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 5508 return true; 5509 5510 // Verify that the second argument to the builtin is the last argument of the 5511 // current function or method. 5512 bool SecondArgIsLastNamedArgument = false; 5513 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 5514 5515 // These are valid if SecondArgIsLastNamedArgument is false after the next 5516 // block. 5517 QualType Type; 5518 SourceLocation ParamLoc; 5519 bool IsCRegister = false; 5520 5521 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 5522 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 5523 SecondArgIsLastNamedArgument = PV == LastParam; 5524 5525 Type = PV->getType(); 5526 ParamLoc = PV->getLocation(); 5527 IsCRegister = 5528 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 5529 } 5530 } 5531 5532 if (!SecondArgIsLastNamedArgument) 5533 Diag(TheCall->getArg(1)->getBeginLoc(), 5534 diag::warn_second_arg_of_va_start_not_last_named_param); 5535 else if (IsCRegister || Type->isReferenceType() || 5536 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 5537 // Promotable integers are UB, but enumerations need a bit of 5538 // extra checking to see what their promotable type actually is. 
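      // Illustrative example (not in the original source): given
      //
      //   void log_all(short count, ...) {
      //     va_list ap;
      //     va_start(ap, count);   // 'short' promotes to 'int'
      //   }
      //
      // the last named parameter has a promotable type, so the va_start is
      // diagnosed as undefined below.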
5539 if (!Type->isPromotableIntegerType()) 5540 return false; 5541 if (!Type->isEnumeralType()) 5542 return true; 5543 const EnumDecl *ED = Type->getAs<EnumType>()->getDecl(); 5544 return !(ED && 5545 Context.typesAreCompatible(ED->getPromotionType(), Type)); 5546 }()) { 5547 unsigned Reason = 0; 5548 if (Type->isReferenceType()) Reason = 1; 5549 else if (IsCRegister) Reason = 2; 5550 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 5551 Diag(ParamLoc, diag::note_parameter_type) << Type; 5552 } 5553 5554 TheCall->setType(Context.VoidTy); 5555 return false; 5556 } 5557 5558 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 5559 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 5560 // const char *named_addr); 5561 5562 Expr *Func = Call->getCallee(); 5563 5564 if (Call->getNumArgs() < 3) 5565 return Diag(Call->getEndLoc(), 5566 diag::err_typecheck_call_too_few_args_at_least) 5567 << 0 /*function call*/ << 3 << Call->getNumArgs(); 5568 5569 // Type-check the first argument normally. 5570 if (checkBuiltinArgument(*this, Call, 0)) 5571 return true; 5572 5573 // Check that the current function is variadic. 5574 if (checkVAStartIsInVariadicFunction(*this, Func)) 5575 return true; 5576 5577 // __va_start on Windows does not validate the parameter qualifiers 5578 5579 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 5580 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 5581 5582 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 5583 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 5584 5585 const QualType &ConstCharPtrTy = 5586 Context.getPointerType(Context.CharTy.withConst()); 5587 if (!Arg1Ty->isPointerType() || 5588 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy) 5589 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5590 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 5591 << 0 /* qualifier difference */ 5592 << 3 /* parameter mismatch */ 5593 << 2 << Arg1->getType() << ConstCharPtrTy; 5594 5595 const QualType SizeTy = Context.getSizeType(); 5596 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 5597 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5598 << Arg2->getType() << SizeTy << 1 /* different class */ 5599 << 0 /* qualifier difference */ 5600 << 3 /* parameter mismatch */ 5601 << 3 << Arg2->getType() << SizeTy; 5602 5603 return false; 5604 } 5605 5606 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 5607 /// friends. This is declared to take (...), so we have to check everything. 5608 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 5609 if (TheCall->getNumArgs() < 2) 5610 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 5611 << 0 << 2 << TheCall->getNumArgs() /*function call*/; 5612 if (TheCall->getNumArgs() > 2) 5613 return Diag(TheCall->getArg(2)->getBeginLoc(), 5614 diag::err_typecheck_call_too_many_args) 5615 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5616 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5617 (*(TheCall->arg_end() - 1))->getEndLoc()); 5618 5619 ExprResult OrigArg0 = TheCall->getArg(0); 5620 ExprResult OrigArg1 = TheCall->getArg(1); 5621 5622 // Do standard promotions between the two arguments, returning their common 5623 // type. 
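  // Illustrative example (not in the original source): for
  //
  //   __builtin_isgreater(1.0f, 2.0)
  //
  // the 'float' operand is promoted to 'double', which becomes the common
  // real floating type required below.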
  QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}

/// SemaBuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends. This is declared to take (...), so we have
/// to check everything. We expect the last argument to be a floating point
/// value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (TheCall->getNumArgs() < NumArgs)
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 << NumArgs << TheCall->getNumArgs() /*function call*/;
  if (TheCall->getNumArgs() > NumArgs)
    return Diag(TheCall->getArg(NumArgs)->getBeginLoc(),
                diag::err_typecheck_call_too_many_args)
           << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
           << SourceRange(TheCall->getArg(NumArgs)->getBeginLoc(),
                          (*(TheCall->arg_end() - 1))->getEndLoc());

  Expr *OrigArg = TheCall->getArg(NumArgs-1);

  if (OrigArg->isTypeDependent())
    return false;

  // This operation requires a non-_Complex floating-point number.
  if (!OrigArg->getType()->isRealFloatingType())
    return Diag(OrigArg->getBeginLoc(),
                diag::err_typecheck_call_invalid_unary_fp)
           << OrigArg->getType() << OrigArg->getSourceRange();

  // If this is an implicit conversion from float -> float, double, or
  // long double, remove it.
  if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
    // Only remove standard FloatCasts, leaving other casts in place
    if (Cast->getCastKind() == CK_FloatingCast) {
      Expr *CastArg = Cast->getSubExpr();
      if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
        assert(
            (Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) ||
             Cast->getType()->isSpecificBuiltinType(BuiltinType::Float) ||
             Cast->getType()->isSpecificBuiltinType(BuiltinType::LongDouble)) &&
            "promotion from float to either float, double, or long double is "
            "the only expected cast here");
        Cast->setSubExpr(nullptr);
        TheCall->setArg(NumArgs-1, CastArg);
      }
    }
  }

  return false;
}

// Customized Sema Checking for VSX builtins that have the following signature:
// vector [...] builtinName(vector [...], vector [...], const int);
// These take the same type of vector (any legal vector type) for the first
// two arguments and a compile-time constant for the third argument.
5701 // Example builtins are : 5702 // vector double vec_xxpermdi(vector double, vector double, int); 5703 // vector short vec_xxsldwi(vector short, vector short, int); 5704 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 5705 unsigned ExpectedNumArgs = 3; 5706 if (TheCall->getNumArgs() < ExpectedNumArgs) 5707 return Diag(TheCall->getEndLoc(), 5708 diag::err_typecheck_call_too_few_args_at_least) 5709 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs() 5710 << TheCall->getSourceRange(); 5711 5712 if (TheCall->getNumArgs() > ExpectedNumArgs) 5713 return Diag(TheCall->getEndLoc(), 5714 diag::err_typecheck_call_too_many_args_at_most) 5715 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs() 5716 << TheCall->getSourceRange(); 5717 5718 // Check the third argument is a compile time constant 5719 llvm::APSInt Value; 5720 if(!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context)) 5721 return Diag(TheCall->getBeginLoc(), 5722 diag::err_vsx_builtin_nonconstant_argument) 5723 << 3 /* argument index */ << TheCall->getDirectCallee() 5724 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5725 TheCall->getArg(2)->getEndLoc()); 5726 5727 QualType Arg1Ty = TheCall->getArg(0)->getType(); 5728 QualType Arg2Ty = TheCall->getArg(1)->getType(); 5729 5730 // Check the type of argument 1 and argument 2 are vectors. 5731 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 5732 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 5733 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 5734 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 5735 << TheCall->getDirectCallee() 5736 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5737 TheCall->getArg(1)->getEndLoc()); 5738 } 5739 5740 // Check the first two arguments are the same type. 5741 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 5742 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 5743 << TheCall->getDirectCallee() 5744 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5745 TheCall->getArg(1)->getEndLoc()); 5746 } 5747 5748 // When default clang type checking is turned off and the customized type 5749 // checking is used, the returning type of the function must be explicitly 5750 // set. Otherwise it is _Bool by default. 5751 TheCall->setType(Arg1Ty); 5752 5753 return false; 5754 } 5755 5756 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 5757 // This is declared to take (...), so we have to check everything. 
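//
// Illustrative usage (not in the original source), with
// typedef int v4si __attribute__((vector_size(16))):
//
//   v4si a, b, mask;
//   v4si r = __builtin_shufflevector(a, b, 0, 4, 1, 5); // binary, scalar mask
//   v4si s = __builtin_shufflevector(a, mask);          // unary, vector mask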
5758 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 5759 if (TheCall->getNumArgs() < 2) 5760 return ExprError(Diag(TheCall->getEndLoc(), 5761 diag::err_typecheck_call_too_few_args_at_least) 5762 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5763 << TheCall->getSourceRange()); 5764 5765 // Determine which of the following types of shufflevector we're checking: 5766 // 1) unary, vector mask: (lhs, mask) 5767 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 5768 QualType resType = TheCall->getArg(0)->getType(); 5769 unsigned numElements = 0; 5770 5771 if (!TheCall->getArg(0)->isTypeDependent() && 5772 !TheCall->getArg(1)->isTypeDependent()) { 5773 QualType LHSType = TheCall->getArg(0)->getType(); 5774 QualType RHSType = TheCall->getArg(1)->getType(); 5775 5776 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 5777 return ExprError( 5778 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 5779 << TheCall->getDirectCallee() 5780 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5781 TheCall->getArg(1)->getEndLoc())); 5782 5783 numElements = LHSType->getAs<VectorType>()->getNumElements(); 5784 unsigned numResElements = TheCall->getNumArgs() - 2; 5785 5786 // Check to see if we have a call with 2 vector arguments, the unary shuffle 5787 // with mask. If so, verify that RHS is an integer vector type with the 5788 // same number of elts as lhs. 5789 if (TheCall->getNumArgs() == 2) { 5790 if (!RHSType->hasIntegerRepresentation() || 5791 RHSType->getAs<VectorType>()->getNumElements() != numElements) 5792 return ExprError(Diag(TheCall->getBeginLoc(), 5793 diag::err_vec_builtin_incompatible_vector) 5794 << TheCall->getDirectCallee() 5795 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 5796 TheCall->getArg(1)->getEndLoc())); 5797 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 5798 return ExprError(Diag(TheCall->getBeginLoc(), 5799 diag::err_vec_builtin_incompatible_vector) 5800 << TheCall->getDirectCallee() 5801 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5802 TheCall->getArg(1)->getEndLoc())); 5803 } else if (numElements != numResElements) { 5804 QualType eltType = LHSType->getAs<VectorType>()->getElementType(); 5805 resType = Context.getVectorType(eltType, numResElements, 5806 VectorType::GenericVector); 5807 } 5808 } 5809 5810 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 5811 if (TheCall->getArg(i)->isTypeDependent() || 5812 TheCall->getArg(i)->isValueDependent()) 5813 continue; 5814 5815 llvm::APSInt Result(32); 5816 if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context)) 5817 return ExprError(Diag(TheCall->getBeginLoc(), 5818 diag::err_shufflevector_nonconstant_argument) 5819 << TheCall->getArg(i)->getSourceRange()); 5820 5821 // Allow -1 which will be translated to undef in the IR. 
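    // Illustrative example (not in the original source):
    //
    //   __builtin_shufflevector(a, b, 0, -1, 2, -1)
    //
    // leaves the second and fourth elements of the result undefined.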
5822 if (Result.isSigned() && Result.isAllOnesValue()) 5823 continue; 5824 5825 if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2) 5826 return ExprError(Diag(TheCall->getBeginLoc(), 5827 diag::err_shufflevector_argument_too_large) 5828 << TheCall->getArg(i)->getSourceRange()); 5829 } 5830 5831 SmallVector<Expr*, 32> exprs; 5832 5833 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 5834 exprs.push_back(TheCall->getArg(i)); 5835 TheCall->setArg(i, nullptr); 5836 } 5837 5838 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 5839 TheCall->getCallee()->getBeginLoc(), 5840 TheCall->getRParenLoc()); 5841 } 5842 5843 /// SemaConvertVectorExpr - Handle __builtin_convertvector 5844 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 5845 SourceLocation BuiltinLoc, 5846 SourceLocation RParenLoc) { 5847 ExprValueKind VK = VK_RValue; 5848 ExprObjectKind OK = OK_Ordinary; 5849 QualType DstTy = TInfo->getType(); 5850 QualType SrcTy = E->getType(); 5851 5852 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 5853 return ExprError(Diag(BuiltinLoc, 5854 diag::err_convertvector_non_vector) 5855 << E->getSourceRange()); 5856 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 5857 return ExprError(Diag(BuiltinLoc, 5858 diag::err_convertvector_non_vector_type)); 5859 5860 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 5861 unsigned SrcElts = SrcTy->getAs<VectorType>()->getNumElements(); 5862 unsigned DstElts = DstTy->getAs<VectorType>()->getNumElements(); 5863 if (SrcElts != DstElts) 5864 return ExprError(Diag(BuiltinLoc, 5865 diag::err_convertvector_incompatible_vector) 5866 << E->getSourceRange()); 5867 } 5868 5869 return new (Context) 5870 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 5871 } 5872 5873 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 5874 // This is declared to take (const void*, ...) and can take two 5875 // optional constant int args. 5876 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 5877 unsigned NumArgs = TheCall->getNumArgs(); 5878 5879 if (NumArgs > 3) 5880 return Diag(TheCall->getEndLoc(), 5881 diag::err_typecheck_call_too_many_args_at_most) 5882 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 5883 5884 // Argument 0 is checked for us and the remaining arguments must be 5885 // constant integers. 5886 for (unsigned i = 1; i != NumArgs; ++i) 5887 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 5888 return true; 5889 5890 return false; 5891 } 5892 5893 /// SemaBuiltinAssume - Handle __assume (MS Extension). 5894 // __assume does not evaluate its arguments, and should warn if its argument 5895 // has side effects. 5896 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 5897 Expr *Arg = TheCall->getArg(0); 5898 if (Arg->isInstantiationDependent()) return false; 5899 5900 if (Arg->HasSideEffects(Context)) 5901 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 5902 << Arg->getSourceRange() 5903 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 5904 5905 return false; 5906 } 5907 5908 /// Handle __builtin_alloca_with_align. This is declared 5909 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 5910 /// than 8. 5911 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 5912 // The alignment must be a constant integer. 5913 Expr *Arg = TheCall->getArg(1); 5914 5915 // We can't check the value of a dependent argument. 
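  // Illustrative usage (not in the original source), where the second argument
  // is the requested alignment in bits:
  //
  //   void *p = __builtin_alloca_with_align(n, 64);  // 8-byte alignment
  //
  // A non-power-of-two, too-small, or over-large alignment is rejected below.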
5916 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 5917 if (const auto *UE = 5918 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 5919 if (UE->getKind() == UETT_AlignOf || 5920 UE->getKind() == UETT_PreferredAlignOf) 5921 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 5922 << Arg->getSourceRange(); 5923 5924 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 5925 5926 if (!Result.isPowerOf2()) 5927 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 5928 << Arg->getSourceRange(); 5929 5930 if (Result < Context.getCharWidth()) 5931 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 5932 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 5933 5934 if (Result > std::numeric_limits<int32_t>::max()) 5935 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 5936 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 5937 } 5938 5939 return false; 5940 } 5941 5942 /// Handle __builtin_assume_aligned. This is declared 5943 /// as (const void*, size_t, ...) and can take one optional constant int arg. 5944 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 5945 unsigned NumArgs = TheCall->getNumArgs(); 5946 5947 if (NumArgs > 3) 5948 return Diag(TheCall->getEndLoc(), 5949 diag::err_typecheck_call_too_many_args_at_most) 5950 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 5951 5952 // The alignment must be a constant integer. 5953 Expr *Arg = TheCall->getArg(1); 5954 5955 // We can't check the value of a dependent argument. 5956 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 5957 llvm::APSInt Result; 5958 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 5959 return true; 5960 5961 if (!Result.isPowerOf2()) 5962 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 5963 << Arg->getSourceRange(); 5964 } 5965 5966 if (NumArgs > 2) { 5967 ExprResult Arg(TheCall->getArg(2)); 5968 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 5969 Context.getSizeType(), false); 5970 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5971 if (Arg.isInvalid()) return true; 5972 TheCall->setArg(2, Arg.get()); 5973 } 5974 5975 return false; 5976 } 5977 5978 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 5979 unsigned BuiltinID = 5980 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 5981 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 5982 5983 unsigned NumArgs = TheCall->getNumArgs(); 5984 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 5985 if (NumArgs < NumRequiredArgs) { 5986 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 5987 << 0 /* function call */ << NumRequiredArgs << NumArgs 5988 << TheCall->getSourceRange(); 5989 } 5990 if (NumArgs >= NumRequiredArgs + 0x100) { 5991 return Diag(TheCall->getEndLoc(), 5992 diag::err_typecheck_call_too_many_args_at_most) 5993 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 5994 << TheCall->getSourceRange(); 5995 } 5996 unsigned i = 0; 5997 5998 // For formatting call, check buffer arg. 5999 if (!IsSizeCall) { 6000 ExprResult Arg(TheCall->getArg(i)); 6001 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6002 Context, Context.VoidPtrTy, false); 6003 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6004 if (Arg.isInvalid()) 6005 return true; 6006 TheCall->setArg(i, Arg.get()); 6007 i++; 6008 } 6009 6010 // Check string literal arg. 
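  // Illustrative usage (not in the original source) of the two variants this
  // function handles:
  //
  //   char buf[__builtin_os_log_format_buffer_size("%d", x)];
  //   __builtin_os_log_format(buf, "%d", x);
  //
  // The size variant takes the format string first; the formatting variant
  // takes the buffer first, which is why the format index differs by one.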
6011 unsigned FormatIdx = i; 6012 { 6013 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 6014 if (Arg.isInvalid()) 6015 return true; 6016 TheCall->setArg(i, Arg.get()); 6017 i++; 6018 } 6019 6020 // Make sure variadic args are scalar. 6021 unsigned FirstDataArg = i; 6022 while (i < NumArgs) { 6023 ExprResult Arg = DefaultVariadicArgumentPromotion( 6024 TheCall->getArg(i), VariadicFunction, nullptr); 6025 if (Arg.isInvalid()) 6026 return true; 6027 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 6028 if (ArgSize.getQuantity() >= 0x100) { 6029 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 6030 << i << (int)ArgSize.getQuantity() << 0xff 6031 << TheCall->getSourceRange(); 6032 } 6033 TheCall->setArg(i, Arg.get()); 6034 i++; 6035 } 6036 6037 // Check formatting specifiers. NOTE: We're only doing this for the non-size 6038 // call to avoid duplicate diagnostics. 6039 if (!IsSizeCall) { 6040 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 6041 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 6042 bool Success = CheckFormatArguments( 6043 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 6044 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 6045 CheckedVarArgs); 6046 if (!Success) 6047 return true; 6048 } 6049 6050 if (IsSizeCall) { 6051 TheCall->setType(Context.getSizeType()); 6052 } else { 6053 TheCall->setType(Context.VoidPtrTy); 6054 } 6055 return false; 6056 } 6057 6058 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 6059 /// TheCall is a constant expression. 6060 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 6061 llvm::APSInt &Result) { 6062 Expr *Arg = TheCall->getArg(ArgNum); 6063 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6064 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6065 6066 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 6067 6068 if (!Arg->isIntegerConstantExpr(Result, Context)) 6069 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 6070 << FDecl->getDeclName() << Arg->getSourceRange(); 6071 6072 return false; 6073 } 6074 6075 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 6076 /// TheCall is a constant expression in the range [Low, High]. 6077 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 6078 int Low, int High, bool RangeIsError) { 6079 if (isConstantEvaluated()) 6080 return false; 6081 llvm::APSInt Result; 6082 6083 // We can't check the value of a dependent argument. 6084 Expr *Arg = TheCall->getArg(ArgNum); 6085 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6086 return false; 6087 6088 // Check constant-ness first. 6089 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6090 return true; 6091 6092 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 6093 if (RangeIsError) 6094 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 6095 << Result.toString(10) << Low << High << Arg->getSourceRange(); 6096 else 6097 // Defer the warning until we know if the code will be emitted so that 6098 // dead code can ignore this. 
6099 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 6100 PDiag(diag::warn_argument_invalid_range) 6101 << Result.toString(10) << Low << High 6102 << Arg->getSourceRange()); 6103 } 6104 6105 return false; 6106 } 6107 6108 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr 6109 /// TheCall is a constant expression is a multiple of Num.. 6110 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, 6111 unsigned Num) { 6112 llvm::APSInt Result; 6113 6114 // We can't check the value of a dependent argument. 6115 Expr *Arg = TheCall->getArg(ArgNum); 6116 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6117 return false; 6118 6119 // Check constant-ness first. 6120 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6121 return true; 6122 6123 if (Result.getSExtValue() % Num != 0) 6124 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple) 6125 << Num << Arg->getSourceRange(); 6126 6127 return false; 6128 } 6129 6130 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 6131 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 6132 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 6133 if (checkArgCount(*this, TheCall, 2)) 6134 return true; 6135 Expr *Arg0 = TheCall->getArg(0); 6136 Expr *Arg1 = TheCall->getArg(1); 6137 6138 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6139 if (FirstArg.isInvalid()) 6140 return true; 6141 QualType FirstArgType = FirstArg.get()->getType(); 6142 if (!FirstArgType->isAnyPointerType()) 6143 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6144 << "first" << FirstArgType << Arg0->getSourceRange(); 6145 TheCall->setArg(0, FirstArg.get()); 6146 6147 ExprResult SecArg = DefaultLvalueConversion(Arg1); 6148 if (SecArg.isInvalid()) 6149 return true; 6150 QualType SecArgType = SecArg.get()->getType(); 6151 if (!SecArgType->isIntegerType()) 6152 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 6153 << "second" << SecArgType << Arg1->getSourceRange(); 6154 6155 // Derive the return type from the pointer argument. 6156 TheCall->setType(FirstArgType); 6157 return false; 6158 } 6159 6160 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 6161 if (checkArgCount(*this, TheCall, 2)) 6162 return true; 6163 6164 Expr *Arg0 = TheCall->getArg(0); 6165 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6166 if (FirstArg.isInvalid()) 6167 return true; 6168 QualType FirstArgType = FirstArg.get()->getType(); 6169 if (!FirstArgType->isAnyPointerType()) 6170 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6171 << "first" << FirstArgType << Arg0->getSourceRange(); 6172 TheCall->setArg(0, FirstArg.get()); 6173 6174 // Derive the return type from the pointer argument. 
6175 TheCall->setType(FirstArgType); 6176 6177 // Second arg must be an constant in range [0,15] 6178 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6179 } 6180 6181 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 6182 if (checkArgCount(*this, TheCall, 2)) 6183 return true; 6184 Expr *Arg0 = TheCall->getArg(0); 6185 Expr *Arg1 = TheCall->getArg(1); 6186 6187 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6188 if (FirstArg.isInvalid()) 6189 return true; 6190 QualType FirstArgType = FirstArg.get()->getType(); 6191 if (!FirstArgType->isAnyPointerType()) 6192 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6193 << "first" << FirstArgType << Arg0->getSourceRange(); 6194 6195 QualType SecArgType = Arg1->getType(); 6196 if (!SecArgType->isIntegerType()) 6197 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 6198 << "second" << SecArgType << Arg1->getSourceRange(); 6199 TheCall->setType(Context.IntTy); 6200 return false; 6201 } 6202 6203 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 6204 BuiltinID == AArch64::BI__builtin_arm_stg) { 6205 if (checkArgCount(*this, TheCall, 1)) 6206 return true; 6207 Expr *Arg0 = TheCall->getArg(0); 6208 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6209 if (FirstArg.isInvalid()) 6210 return true; 6211 6212 QualType FirstArgType = FirstArg.get()->getType(); 6213 if (!FirstArgType->isAnyPointerType()) 6214 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6215 << "first" << FirstArgType << Arg0->getSourceRange(); 6216 TheCall->setArg(0, FirstArg.get()); 6217 6218 // Derive the return type from the pointer argument. 6219 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 6220 TheCall->setType(FirstArgType); 6221 return false; 6222 } 6223 6224 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 6225 Expr *ArgA = TheCall->getArg(0); 6226 Expr *ArgB = TheCall->getArg(1); 6227 6228 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 6229 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 6230 6231 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 6232 return true; 6233 6234 QualType ArgTypeA = ArgExprA.get()->getType(); 6235 QualType ArgTypeB = ArgExprB.get()->getType(); 6236 6237 auto isNull = [&] (Expr *E) -> bool { 6238 return E->isNullPointerConstant( 6239 Context, Expr::NPC_ValueDependentIsNotNull); }; 6240 6241 // argument should be either a pointer or null 6242 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 6243 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6244 << "first" << ArgTypeA << ArgA->getSourceRange(); 6245 6246 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 6247 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6248 << "second" << ArgTypeB << ArgB->getSourceRange(); 6249 6250 // Ensure Pointee types are compatible 6251 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 6252 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 6253 QualType pointeeA = ArgTypeA->getPointeeType(); 6254 QualType pointeeB = ArgTypeB->getPointeeType(); 6255 if (!Context.typesAreCompatible( 6256 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 6257 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 6258 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 6259 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 6260 << ArgB->getSourceRange(); 6261 } 6262 } 6263 6264 // at least one argument should be pointer type 6265 if 
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 6266 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 6267 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 6268 6269 if (isNull(ArgA)) // adopt type of the other pointer 6270 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 6271 6272 if (isNull(ArgB)) 6273 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 6274 6275 TheCall->setArg(0, ArgExprA.get()); 6276 TheCall->setArg(1, ArgExprB.get()); 6277 TheCall->setType(Context.LongLongTy); 6278 return false; 6279 } 6280 assert(false && "Unhandled ARM MTE intrinsic"); 6281 return true; 6282 } 6283 6284 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 6285 /// TheCall is an ARM/AArch64 special register string literal. 6286 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 6287 int ArgNum, unsigned ExpectedFieldNum, 6288 bool AllowName) { 6289 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 6290 BuiltinID == ARM::BI__builtin_arm_wsr64 || 6291 BuiltinID == ARM::BI__builtin_arm_rsr || 6292 BuiltinID == ARM::BI__builtin_arm_rsrp || 6293 BuiltinID == ARM::BI__builtin_arm_wsr || 6294 BuiltinID == ARM::BI__builtin_arm_wsrp; 6295 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 6296 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 6297 BuiltinID == AArch64::BI__builtin_arm_rsr || 6298 BuiltinID == AArch64::BI__builtin_arm_rsrp || 6299 BuiltinID == AArch64::BI__builtin_arm_wsr || 6300 BuiltinID == AArch64::BI__builtin_arm_wsrp; 6301 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 6302 6303 // We can't check the value of a dependent argument. 6304 Expr *Arg = TheCall->getArg(ArgNum); 6305 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6306 return false; 6307 6308 // Check if the argument is a string literal. 6309 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 6310 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 6311 << Arg->getSourceRange(); 6312 6313 // Check the type of special register given. 6314 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 6315 SmallVector<StringRef, 6> Fields; 6316 Reg.split(Fields, ":"); 6317 6318 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 6319 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 6320 << Arg->getSourceRange(); 6321 6322 // If the string is the name of a register then we cannot check that it is 6323 // valid here but if the string is of one the forms described in ACLE then we 6324 // can check that the supplied fields are integers and within the valid 6325 // ranges. 6326 if (Fields.size() > 1) { 6327 bool FiveFields = Fields.size() == 5; 6328 6329 bool ValidString = true; 6330 if (IsARMBuiltin) { 6331 ValidString &= Fields[0].startswith_lower("cp") || 6332 Fields[0].startswith_lower("p"); 6333 if (ValidString) 6334 Fields[0] = 6335 Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1); 6336 6337 ValidString &= Fields[2].startswith_lower("c"); 6338 if (ValidString) 6339 Fields[2] = Fields[2].drop_front(1); 6340 6341 if (FiveFields) { 6342 ValidString &= Fields[3].startswith_lower("c"); 6343 if (ValidString) 6344 Fields[3] = Fields[3].drop_front(1); 6345 } 6346 } 6347 6348 SmallVector<int, 5> Ranges; 6349 if (FiveFields) 6350 Ranges.append({IsAArch64Builtin ? 
1 : 15, 7, 15, 15, 7}); 6351 else 6352 Ranges.append({15, 7, 15}); 6353 6354 for (unsigned i=0; i<Fields.size(); ++i) { 6355 int IntField; 6356 ValidString &= !Fields[i].getAsInteger(10, IntField); 6357 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 6358 } 6359 6360 if (!ValidString) 6361 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 6362 << Arg->getSourceRange(); 6363 } else if (IsAArch64Builtin && Fields.size() == 1) { 6364 // If the register name is one of those that appear in the condition below 6365 // and the special register builtin being used is one of the write builtins, 6366 // then we require that the argument provided for writing to the register 6367 // is an integer constant expression. This is because it will be lowered to 6368 // an MSR (immediate) instruction, so we need to know the immediate at 6369 // compile time. 6370 if (TheCall->getNumArgs() != 2) 6371 return false; 6372 6373 std::string RegLower = Reg.lower(); 6374 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 6375 RegLower != "pan" && RegLower != "uao") 6376 return false; 6377 6378 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6379 } 6380 6381 return false; 6382 } 6383 6384 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 6385 /// This checks that the target supports __builtin_longjmp and 6386 /// that val is a constant 1. 6387 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 6388 if (!Context.getTargetInfo().hasSjLjLowering()) 6389 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 6390 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6391 6392 Expr *Arg = TheCall->getArg(1); 6393 llvm::APSInt Result; 6394 6395 // TODO: This is less than ideal. Overload this to take a value. 6396 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6397 return true; 6398 6399 if (Result != 1) 6400 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 6401 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 6402 6403 return false; 6404 } 6405 6406 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 6407 /// This checks that the target supports __builtin_setjmp. 6408 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 6409 if (!Context.getTargetInfo().hasSjLjLowering()) 6410 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 6411 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6412 return false; 6413 } 6414 6415 namespace { 6416 6417 class UncoveredArgHandler { 6418 enum { Unknown = -1, AllCovered = -2 }; 6419 6420 signed FirstUncoveredArg = Unknown; 6421 SmallVector<const Expr *, 4> DiagnosticExprs; 6422 6423 public: 6424 UncoveredArgHandler() = default; 6425 6426 bool hasUncoveredArg() const { 6427 return (FirstUncoveredArg >= 0); 6428 } 6429 6430 unsigned getUncoveredArg() const { 6431 assert(hasUncoveredArg() && "no uncovered argument"); 6432 return FirstUncoveredArg; 6433 } 6434 6435 void setAllCovered() { 6436 // A string has been found with all arguments covered, so clear out 6437 // the diagnostics. 6438 DiagnosticExprs.clear(); 6439 FirstUncoveredArg = AllCovered; 6440 } 6441 6442 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 6443 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 6444 6445 // Don't update if a previous string covers all arguments. 
6446 if (FirstUncoveredArg == AllCovered) 6447 return; 6448 6449 // UncoveredArgHandler tracks the highest uncovered argument index 6450 // and with it all the strings that match this index. 6451 if (NewFirstUncoveredArg == FirstUncoveredArg) 6452 DiagnosticExprs.push_back(StrExpr); 6453 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 6454 DiagnosticExprs.clear(); 6455 DiagnosticExprs.push_back(StrExpr); 6456 FirstUncoveredArg = NewFirstUncoveredArg; 6457 } 6458 } 6459 6460 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 6461 }; 6462 6463 enum StringLiteralCheckType { 6464 SLCT_NotALiteral, 6465 SLCT_UncheckedLiteral, 6466 SLCT_CheckedLiteral 6467 }; 6468 6469 } // namespace 6470 6471 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 6472 BinaryOperatorKind BinOpKind, 6473 bool AddendIsRight) { 6474 unsigned BitWidth = Offset.getBitWidth(); 6475 unsigned AddendBitWidth = Addend.getBitWidth(); 6476 // There might be negative interim results. 6477 if (Addend.isUnsigned()) { 6478 Addend = Addend.zext(++AddendBitWidth); 6479 Addend.setIsSigned(true); 6480 } 6481 // Adjust the bit width of the APSInts. 6482 if (AddendBitWidth > BitWidth) { 6483 Offset = Offset.sext(AddendBitWidth); 6484 BitWidth = AddendBitWidth; 6485 } else if (BitWidth > AddendBitWidth) { 6486 Addend = Addend.sext(BitWidth); 6487 } 6488 6489 bool Ov = false; 6490 llvm::APSInt ResOffset = Offset; 6491 if (BinOpKind == BO_Add) 6492 ResOffset = Offset.sadd_ov(Addend, Ov); 6493 else { 6494 assert(AddendIsRight && BinOpKind == BO_Sub && 6495 "operator must be add or sub with addend on the right"); 6496 ResOffset = Offset.ssub_ov(Addend, Ov); 6497 } 6498 6499 // We add an offset to a pointer here so we should support an offset as big as 6500 // possible. 6501 if (Ov) { 6502 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 6503 "index (intermediate) result too big"); 6504 Offset = Offset.sext(2 * BitWidth); 6505 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 6506 return; 6507 } 6508 6509 Offset = ResOffset; 6510 } 6511 6512 namespace { 6513 6514 // This is a wrapper class around StringLiteral to support offsetted string 6515 // literals as format strings. It takes the offset into account when returning 6516 // the string and its length or the source locations to display notes correctly. 
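//
// Illustrative example (not in the original source): for a call such as
//
//   printf("abc%d" + 3, x);
//
// the wrapper presents the literal as "%d" with Offset == 3, so diagnostics
// still point at the correct bytes of the original literal.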
6517 class FormatStringLiteral { 6518 const StringLiteral *FExpr; 6519 int64_t Offset; 6520 6521 public: 6522 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 6523 : FExpr(fexpr), Offset(Offset) {} 6524 6525 StringRef getString() const { 6526 return FExpr->getString().drop_front(Offset); 6527 } 6528 6529 unsigned getByteLength() const { 6530 return FExpr->getByteLength() - getCharByteWidth() * Offset; 6531 } 6532 6533 unsigned getLength() const { return FExpr->getLength() - Offset; } 6534 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 6535 6536 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 6537 6538 QualType getType() const { return FExpr->getType(); } 6539 6540 bool isAscii() const { return FExpr->isAscii(); } 6541 bool isWide() const { return FExpr->isWide(); } 6542 bool isUTF8() const { return FExpr->isUTF8(); } 6543 bool isUTF16() const { return FExpr->isUTF16(); } 6544 bool isUTF32() const { return FExpr->isUTF32(); } 6545 bool isPascal() const { return FExpr->isPascal(); } 6546 6547 SourceLocation getLocationOfByte( 6548 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 6549 const TargetInfo &Target, unsigned *StartToken = nullptr, 6550 unsigned *StartTokenByteOffset = nullptr) const { 6551 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 6552 StartToken, StartTokenByteOffset); 6553 } 6554 6555 SourceLocation getBeginLoc() const LLVM_READONLY { 6556 return FExpr->getBeginLoc().getLocWithOffset(Offset); 6557 } 6558 6559 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 6560 }; 6561 6562 } // namespace 6563 6564 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 6565 const Expr *OrigFormatExpr, 6566 ArrayRef<const Expr *> Args, 6567 bool HasVAListArg, unsigned format_idx, 6568 unsigned firstDataArg, 6569 Sema::FormatStringType Type, 6570 bool inFunctionCall, 6571 Sema::VariadicCallType CallType, 6572 llvm::SmallBitVector &CheckedVarArgs, 6573 UncoveredArgHandler &UncoveredArg); 6574 6575 // Determine if an expression is a string literal or constant string. 6576 // If this function returns false on the arguments to a function expecting a 6577 // format string, we will usually need to emit a warning. 6578 // True string literals are then checked by CheckFormatString. 6579 static StringLiteralCheckType 6580 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 6581 bool HasVAListArg, unsigned format_idx, 6582 unsigned firstDataArg, Sema::FormatStringType Type, 6583 Sema::VariadicCallType CallType, bool InFunctionCall, 6584 llvm::SmallBitVector &CheckedVarArgs, 6585 UncoveredArgHandler &UncoveredArg, 6586 llvm::APSInt Offset) { 6587 if (S.isConstantEvaluated()) 6588 return SLCT_NotALiteral; 6589 tryAgain: 6590 assert(Offset.isSigned() && "invalid offset"); 6591 6592 if (E->isTypeDependent() || E->isValueDependent()) 6593 return SLCT_NotALiteral; 6594 6595 E = E->IgnoreParenCasts(); 6596 6597 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 6598 // Technically -Wformat-nonliteral does not warn about this case. 6599 // The behavior of printf and friends in this case is implementation 6600 // dependent. Ideally if the format string cannot be null then 6601 // it should have a 'nonnull' attribute in the function prototype. 
6602 return SLCT_UncheckedLiteral; 6603 6604 switch (E->getStmtClass()) { 6605 case Stmt::BinaryConditionalOperatorClass: 6606 case Stmt::ConditionalOperatorClass: { 6607 // The expression is a literal if both sub-expressions were, and it was 6608 // completely checked only if both sub-expressions were checked. 6609 const AbstractConditionalOperator *C = 6610 cast<AbstractConditionalOperator>(E); 6611 6612 // Determine whether it is necessary to check both sub-expressions, for 6613 // example, because the condition expression is a constant that can be 6614 // evaluated at compile time. 6615 bool CheckLeft = true, CheckRight = true; 6616 6617 bool Cond; 6618 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 6619 S.isConstantEvaluated())) { 6620 if (Cond) 6621 CheckRight = false; 6622 else 6623 CheckLeft = false; 6624 } 6625 6626 // We need to maintain the offsets for the right and the left hand side 6627 // separately to check if every possible indexed expression is a valid 6628 // string literal. They might have different offsets for different string 6629 // literals in the end. 6630 StringLiteralCheckType Left; 6631 if (!CheckLeft) 6632 Left = SLCT_UncheckedLiteral; 6633 else { 6634 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 6635 HasVAListArg, format_idx, firstDataArg, 6636 Type, CallType, InFunctionCall, 6637 CheckedVarArgs, UncoveredArg, Offset); 6638 if (Left == SLCT_NotALiteral || !CheckRight) { 6639 return Left; 6640 } 6641 } 6642 6643 StringLiteralCheckType Right = 6644 checkFormatStringExpr(S, C->getFalseExpr(), Args, 6645 HasVAListArg, format_idx, firstDataArg, 6646 Type, CallType, InFunctionCall, CheckedVarArgs, 6647 UncoveredArg, Offset); 6648 6649 return (CheckLeft && Left < Right) ? Left : Right; 6650 } 6651 6652 case Stmt::ImplicitCastExprClass: 6653 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 6654 goto tryAgain; 6655 6656 case Stmt::OpaqueValueExprClass: 6657 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 6658 E = src; 6659 goto tryAgain; 6660 } 6661 return SLCT_NotALiteral; 6662 6663 case Stmt::PredefinedExprClass: 6664 // While __func__, etc., are technically not string literals, they 6665 // cannot contain format specifiers and thus are not a security 6666 // liability. 6667 return SLCT_UncheckedLiteral; 6668 6669 case Stmt::DeclRefExprClass: { 6670 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 6671 6672 // As an exception, do not flag errors for variables binding to 6673 // const string literals. 6674 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 6675 bool isConstant = false; 6676 QualType T = DR->getType(); 6677 6678 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 6679 isConstant = AT->getElementType().isConstant(S.Context); 6680 } else if (const PointerType *PT = T->getAs<PointerType>()) { 6681 isConstant = T.isConstant(S.Context) && 6682 PT->getPointeeType().isConstant(S.Context); 6683 } else if (T->isObjCObjectPointerType()) { 6684 // In ObjC, there is usually no "const ObjectPointer" type, 6685 // so don't check if the pointee type is constant. 
6686 isConstant = T.isConstant(S.Context); 6687 } 6688 6689 if (isConstant) { 6690 if (const Expr *Init = VD->getAnyInitializer()) { 6691 // Look through initializers like const char c[] = { "foo" } 6692 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 6693 if (InitList->isStringLiteralInit()) 6694 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 6695 } 6696 return checkFormatStringExpr(S, Init, Args, 6697 HasVAListArg, format_idx, 6698 firstDataArg, Type, CallType, 6699 /*InFunctionCall*/ false, CheckedVarArgs, 6700 UncoveredArg, Offset); 6701 } 6702 } 6703 6704 // For vprintf* functions (i.e., HasVAListArg==true), we add a 6705 // special check to see if the format string is a function parameter 6706 // of the function calling the printf function. If the function 6707 // has an attribute indicating it is a printf-like function, then we 6708 // should suppress warnings concerning non-literals being used in a call 6709 // to a vprintf function. For example: 6710 // 6711 // void 6712 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 6713 // va_list ap; 6714 // va_start(ap, fmt); 6715 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 6716 // ... 6717 // } 6718 if (HasVAListArg) { 6719 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 6720 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { 6721 int PVIndex = PV->getFunctionScopeIndex() + 1; 6722 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { 6723 // adjust for implicit parameter 6724 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) 6725 if (MD->isInstance()) 6726 ++PVIndex; 6727 // We also check if the formats are compatible. 6728 // We can't pass a 'scanf' string to a 'printf' function. 6729 if (PVIndex == PVFormat->getFormatIdx() && 6730 Type == S.GetFormatStringType(PVFormat)) 6731 return SLCT_UncheckedLiteral; 6732 } 6733 } 6734 } 6735 } 6736 } 6737 6738 return SLCT_NotALiteral; 6739 } 6740 6741 case Stmt::CallExprClass: 6742 case Stmt::CXXMemberCallExprClass: { 6743 const CallExpr *CE = cast<CallExpr>(E); 6744 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 6745 bool IsFirst = true; 6746 StringLiteralCheckType CommonResult; 6747 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 6748 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 6749 StringLiteralCheckType Result = checkFormatStringExpr( 6750 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 6751 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset); 6752 if (IsFirst) { 6753 CommonResult = Result; 6754 IsFirst = false; 6755 } 6756 } 6757 if (!IsFirst) 6758 return CommonResult; 6759 6760 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 6761 unsigned BuiltinID = FD->getBuiltinID(); 6762 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 6763 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 6764 const Expr *Arg = CE->getArg(0); 6765 return checkFormatStringExpr(S, Arg, Args, 6766 HasVAListArg, format_idx, 6767 firstDataArg, Type, CallType, 6768 InFunctionCall, CheckedVarArgs, 6769 UncoveredArg, Offset); 6770 } 6771 } 6772 } 6773 6774 return SLCT_NotALiteral; 6775 } 6776 case Stmt::ObjCMessageExprClass: { 6777 const auto *ME = cast<ObjCMessageExpr>(E); 6778 if (const auto *ND = ME->getMethodDecl()) { 6779 if (const auto *FA = ND->getAttr<FormatArgAttr>()) { 6780 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex()); 6781 return checkFormatStringExpr( 6782 S, 
Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 6783 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset); 6784 } 6785 } 6786 6787 return SLCT_NotALiteral; 6788 } 6789 case Stmt::ObjCStringLiteralClass: 6790 case Stmt::StringLiteralClass: { 6791 const StringLiteral *StrE = nullptr; 6792 6793 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 6794 StrE = ObjCFExpr->getString(); 6795 else 6796 StrE = cast<StringLiteral>(E); 6797 6798 if (StrE) { 6799 if (Offset.isNegative() || Offset > StrE->getLength()) { 6800 // TODO: It would be better to have an explicit warning for out of 6801 // bounds literals. 6802 return SLCT_NotALiteral; 6803 } 6804 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 6805 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 6806 firstDataArg, Type, InFunctionCall, CallType, 6807 CheckedVarArgs, UncoveredArg); 6808 return SLCT_CheckedLiteral; 6809 } 6810 6811 return SLCT_NotALiteral; 6812 } 6813 case Stmt::BinaryOperatorClass: { 6814 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 6815 6816 // A string literal + an int offset is still a string literal. 6817 if (BinOp->isAdditiveOp()) { 6818 Expr::EvalResult LResult, RResult; 6819 6820 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 6821 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 6822 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 6823 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 6824 6825 if (LIsInt != RIsInt) { 6826 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 6827 6828 if (LIsInt) { 6829 if (BinOpKind == BO_Add) { 6830 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 6831 E = BinOp->getRHS(); 6832 goto tryAgain; 6833 } 6834 } else { 6835 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 6836 E = BinOp->getLHS(); 6837 goto tryAgain; 6838 } 6839 } 6840 } 6841 6842 return SLCT_NotALiteral; 6843 } 6844 case Stmt::UnaryOperatorClass: { 6845 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 6846 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 6847 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 6848 Expr::EvalResult IndexResult; 6849 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 6850 Expr::SE_NoSideEffects, 6851 S.isConstantEvaluated())) { 6852 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 6853 /*RHS is int*/ true); 6854 E = ASE->getBase(); 6855 goto tryAgain; 6856 } 6857 } 6858 6859 return SLCT_NotALiteral; 6860 } 6861 6862 default: 6863 return SLCT_NotALiteral; 6864 } 6865 } 6866 6867 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 6868 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 6869 .Case("scanf", FST_Scanf) 6870 .Cases("printf", "printf0", FST_Printf) 6871 .Cases("NSString", "CFString", FST_NSString) 6872 .Case("strftime", FST_Strftime) 6873 .Case("strfmon", FST_Strfmon) 6874 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 6875 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 6876 .Case("os_trace", FST_OSLog) 6877 .Case("os_log", FST_OSLog) 6878 .Default(FST_Unknown); 6879 } 6880 6881 /// CheckFormatArguments - Check calls to printf and scanf (and similar 6882 /// functions) for correct use of format strings. 6883 /// Returns true if a format string has been fully checked. 
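/// For example (illustrative), for a function declared as
///   int log_msg(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
/// the FormatAttr supplies the 1-based indices of the format string and the
/// first data argument that drive the checks below.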
6884 bool Sema::CheckFormatArguments(const FormatAttr *Format, 6885 ArrayRef<const Expr *> Args, 6886 bool IsCXXMember, 6887 VariadicCallType CallType, 6888 SourceLocation Loc, SourceRange Range, 6889 llvm::SmallBitVector &CheckedVarArgs) { 6890 FormatStringInfo FSI; 6891 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 6892 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 6893 FSI.FirstDataArg, GetFormatStringType(Format), 6894 CallType, Loc, Range, CheckedVarArgs); 6895 return false; 6896 } 6897 6898 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 6899 bool HasVAListArg, unsigned format_idx, 6900 unsigned firstDataArg, FormatStringType Type, 6901 VariadicCallType CallType, 6902 SourceLocation Loc, SourceRange Range, 6903 llvm::SmallBitVector &CheckedVarArgs) { 6904 // CHECK: printf/scanf-like function is called with no format string. 6905 if (format_idx >= Args.size()) { 6906 Diag(Loc, diag::warn_missing_format_string) << Range; 6907 return false; 6908 } 6909 6910 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 6911 6912 // CHECK: format string is not a string literal. 6913 // 6914 // Dynamically generated format strings are difficult to 6915 // automatically vet at compile time. Requiring that format strings 6916 // are string literals: (1) permits the checking of format strings by 6917 // the compiler and thereby (2) can practically remove the source of 6918 // many format string exploits. 6919 6920 // Format string can be either ObjC string (e.g. @"%d") or 6921 // C string (e.g. "%d") 6922 // ObjC string uses the same format specifiers as C string, so we can use 6923 // the same format string checking logic for both ObjC and C strings. 6924 UncoveredArgHandler UncoveredArg; 6925 StringLiteralCheckType CT = 6926 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 6927 format_idx, firstDataArg, Type, CallType, 6928 /*IsFunctionCall*/ true, CheckedVarArgs, 6929 UncoveredArg, 6930 /*no string offset*/ llvm::APSInt(64, false) = 0); 6931 6932 // Generate a diagnostic where an uncovered argument is detected. 6933 if (UncoveredArg.hasUncoveredArg()) { 6934 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 6935 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 6936 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 6937 } 6938 6939 if (CT != SLCT_NotALiteral) 6940 // Literal format string found, check done! 6941 return CT == SLCT_CheckedLiteral; 6942 6943 // Strftime is particular as it always uses a single 'time' argument, 6944 // so it is safe to pass a non-literal string. 6945 if (Type == FST_Strftime) 6946 return false; 6947 6948 // Do not emit diag when the string param is a macro expansion and the 6949 // format is either NSString or CFString. This is a hack to prevent 6950 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 6951 // which are usually used in place of NS and CF string literals. 6952 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 6953 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 6954 return false; 6955 6956 // If there are no arguments specified, warn with -Wformat-security, otherwise 6957 // warn only with -Wformat-nonliteral. 
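  // For example (illustrative), with a non-literal 'fmt':
  //   printf(fmt);        // no data arguments: -Wformat-security
  //   printf(fmt, 42);    // data arguments present: -Wformat-nonliteral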
6958 if (Args.size() == firstDataArg) { 6959 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 6960 << OrigFormatExpr->getSourceRange(); 6961 switch (Type) { 6962 default: 6963 break; 6964 case FST_Kprintf: 6965 case FST_FreeBSDKPrintf: 6966 case FST_Printf: 6967 Diag(FormatLoc, diag::note_format_security_fixit) 6968 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 6969 break; 6970 case FST_NSString: 6971 Diag(FormatLoc, diag::note_format_security_fixit) 6972 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 6973 break; 6974 } 6975 } else { 6976 Diag(FormatLoc, diag::warn_format_nonliteral) 6977 << OrigFormatExpr->getSourceRange(); 6978 } 6979 return false; 6980 } 6981 6982 namespace { 6983 6984 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 6985 protected: 6986 Sema &S; 6987 const FormatStringLiteral *FExpr; 6988 const Expr *OrigFormatExpr; 6989 const Sema::FormatStringType FSType; 6990 const unsigned FirstDataArg; 6991 const unsigned NumDataArgs; 6992 const char *Beg; // Start of format string. 6993 const bool HasVAListArg; 6994 ArrayRef<const Expr *> Args; 6995 unsigned FormatIdx; 6996 llvm::SmallBitVector CoveredArgs; 6997 bool usesPositionalArgs = false; 6998 bool atFirstArg = true; 6999 bool inFunctionCall; 7000 Sema::VariadicCallType CallType; 7001 llvm::SmallBitVector &CheckedVarArgs; 7002 UncoveredArgHandler &UncoveredArg; 7003 7004 public: 7005 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 7006 const Expr *origFormatExpr, 7007 const Sema::FormatStringType type, unsigned firstDataArg, 7008 unsigned numDataArgs, const char *beg, bool hasVAListArg, 7009 ArrayRef<const Expr *> Args, unsigned formatIdx, 7010 bool inFunctionCall, Sema::VariadicCallType callType, 7011 llvm::SmallBitVector &CheckedVarArgs, 7012 UncoveredArgHandler &UncoveredArg) 7013 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 7014 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 7015 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 7016 inFunctionCall(inFunctionCall), CallType(callType), 7017 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 7018 CoveredArgs.resize(numDataArgs); 7019 CoveredArgs.reset(); 7020 } 7021 7022 void DoneProcessing(); 7023 7024 void HandleIncompleteSpecifier(const char *startSpecifier, 7025 unsigned specifierLen) override; 7026 7027 void HandleInvalidLengthModifier( 7028 const analyze_format_string::FormatSpecifier &FS, 7029 const analyze_format_string::ConversionSpecifier &CS, 7030 const char *startSpecifier, unsigned specifierLen, 7031 unsigned DiagID); 7032 7033 void HandleNonStandardLengthModifier( 7034 const analyze_format_string::FormatSpecifier &FS, 7035 const char *startSpecifier, unsigned specifierLen); 7036 7037 void HandleNonStandardConversionSpecifier( 7038 const analyze_format_string::ConversionSpecifier &CS, 7039 const char *startSpecifier, unsigned specifierLen); 7040 7041 void HandlePosition(const char *startPos, unsigned posLen) override; 7042 7043 void HandleInvalidPosition(const char *startSpecifier, 7044 unsigned specifierLen, 7045 analyze_format_string::PositionContext p) override; 7046 7047 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 7048 7049 void HandleNullChar(const char *nullCharacter) override; 7050 7051 template <typename Range> 7052 static void 7053 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 7054 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 7055 bool IsStringLocation, 
Range StringRange, 7056 ArrayRef<FixItHint> Fixit = None); 7057 7058 protected: 7059 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 7060 const char *startSpec, 7061 unsigned specifierLen, 7062 const char *csStart, unsigned csLen); 7063 7064 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 7065 const char *startSpec, 7066 unsigned specifierLen); 7067 7068 SourceRange getFormatStringRange(); 7069 CharSourceRange getSpecifierRange(const char *startSpecifier, 7070 unsigned specifierLen); 7071 SourceLocation getLocationOfByte(const char *x); 7072 7073 const Expr *getDataArg(unsigned i) const; 7074 7075 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 7076 const analyze_format_string::ConversionSpecifier &CS, 7077 const char *startSpecifier, unsigned specifierLen, 7078 unsigned argIndex); 7079 7080 template <typename Range> 7081 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 7082 bool IsStringLocation, Range StringRange, 7083 ArrayRef<FixItHint> Fixit = None); 7084 }; 7085 7086 } // namespace 7087 7088 SourceRange CheckFormatHandler::getFormatStringRange() { 7089 return OrigFormatExpr->getSourceRange(); 7090 } 7091 7092 CharSourceRange CheckFormatHandler:: 7093 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 7094 SourceLocation Start = getLocationOfByte(startSpecifier); 7095 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 7096 7097 // Advance the end SourceLocation by one due to half-open ranges. 7098 End = End.getLocWithOffset(1); 7099 7100 return CharSourceRange::getCharRange(Start, End); 7101 } 7102 7103 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 7104 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 7105 S.getLangOpts(), S.Context.getTargetInfo()); 7106 } 7107 7108 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 7109 unsigned specifierLen){ 7110 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 7111 getLocationOfByte(startSpecifier), 7112 /*IsStringLocation*/true, 7113 getSpecifierRange(startSpecifier, specifierLen)); 7114 } 7115 7116 void CheckFormatHandler::HandleInvalidLengthModifier( 7117 const analyze_format_string::FormatSpecifier &FS, 7118 const analyze_format_string::ConversionSpecifier &CS, 7119 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 7120 using namespace analyze_format_string; 7121 7122 const LengthModifier &LM = FS.getLengthModifier(); 7123 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7124 7125 // See if we know how to fix this length modifier. 
7126 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7127 if (FixedLM) { 7128 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7129 getLocationOfByte(LM.getStart()), 7130 /*IsStringLocation*/true, 7131 getSpecifierRange(startSpecifier, specifierLen)); 7132 7133 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7134 << FixedLM->toString() 7135 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7136 7137 } else { 7138 FixItHint Hint; 7139 if (DiagID == diag::warn_format_nonsensical_length) 7140 Hint = FixItHint::CreateRemoval(LMRange); 7141 7142 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7143 getLocationOfByte(LM.getStart()), 7144 /*IsStringLocation*/true, 7145 getSpecifierRange(startSpecifier, specifierLen), 7146 Hint); 7147 } 7148 } 7149 7150 void CheckFormatHandler::HandleNonStandardLengthModifier( 7151 const analyze_format_string::FormatSpecifier &FS, 7152 const char *startSpecifier, unsigned specifierLen) { 7153 using namespace analyze_format_string; 7154 7155 const LengthModifier &LM = FS.getLengthModifier(); 7156 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7157 7158 // See if we know how to fix this length modifier. 7159 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7160 if (FixedLM) { 7161 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7162 << LM.toString() << 0, 7163 getLocationOfByte(LM.getStart()), 7164 /*IsStringLocation*/true, 7165 getSpecifierRange(startSpecifier, specifierLen)); 7166 7167 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7168 << FixedLM->toString() 7169 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7170 7171 } else { 7172 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7173 << LM.toString() << 0, 7174 getLocationOfByte(LM.getStart()), 7175 /*IsStringLocation*/true, 7176 getSpecifierRange(startSpecifier, specifierLen)); 7177 } 7178 } 7179 7180 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 7181 const analyze_format_string::ConversionSpecifier &CS, 7182 const char *startSpecifier, unsigned specifierLen) { 7183 using namespace analyze_format_string; 7184 7185 // See if we know how to fix this conversion specifier. 
7186 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 7187 if (FixedCS) { 7188 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7189 << CS.toString() << /*conversion specifier*/1, 7190 getLocationOfByte(CS.getStart()), 7191 /*IsStringLocation*/true, 7192 getSpecifierRange(startSpecifier, specifierLen)); 7193 7194 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 7195 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 7196 << FixedCS->toString() 7197 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 7198 } else { 7199 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7200 << CS.toString() << /*conversion specifier*/1, 7201 getLocationOfByte(CS.getStart()), 7202 /*IsStringLocation*/true, 7203 getSpecifierRange(startSpecifier, specifierLen)); 7204 } 7205 } 7206 7207 void CheckFormatHandler::HandlePosition(const char *startPos, 7208 unsigned posLen) { 7209 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 7210 getLocationOfByte(startPos), 7211 /*IsStringLocation*/true, 7212 getSpecifierRange(startPos, posLen)); 7213 } 7214 7215 void 7216 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 7217 analyze_format_string::PositionContext p) { 7218 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 7219 << (unsigned) p, 7220 getLocationOfByte(startPos), /*IsStringLocation*/true, 7221 getSpecifierRange(startPos, posLen)); 7222 } 7223 7224 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 7225 unsigned posLen) { 7226 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 7227 getLocationOfByte(startPos), 7228 /*IsStringLocation*/true, 7229 getSpecifierRange(startPos, posLen)); 7230 } 7231 7232 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 7233 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 7234 // The presence of a null character is likely an error. 7235 EmitFormatDiagnostic( 7236 S.PDiag(diag::warn_printf_format_string_contains_null_char), 7237 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 7238 getFormatStringRange()); 7239 } 7240 } 7241 7242 // Note that this may return NULL if there was an error parsing or building 7243 // one of the argument expressions. 7244 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 7245 return Args[FirstDataArg + i]; 7246 } 7247 7248 void CheckFormatHandler::DoneProcessing() { 7249 // Does the number of data arguments exceed the number of 7250 // format conversions in the format string? 7251 if (!HasVAListArg) { 7252 // Find any arguments that weren't covered. 
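    // CoveredArgs has one bit per data argument; flipping it below turns the
    // covered bits off so that find_first() returns the index of the first
    // data argument that no conversion specifier consumed, if any.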
7253     CoveredArgs.flip();
7254     signed notCoveredArg = CoveredArgs.find_first();
7255     if (notCoveredArg >= 0) {
7256       assert((unsigned)notCoveredArg < NumDataArgs);
7257       UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
7258     } else {
7259       UncoveredArg.setAllCovered();
7260     }
7261   }
7262 }
7263
7264 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
7265                                    const Expr *ArgExpr) {
7266   assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
7267          "Invalid state");
7268
7269   if (!ArgExpr)
7270     return;
7271
7272   SourceLocation Loc = ArgExpr->getBeginLoc();
7273
7274   if (S.getSourceManager().isInSystemMacro(Loc))
7275     return;
7276
7277   PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
7278   for (auto E : DiagnosticExprs)
7279     PDiag << E->getSourceRange();
7280
7281   CheckFormatHandler::EmitFormatDiagnostic(
7282                                   S, IsFunctionCall, DiagnosticExprs[0],
7283                                   PDiag, Loc, /*IsStringLocation*/false,
7284                                   DiagnosticExprs[0]->getSourceRange());
7285 }
7286
7287 bool
7288 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
7289                                                      SourceLocation Loc,
7290                                                      const char *startSpec,
7291                                                      unsigned specifierLen,
7292                                                      const char *csStart,
7293                                                      unsigned csLen) {
7294   bool keepGoing = true;
7295   if (argIndex < NumDataArgs) {
7296     // Consider the argument covered, even though the specifier doesn't
7297     // make sense.
7298     CoveredArgs.set(argIndex);
7299   }
7300   else {
7301     // If argIndex exceeds the number of data arguments we
7302     // don't issue a warning because that is just a cascade of warnings (and
7303     // they may have intended '%%' anyway). We don't want to continue processing
7304     // the format string after this point, however, as we will likely just get
7305     // gibberish when trying to match arguments.
7306     keepGoing = false;
7307   }
7308
7309   StringRef Specifier(csStart, csLen);
7310
7311   // If the specifier is non-printable, it could be the first byte of a UTF-8
7312   // sequence. In that case, print the UTF-8 code point. If not, print the byte
7313   // hex value.
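  // For example (illustrative), an invalid specifier whose bytes form a valid
  // UTF-8 sequence is shown as a "\u..."/"\U..." code point, while a lone
  // stray byte such as 0x80 is shown as "\x80".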
7314   std::string CodePointStr;
7315   if (!llvm::sys::locale::isPrint(*csStart)) {
7316     llvm::UTF32 CodePoint;
7317     const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
7318     const llvm::UTF8 *E =
7319         reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
7320     llvm::ConversionResult Result =
7321         llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);
7322
7323     if (Result != llvm::conversionOK) {
7324       unsigned char FirstChar = *csStart;
7325       CodePoint = (llvm::UTF32)FirstChar;
7326     }
7327
7328     llvm::raw_string_ostream OS(CodePointStr);
7329     if (CodePoint < 256)
7330       OS << "\\x" << llvm::format("%02x", CodePoint);
7331     else if (CodePoint <= 0xFFFF)
7332       OS << "\\u" << llvm::format("%04x", CodePoint);
7333     else
7334       OS << "\\U" << llvm::format("%08x", CodePoint);
7335     OS.flush();
7336     Specifier = CodePointStr;
7337   }
7338
7339   EmitFormatDiagnostic(
7340       S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
7341       /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));
7342
7343   return keepGoing;
7344 }
7345
7346 void
7347 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
7348                                                       const char *startSpec,
7349                                                       unsigned specifierLen) {
7350   EmitFormatDiagnostic(
7351     S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
7352     Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
7353 }
7354
7355 bool
7356 CheckFormatHandler::CheckNumArgs(
7357   const analyze_format_string::FormatSpecifier &FS,
7358   const analyze_format_string::ConversionSpecifier &CS,
7359   const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {
7360
7361   if (argIndex >= NumDataArgs) {
7362     PartialDiagnostic PDiag = FS.usesPositionalArg()
7363       ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
7364            << (argIndex+1) << NumDataArgs)
7365       : S.PDiag(diag::warn_printf_insufficient_data_args);
7366     EmitFormatDiagnostic(
7367       PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
7368       getSpecifierRange(startSpecifier, specifierLen));
7369
7370     // Since the format string requires more arguments than were supplied, by
7371     // extension all the supplied arguments are covered, so mark this as so.
7372     UncoveredArg.setAllCovered();
7373     return false;
7374   }
7375   return true;
7376 }
7377
7378 template<typename Range>
7379 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
7380                                               SourceLocation Loc,
7381                                               bool IsStringLocation,
7382                                               Range StringRange,
7383                                               ArrayRef<FixItHint> FixIt) {
7384   EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
7385                        Loc, IsStringLocation, StringRange, FixIt);
7386 }
7387
7388 /// If the format string is not within the function call, emit a note
7389 /// so that the function call and string are in diagnostic messages.
7390 ///
7391 /// \param InFunctionCall if true, the format string is within the function
7392 /// call and only one diagnostic message will be produced. Otherwise, an
7393 /// extra note will be emitted pointing to the location of the format string.
7394 ///
7395 /// \param ArgumentExpr the expression that is passed as the format string
7396 /// argument in the function call. Used for getting locations when two
7397 /// diagnostics are emitted.
7398 ///
7399 /// \param PDiag the callee should already have provided any strings for the
7400 /// diagnostic message. This function only adds locations and fixits
7401 /// to diagnostics.
7402 ///
7403 /// \param Loc primary location for diagnostic. If two diagnostics are
7404 /// required, one will be at Loc and a new SourceLocation will be created for
7405 /// the other one.
7406 ///
7407 /// \param IsStringLocation if true, Loc points into the format string and
7408 /// should be used for the note. Otherwise, Loc points to the argument list
7409 /// and will be used with PDiag.
7410 ///
7411 /// \param StringRange some or all of the string to highlight. This is
7412 /// templated so it can accept either a CharSourceRange or a SourceRange.
7413 ///
7414 /// \param FixIt optional fix it hint for the format string.
7415 template <typename Range>
7416 void CheckFormatHandler::EmitFormatDiagnostic(
7417     Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
7418     const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
7419     Range StringRange, ArrayRef<FixItHint> FixIt) {
7420   if (InFunctionCall) {
7421     const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
7422     D << StringRange;
7423     D << FixIt;
7424   } else {
7425     S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
7426       << ArgumentExpr->getSourceRange();
7427
7428     const Sema::SemaDiagnosticBuilder &Note =
7429       S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
7430              diag::note_format_string_defined);
7431
7432     Note << StringRange;
7433     Note << FixIt;
7434   }
7435 }
7436
7437 //===--- CHECK: Printf format string checking ------------------------------===//
7438
7439 namespace {
7440
7441 class CheckPrintfHandler : public CheckFormatHandler {
7442 public:
7443   CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
7444                      const Expr *origFormatExpr,
7445                      const Sema::FormatStringType type, unsigned firstDataArg,
7446                      unsigned numDataArgs, bool isObjC, const char *beg,
7447                      bool hasVAListArg, ArrayRef<const Expr *> Args,
7448                      unsigned formatIdx, bool inFunctionCall,
7449                      Sema::VariadicCallType CallType,
7450                      llvm::SmallBitVector &CheckedVarArgs,
7451                      UncoveredArgHandler &UncoveredArg)
7452       : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
7453                            numDataArgs, beg, hasVAListArg, Args, formatIdx,
7454                            inFunctionCall, CallType, CheckedVarArgs,
7455                            UncoveredArg) {}
7456
7457   bool isObjCContext() const { return FSType == Sema::FST_NSString; }
7458
7459   /// Returns true if '%@' specifiers are allowed in the format string.
7460 bool allowsObjCArg() const { 7461 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 7462 FSType == Sema::FST_OSTrace; 7463 } 7464 7465 bool HandleInvalidPrintfConversionSpecifier( 7466 const analyze_printf::PrintfSpecifier &FS, 7467 const char *startSpecifier, 7468 unsigned specifierLen) override; 7469 7470 void handleInvalidMaskType(StringRef MaskType) override; 7471 7472 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 7473 const char *startSpecifier, 7474 unsigned specifierLen) override; 7475 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 7476 const char *StartSpecifier, 7477 unsigned SpecifierLen, 7478 const Expr *E); 7479 7480 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 7481 const char *startSpecifier, unsigned specifierLen); 7482 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 7483 const analyze_printf::OptionalAmount &Amt, 7484 unsigned type, 7485 const char *startSpecifier, unsigned specifierLen); 7486 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7487 const analyze_printf::OptionalFlag &flag, 7488 const char *startSpecifier, unsigned specifierLen); 7489 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 7490 const analyze_printf::OptionalFlag &ignoredFlag, 7491 const analyze_printf::OptionalFlag &flag, 7492 const char *startSpecifier, unsigned specifierLen); 7493 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 7494 const Expr *E); 7495 7496 void HandleEmptyObjCModifierFlag(const char *startFlag, 7497 unsigned flagLen) override; 7498 7499 void HandleInvalidObjCModifierFlag(const char *startFlag, 7500 unsigned flagLen) override; 7501 7502 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 7503 const char *flagsEnd, 7504 const char *conversionPosition) 7505 override; 7506 }; 7507 7508 } // namespace 7509 7510 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 7511 const analyze_printf::PrintfSpecifier &FS, 7512 const char *startSpecifier, 7513 unsigned specifierLen) { 7514 const analyze_printf::PrintfConversionSpecifier &CS = 7515 FS.getConversionSpecifier(); 7516 7517 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 7518 getLocationOfByte(CS.getStart()), 7519 startSpecifier, specifierLen, 7520 CS.getStart(), CS.getLength()); 7521 } 7522 7523 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 7524 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 7525 } 7526 7527 bool CheckPrintfHandler::HandleAmount( 7528 const analyze_format_string::OptionalAmount &Amt, 7529 unsigned k, const char *startSpecifier, 7530 unsigned specifierLen) { 7531 if (Amt.hasDataArgument()) { 7532 if (!HasVAListArg) { 7533 unsigned argIndex = Amt.getArgIndex(); 7534 if (argIndex >= NumDataArgs) { 7535 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 7536 << k, 7537 getLocationOfByte(Amt.getStart()), 7538 /*IsStringLocation*/true, 7539 getSpecifierRange(startSpecifier, specifierLen)); 7540 // Don't do any more checking. We will just emit 7541 // spurious errors. 7542 return false; 7543 } 7544 7545 // Type check the data argument. It should be an 'int'. 7546 // Although not in conformance with C99, we also allow the argument to be 7547 // an 'unsigned int' as that is a reasonably safe case. GCC also 7548 // doesn't emit a warning for that case. 
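      // For example, in printf("%*d", width, value) the '*' consumes 'width'
      // as a data argument, and 'width' must have type 'int' (or
      // 'unsigned int') per the comment above.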
7549 CoveredArgs.set(argIndex); 7550 const Expr *Arg = getDataArg(argIndex); 7551 if (!Arg) 7552 return false; 7553 7554 QualType T = Arg->getType(); 7555 7556 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 7557 assert(AT.isValid()); 7558 7559 if (!AT.matchesType(S.Context, T)) { 7560 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 7561 << k << AT.getRepresentativeTypeName(S.Context) 7562 << T << Arg->getSourceRange(), 7563 getLocationOfByte(Amt.getStart()), 7564 /*IsStringLocation*/true, 7565 getSpecifierRange(startSpecifier, specifierLen)); 7566 // Don't do any more checking. We will just emit 7567 // spurious errors. 7568 return false; 7569 } 7570 } 7571 } 7572 return true; 7573 } 7574 7575 void CheckPrintfHandler::HandleInvalidAmount( 7576 const analyze_printf::PrintfSpecifier &FS, 7577 const analyze_printf::OptionalAmount &Amt, 7578 unsigned type, 7579 const char *startSpecifier, 7580 unsigned specifierLen) { 7581 const analyze_printf::PrintfConversionSpecifier &CS = 7582 FS.getConversionSpecifier(); 7583 7584 FixItHint fixit = 7585 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 7586 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 7587 Amt.getConstantLength())) 7588 : FixItHint(); 7589 7590 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 7591 << type << CS.toString(), 7592 getLocationOfByte(Amt.getStart()), 7593 /*IsStringLocation*/true, 7594 getSpecifierRange(startSpecifier, specifierLen), 7595 fixit); 7596 } 7597 7598 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7599 const analyze_printf::OptionalFlag &flag, 7600 const char *startSpecifier, 7601 unsigned specifierLen) { 7602 // Warn about pointless flag with a fixit removal. 7603 const analyze_printf::PrintfConversionSpecifier &CS = 7604 FS.getConversionSpecifier(); 7605 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 7606 << flag.toString() << CS.toString(), 7607 getLocationOfByte(flag.getPosition()), 7608 /*IsStringLocation*/true, 7609 getSpecifierRange(startSpecifier, specifierLen), 7610 FixItHint::CreateRemoval( 7611 getSpecifierRange(flag.getPosition(), 1))); 7612 } 7613 7614 void CheckPrintfHandler::HandleIgnoredFlag( 7615 const analyze_printf::PrintfSpecifier &FS, 7616 const analyze_printf::OptionalFlag &ignoredFlag, 7617 const analyze_printf::OptionalFlag &flag, 7618 const char *startSpecifier, 7619 unsigned specifierLen) { 7620 // Warn about ignored flag with a fixit removal. 7621 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 7622 << ignoredFlag.toString() << flag.toString(), 7623 getLocationOfByte(ignoredFlag.getPosition()), 7624 /*IsStringLocation*/true, 7625 getSpecifierRange(startSpecifier, specifierLen), 7626 FixItHint::CreateRemoval( 7627 getSpecifierRange(ignoredFlag.getPosition(), 1))); 7628 } 7629 7630 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 7631 unsigned flagLen) { 7632 // Warn about an empty flag. 7633 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 7634 getLocationOfByte(startFlag), 7635 /*IsStringLocation*/true, 7636 getSpecifierRange(startFlag, flagLen)); 7637 } 7638 7639 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 7640 unsigned flagLen) { 7641 // Warn about an invalid flag. 
7642   auto Range = getSpecifierRange(startFlag, flagLen);
7643   StringRef flag(startFlag, flagLen);
7644   EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
7645                        getLocationOfByte(startFlag),
7646                        /*IsStringLocation*/true,
7647                        Range, FixItHint::CreateRemoval(Range));
7648 }
7649
7650 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
7651     const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
7652   // Warn about using '[...]' without a '@' conversion.
7653   auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
7654   auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
7655   EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
7656                        getLocationOfByte(conversionPosition),
7657                        /*IsStringLocation*/true,
7658                        Range, FixItHint::CreateRemoval(Range));
7659 }
7660
7661 // Determines if the specified type is a C++ class or struct containing
7662 // a member with the specified name and kind (e.g. a CXXMethodDecl named
7663 // "c_str()").
7664 template<typename MemberKind>
7665 static llvm::SmallPtrSet<MemberKind*, 1>
7666 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
7667   const RecordType *RT = Ty->getAs<RecordType>();
7668   llvm::SmallPtrSet<MemberKind*, 1> Results;
7669
7670   if (!RT)
7671     return Results;
7672   const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
7673   if (!RD || !RD->getDefinition())
7674     return Results;
7675
7676   LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
7677                  Sema::LookupMemberName);
7678   R.suppressDiagnostics();
7679
7680   // We just need to include all members of the right kind turned up by the
7681   // filter, at this point.
7682   if (S.LookupQualifiedName(R, RT->getDecl()))
7683     for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
7684       NamedDecl *decl = (*I)->getUnderlyingDecl();
7685       if (MemberKind *FK = dyn_cast<MemberKind>(decl))
7686         Results.insert(FK);
7687     }
7688   return Results;
7689 }
7690
7691 /// Check if we could call '.c_str()' on an object.
7692 ///
7693 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
7694 /// allow the call, or if it would be ambiguous).
7695 bool Sema::hasCStrMethod(const Expr *E) {
7696   using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
7697
7698   MethodSet Results =
7699       CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
7700   for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
7701        MI != ME; ++MI)
7702     if ((*MI)->getMinRequiredArguments() == 0)
7703       return true;
7704   return false;
7705 }
7706
7707 // Check if a (w)string was passed when a (w)char* was needed, and offer a
7708 // better diagnostic if so. AT is assumed to be valid.
7709 // Returns true when a c_str() conversion method is found.
7710 bool CheckPrintfHandler::checkForCStrMembers(
7711     const analyze_printf::ArgType &AT, const Expr *E) {
7712   using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
7713
7714   MethodSet Results =
7715       CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
7716
7717   for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
7718        MI != ME; ++MI) {
7719     const CXXMethodDecl *Method = *MI;
7720     if (Method->getMinRequiredArguments() == 0 &&
7721         AT.matchesType(S.Context, Method->getReturnType())) {
7722       // FIXME: Suggest parens if the expression needs them.
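      // For example, passing a std::string 'Str' to a "%s" specifier produces
      // a note suggesting 'Str.c_str()' via the insertion fixit built below.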
7723 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 7724 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 7725 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 7726 return true; 7727 } 7728 } 7729 7730 return false; 7731 } 7732 7733 bool 7734 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 7735 &FS, 7736 const char *startSpecifier, 7737 unsigned specifierLen) { 7738 using namespace analyze_format_string; 7739 using namespace analyze_printf; 7740 7741 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 7742 7743 if (FS.consumesDataArgument()) { 7744 if (atFirstArg) { 7745 atFirstArg = false; 7746 usesPositionalArgs = FS.usesPositionalArg(); 7747 } 7748 else if (usesPositionalArgs != FS.usesPositionalArg()) { 7749 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 7750 startSpecifier, specifierLen); 7751 return false; 7752 } 7753 } 7754 7755 // First check if the field width, precision, and conversion specifier 7756 // have matching data arguments. 7757 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 7758 startSpecifier, specifierLen)) { 7759 return false; 7760 } 7761 7762 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 7763 startSpecifier, specifierLen)) { 7764 return false; 7765 } 7766 7767 if (!CS.consumesDataArgument()) { 7768 // FIXME: Technically specifying a precision or field width here 7769 // makes no sense. Worth issuing a warning at some point. 7770 return true; 7771 } 7772 7773 // Consume the argument. 7774 unsigned argIndex = FS.getArgIndex(); 7775 if (argIndex < NumDataArgs) { 7776 // The check to see if the argIndex is valid will come later. 7777 // We set the bit here because we may exit early from this 7778 // function if we encounter some other error. 7779 CoveredArgs.set(argIndex); 7780 } 7781 7782 // FreeBSD kernel extensions. 7783 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 7784 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 7785 // We need at least two arguments. 7786 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 7787 return false; 7788 7789 // Claim the second argument. 7790 CoveredArgs.set(argIndex + 1); 7791 7792 // Type check the first argument (int for %b, pointer for %D) 7793 const Expr *Ex = getDataArg(argIndex); 7794 const analyze_printf::ArgType &AT = 7795 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 7796 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 7797 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 7798 EmitFormatDiagnostic( 7799 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 7800 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 7801 << false << Ex->getSourceRange(), 7802 Ex->getBeginLoc(), /*IsStringLocation*/ false, 7803 getSpecifierRange(startSpecifier, specifierLen)); 7804 7805 // Type check the second argument (char * for both %b and %D) 7806 Ex = getDataArg(argIndex + 1); 7807 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 7808 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 7809 EmitFormatDiagnostic( 7810 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 7811 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 7812 << false << Ex->getSourceRange(), 7813 Ex->getBeginLoc(), /*IsStringLocation*/ false, 7814 getSpecifierRange(startSpecifier, specifierLen)); 7815 7816 return true; 7817 } 7818 7819 // Check for using an Objective-C specific conversion specifier 7820 // in a non-ObjC literal. 
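  // For example, "%@" appearing in a plain printf() format string is
  // diagnosed here as an invalid conversion specifier.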
7821 if (!allowsObjCArg() && CS.isObjCArg()) { 7822 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7823 specifierLen); 7824 } 7825 7826 // %P can only be used with os_log. 7827 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 7828 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7829 specifierLen); 7830 } 7831 7832 // %n is not allowed with os_log. 7833 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 7834 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 7835 getLocationOfByte(CS.getStart()), 7836 /*IsStringLocation*/ false, 7837 getSpecifierRange(startSpecifier, specifierLen)); 7838 7839 return true; 7840 } 7841 7842 // Only scalars are allowed for os_trace. 7843 if (FSType == Sema::FST_OSTrace && 7844 (CS.getKind() == ConversionSpecifier::PArg || 7845 CS.getKind() == ConversionSpecifier::sArg || 7846 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 7847 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7848 specifierLen); 7849 } 7850 7851 // Check for use of public/private annotation outside of os_log(). 7852 if (FSType != Sema::FST_OSLog) { 7853 if (FS.isPublic().isSet()) { 7854 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 7855 << "public", 7856 getLocationOfByte(FS.isPublic().getPosition()), 7857 /*IsStringLocation*/ false, 7858 getSpecifierRange(startSpecifier, specifierLen)); 7859 } 7860 if (FS.isPrivate().isSet()) { 7861 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 7862 << "private", 7863 getLocationOfByte(FS.isPrivate().getPosition()), 7864 /*IsStringLocation*/ false, 7865 getSpecifierRange(startSpecifier, specifierLen)); 7866 } 7867 } 7868 7869 // Check for invalid use of field width 7870 if (!FS.hasValidFieldWidth()) { 7871 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 7872 startSpecifier, specifierLen); 7873 } 7874 7875 // Check for invalid use of precision 7876 if (!FS.hasValidPrecision()) { 7877 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 7878 startSpecifier, specifierLen); 7879 } 7880 7881 // Precision is mandatory for %P specifier. 7882 if (CS.getKind() == ConversionSpecifier::PArg && 7883 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 7884 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 7885 getLocationOfByte(startSpecifier), 7886 /*IsStringLocation*/ false, 7887 getSpecifierRange(startSpecifier, specifierLen)); 7888 } 7889 7890 // Check each flag does not conflict with any other component. 
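  // For example, the '+' flag in "%+s" only makes sense for numeric
  // conversions, so it is flagged below with a fixit removing it.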
7891 if (!FS.hasValidThousandsGroupingPrefix()) 7892 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 7893 if (!FS.hasValidLeadingZeros()) 7894 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 7895 if (!FS.hasValidPlusPrefix()) 7896 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 7897 if (!FS.hasValidSpacePrefix()) 7898 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 7899 if (!FS.hasValidAlternativeForm()) 7900 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 7901 if (!FS.hasValidLeftJustified()) 7902 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 7903 7904 // Check that flags are not ignored by another flag 7905 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 7906 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 7907 startSpecifier, specifierLen); 7908 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 7909 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 7910 startSpecifier, specifierLen); 7911 7912 // Check the length modifier is valid with the given conversion specifier. 7913 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 7914 S.getLangOpts())) 7915 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 7916 diag::warn_format_nonsensical_length); 7917 else if (!FS.hasStandardLengthModifier()) 7918 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 7919 else if (!FS.hasStandardLengthConversionCombination()) 7920 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 7921 diag::warn_format_non_standard_conversion_spec); 7922 7923 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 7924 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 7925 7926 // The remaining checks depend on the data arguments. 7927 if (HasVAListArg) 7928 return true; 7929 7930 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 7931 return false; 7932 7933 const Expr *Arg = getDataArg(argIndex); 7934 if (!Arg) 7935 return true; 7936 7937 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 7938 } 7939 7940 static bool requiresParensToAddCast(const Expr *E) { 7941 // FIXME: We should have a general way to reason about operator 7942 // precedence and whether parens are actually needed here. 7943 // Take care of a few common cases where they aren't. 
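  // For example, a plain DeclRefExpr like 'x' can become '(long)x' directly,
  // whereas a binary expression like 'a + b' needs '(long)(a + b)'.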
7944 const Expr *Inside = E->IgnoreImpCasts(); 7945 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 7946 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 7947 7948 switch (Inside->getStmtClass()) { 7949 case Stmt::ArraySubscriptExprClass: 7950 case Stmt::CallExprClass: 7951 case Stmt::CharacterLiteralClass: 7952 case Stmt::CXXBoolLiteralExprClass: 7953 case Stmt::DeclRefExprClass: 7954 case Stmt::FloatingLiteralClass: 7955 case Stmt::IntegerLiteralClass: 7956 case Stmt::MemberExprClass: 7957 case Stmt::ObjCArrayLiteralClass: 7958 case Stmt::ObjCBoolLiteralExprClass: 7959 case Stmt::ObjCBoxedExprClass: 7960 case Stmt::ObjCDictionaryLiteralClass: 7961 case Stmt::ObjCEncodeExprClass: 7962 case Stmt::ObjCIvarRefExprClass: 7963 case Stmt::ObjCMessageExprClass: 7964 case Stmt::ObjCPropertyRefExprClass: 7965 case Stmt::ObjCStringLiteralClass: 7966 case Stmt::ObjCSubscriptRefExprClass: 7967 case Stmt::ParenExprClass: 7968 case Stmt::StringLiteralClass: 7969 case Stmt::UnaryOperatorClass: 7970 return false; 7971 default: 7972 return true; 7973 } 7974 } 7975 7976 static std::pair<QualType, StringRef> 7977 shouldNotPrintDirectly(const ASTContext &Context, 7978 QualType IntendedTy, 7979 const Expr *E) { 7980 // Use a 'while' to peel off layers of typedefs. 7981 QualType TyTy = IntendedTy; 7982 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 7983 StringRef Name = UserTy->getDecl()->getName(); 7984 QualType CastTy = llvm::StringSwitch<QualType>(Name) 7985 .Case("CFIndex", Context.getNSIntegerType()) 7986 .Case("NSInteger", Context.getNSIntegerType()) 7987 .Case("NSUInteger", Context.getNSUIntegerType()) 7988 .Case("SInt32", Context.IntTy) 7989 .Case("UInt32", Context.UnsignedIntTy) 7990 .Default(QualType()); 7991 7992 if (!CastTy.isNull()) 7993 return std::make_pair(CastTy, Name); 7994 7995 TyTy = UserTy->desugar(); 7996 } 7997 7998 // Strip parens if necessary. 7999 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 8000 return shouldNotPrintDirectly(Context, 8001 PE->getSubExpr()->getType(), 8002 PE->getSubExpr()); 8003 8004 // If this is a conditional expression, then its result type is constructed 8005 // via usual arithmetic conversions and thus there might be no necessary 8006 // typedef sugar there. Recurse to operands to check for NSInteger & 8007 // Co. usage condition. 8008 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 8009 QualType TrueTy, FalseTy; 8010 StringRef TrueName, FalseName; 8011 8012 std::tie(TrueTy, TrueName) = 8013 shouldNotPrintDirectly(Context, 8014 CO->getTrueExpr()->getType(), 8015 CO->getTrueExpr()); 8016 std::tie(FalseTy, FalseName) = 8017 shouldNotPrintDirectly(Context, 8018 CO->getFalseExpr()->getType(), 8019 CO->getFalseExpr()); 8020 8021 if (TrueTy == FalseTy) 8022 return std::make_pair(TrueTy, TrueName); 8023 else if (TrueTy.isNull()) 8024 return std::make_pair(FalseTy, FalseName); 8025 else if (FalseTy.isNull()) 8026 return std::make_pair(TrueTy, TrueName); 8027 } 8028 8029 return std::make_pair(QualType(), StringRef()); 8030 } 8031 8032 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 8033 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 8034 /// type do not count. 8035 static bool 8036 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 8037 QualType From = ICE->getSubExpr()->getType(); 8038 QualType To = ICE->getType(); 8039 // It's an integer promotion if the destination type is the promoted 8040 // source type. 
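  // For example, a 'short' argument to a variadic function is implicitly
  // promoted to 'int'; that CK_IntegralCast is what gets matched here.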
8041 if (ICE->getCastKind() == CK_IntegralCast && 8042 From->isPromotableIntegerType() && 8043 S.Context.getPromotedIntegerType(From) == To) 8044 return true; 8045 // Look through vector types, since we do default argument promotion for 8046 // those in OpenCL. 8047 if (const auto *VecTy = From->getAs<ExtVectorType>()) 8048 From = VecTy->getElementType(); 8049 if (const auto *VecTy = To->getAs<ExtVectorType>()) 8050 To = VecTy->getElementType(); 8051 // It's a floating promotion if the source type is a lower rank. 8052 return ICE->getCastKind() == CK_FloatingCast && 8053 S.Context.getFloatingTypeOrder(From, To) < 0; 8054 } 8055 8056 bool 8057 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8058 const char *StartSpecifier, 8059 unsigned SpecifierLen, 8060 const Expr *E) { 8061 using namespace analyze_format_string; 8062 using namespace analyze_printf; 8063 8064 // Now type check the data expression that matches the 8065 // format specifier. 8066 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 8067 if (!AT.isValid()) 8068 return true; 8069 8070 QualType ExprTy = E->getType(); 8071 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 8072 ExprTy = TET->getUnderlyingExpr()->getType(); 8073 } 8074 8075 const analyze_printf::ArgType::MatchKind Match = 8076 AT.matchesType(S.Context, ExprTy); 8077 bool Pedantic = Match == analyze_printf::ArgType::NoMatchPedantic; 8078 if (Match == analyze_printf::ArgType::Match) 8079 return true; 8080 8081 // Look through argument promotions for our error message's reported type. 8082 // This includes the integral and floating promotions, but excludes array 8083 // and function pointer decay (seeing that an argument intended to be a 8084 // string has type 'char [6]' is probably more confusing than 'char *') and 8085 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 8086 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 8087 if (isArithmeticArgumentPromotion(S, ICE)) { 8088 E = ICE->getSubExpr(); 8089 ExprTy = E->getType(); 8090 8091 // Check if we didn't match because of an implicit cast from a 'char' 8092 // or 'short' to an 'int'. This is done because printf is a varargs 8093 // function. 8094 if (ICE->getType() == S.Context.IntTy || 8095 ICE->getType() == S.Context.UnsignedIntTy) { 8096 // All further checking is done on the subexpression. 8097 if (AT.matchesType(S.Context, ExprTy)) 8098 return true; 8099 } 8100 } 8101 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 8102 // Special case for 'a', which has type 'int' in C. 8103 // Note, however, that we do /not/ want to treat multibyte constants like 8104 // 'MooV' as characters! This form is deprecated but still exists. 8105 if (ExprTy == S.Context.IntTy) 8106 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 8107 ExprTy = S.Context.CharTy; 8108 } 8109 8110 // Look through enums to their underlying type. 8111 bool IsEnum = false; 8112 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 8113 ExprTy = EnumTy->getDecl()->getIntegerType(); 8114 IsEnum = true; 8115 } 8116 8117 // %C in an Objective-C context prints a unichar, not a wchar_t. 8118 // If the argument is an integer of some kind, believe the %C and suggest 8119 // a cast instead of changing the conversion specifier. 
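  // For example (illustrative), [NSString stringWithFormat:@"%C", value] with
  // an 'int' value is nudged toward a '(unichar)' cast below instead of a
  // fixit that rewrites the "%C" specifier.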
8120 QualType IntendedTy = ExprTy; 8121 if (isObjCContext() && 8122 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 8123 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 8124 !ExprTy->isCharType()) { 8125 // 'unichar' is defined as a typedef of unsigned short, but we should 8126 // prefer using the typedef if it is visible. 8127 IntendedTy = S.Context.UnsignedShortTy; 8128 8129 // While we are here, check if the value is an IntegerLiteral that happens 8130 // to be within the valid range. 8131 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 8132 const llvm::APInt &V = IL->getValue(); 8133 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 8134 return true; 8135 } 8136 8137 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 8138 Sema::LookupOrdinaryName); 8139 if (S.LookupName(Result, S.getCurScope())) { 8140 NamedDecl *ND = Result.getFoundDecl(); 8141 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 8142 if (TD->getUnderlyingType() == IntendedTy) 8143 IntendedTy = S.Context.getTypedefType(TD); 8144 } 8145 } 8146 } 8147 8148 // Special-case some of Darwin's platform-independence types by suggesting 8149 // casts to primitive types that are known to be large enough. 8150 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 8151 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 8152 QualType CastTy; 8153 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 8154 if (!CastTy.isNull()) { 8155 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 8156 // (long in ASTContext). Only complain to pedants. 8157 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 8158 (AT.isSizeT() || AT.isPtrdiffT()) && 8159 AT.matchesType(S.Context, CastTy)) 8160 Pedantic = true; 8161 IntendedTy = CastTy; 8162 ShouldNotPrintDirectly = true; 8163 } 8164 } 8165 8166 // We may be able to offer a FixItHint if it is a supported type. 8167 PrintfSpecifier fixedFS = FS; 8168 bool Success = 8169 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 8170 8171 if (Success) { 8172 // Get the fix string from the fixed format specifier 8173 SmallString<16> buf; 8174 llvm::raw_svector_ostream os(buf); 8175 fixedFS.toString(os); 8176 8177 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 8178 8179 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 8180 unsigned Diag = 8181 Pedantic 8182 ? diag::warn_format_conversion_argument_type_mismatch_pedantic 8183 : diag::warn_format_conversion_argument_type_mismatch; 8184 // In this case, the specifier is wrong and should be changed to match 8185 // the argument. 8186 EmitFormatDiagnostic(S.PDiag(Diag) 8187 << AT.getRepresentativeTypeName(S.Context) 8188 << IntendedTy << IsEnum << E->getSourceRange(), 8189 E->getBeginLoc(), 8190 /*IsStringLocation*/ false, SpecRange, 8191 FixItHint::CreateReplacement(SpecRange, os.str())); 8192 } else { 8193 // The canonical type for formatting this value is different from the 8194 // actual type of the expression. (This occurs, for example, with Darwin's 8195 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 8196 // should be printed as 'long' for 64-bit compatibility.) 8197 // Rather than emitting a normal format/argument mismatch, we want to 8198 // add a cast to the recommended type (and correct the format string 8199 // if necessary). 
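      // For example (illustrative), printing an NSInteger with "%d" suggests
      // inserting a '(long)' cast (and fixing the specifier when needed)
      // rather than reporting a plain format/argument mismatch.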
8200 SmallString<16> CastBuf; 8201 llvm::raw_svector_ostream CastFix(CastBuf); 8202 CastFix << "("; 8203 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 8204 CastFix << ")"; 8205 8206 SmallVector<FixItHint,4> Hints; 8207 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 8208 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 8209 8210 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 8211 // If there's already a cast present, just replace it. 8212 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 8213 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 8214 8215 } else if (!requiresParensToAddCast(E)) { 8216 // If the expression has high enough precedence, 8217 // just write the C-style cast. 8218 Hints.push_back( 8219 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8220 } else { 8221 // Otherwise, add parens around the expression as well as the cast. 8222 CastFix << "("; 8223 Hints.push_back( 8224 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8225 8226 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 8227 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 8228 } 8229 8230 if (ShouldNotPrintDirectly) { 8231 // The expression has a type that should not be printed directly. 8232 // We extract the name from the typedef because we don't want to show 8233 // the underlying type in the diagnostic. 8234 StringRef Name; 8235 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 8236 Name = TypedefTy->getDecl()->getName(); 8237 else 8238 Name = CastTyName; 8239 unsigned Diag = Pedantic 8240 ? diag::warn_format_argument_needs_cast_pedantic 8241 : diag::warn_format_argument_needs_cast; 8242 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 8243 << E->getSourceRange(), 8244 E->getBeginLoc(), /*IsStringLocation=*/false, 8245 SpecRange, Hints); 8246 } else { 8247 // In this case, the expression could be printed using a different 8248 // specifier, but we've decided that the specifier is probably correct 8249 // and we should cast instead. Just use the normal warning message. 8250 EmitFormatDiagnostic( 8251 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8252 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 8253 << E->getSourceRange(), 8254 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 8255 } 8256 } 8257 } else { 8258 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 8259 SpecifierLen); 8260 // Since the warning for passing non-POD types to variadic functions 8261 // was deferred until now, we emit a warning for non-POD 8262 // arguments here. 8263 switch (S.isValidVarArgType(ExprTy)) { 8264 case Sema::VAK_Valid: 8265 case Sema::VAK_ValidInCXX11: { 8266 unsigned Diag = 8267 Pedantic 8268 ? 
diag::warn_format_conversion_argument_type_mismatch_pedantic 8269 : diag::warn_format_conversion_argument_type_mismatch; 8270 8271 EmitFormatDiagnostic( 8272 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 8273 << IsEnum << CSR << E->getSourceRange(), 8274 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8275 break; 8276 } 8277 case Sema::VAK_Undefined: 8278 case Sema::VAK_MSVCUndefined: 8279 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 8280 << S.getLangOpts().CPlusPlus11 << ExprTy 8281 << CallType 8282 << AT.getRepresentativeTypeName(S.Context) << CSR 8283 << E->getSourceRange(), 8284 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8285 checkForCStrMembers(AT, E); 8286 break; 8287 8288 case Sema::VAK_Invalid: 8289 if (ExprTy->isObjCObjectType()) 8290 EmitFormatDiagnostic( 8291 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 8292 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 8293 << AT.getRepresentativeTypeName(S.Context) << CSR 8294 << E->getSourceRange(), 8295 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8296 else 8297 // FIXME: If this is an initializer list, suggest removing the braces 8298 // or inserting a cast to the target type. 8299 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 8300 << isa<InitListExpr>(E) << ExprTy << CallType 8301 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 8302 break; 8303 } 8304 8305 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 8306 "format string specifier index out of range"); 8307 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 8308 } 8309 8310 return true; 8311 } 8312 8313 //===--- CHECK: Scanf format string checking ------------------------------===// 8314 8315 namespace { 8316 8317 class CheckScanfHandler : public CheckFormatHandler { 8318 public: 8319 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 8320 const Expr *origFormatExpr, Sema::FormatStringType type, 8321 unsigned firstDataArg, unsigned numDataArgs, 8322 const char *beg, bool hasVAListArg, 8323 ArrayRef<const Expr *> Args, unsigned formatIdx, 8324 bool inFunctionCall, Sema::VariadicCallType CallType, 8325 llvm::SmallBitVector &CheckedVarArgs, 8326 UncoveredArgHandler &UncoveredArg) 8327 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 8328 numDataArgs, beg, hasVAListArg, Args, formatIdx, 8329 inFunctionCall, CallType, CheckedVarArgs, 8330 UncoveredArg) {} 8331 8332 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 8333 const char *startSpecifier, 8334 unsigned specifierLen) override; 8335 8336 bool HandleInvalidScanfConversionSpecifier( 8337 const analyze_scanf::ScanfSpecifier &FS, 8338 const char *startSpecifier, 8339 unsigned specifierLen) override; 8340 8341 void HandleIncompleteScanList(const char *start, const char *end) override; 8342 }; 8343 8344 } // namespace 8345 8346 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 8347 const char *end) { 8348 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 8349 getLocationOfByte(end), /*IsStringLocation*/true, 8350 getSpecifierRange(start, end - start)); 8351 } 8352 8353 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 8354 const analyze_scanf::ScanfSpecifier &FS, 8355 const char *startSpecifier, 8356 unsigned specifierLen) { 8357 const analyze_scanf::ScanfConversionSpecifier &CS = 8358 FS.getConversionSpecifier(); 8359 8360 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8361 
getLocationOfByte(CS.getStart()),
8362 startSpecifier, specifierLen,
8363 CS.getStart(), CS.getLength());
8364 }
8365
8366 bool CheckScanfHandler::HandleScanfSpecifier(
8367 const analyze_scanf::ScanfSpecifier &FS,
8368 const char *startSpecifier,
8369 unsigned specifierLen) {
8370 using namespace analyze_scanf;
8371 using namespace analyze_format_string;
8372
8373 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
8374
8375 // Handle case where '%' and '*' don't consume an argument. These shouldn't
8376 // be used to decide if we are using positional arguments consistently.
8377 if (FS.consumesDataArgument()) {
8378 if (atFirstArg) {
8379 atFirstArg = false;
8380 usesPositionalArgs = FS.usesPositionalArg();
8381 }
8382 else if (usesPositionalArgs != FS.usesPositionalArg()) {
8383 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
8384 startSpecifier, specifierLen);
8385 return false;
8386 }
8387 }
8388
8389 // Check that the field width is non-zero.
8390 const OptionalAmount &Amt = FS.getFieldWidth();
8391 if (Amt.getHowSpecified() == OptionalAmount::Constant) {
8392 if (Amt.getConstantAmount() == 0) {
8393 const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
8394 Amt.getConstantLength());
8395 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
8396 getLocationOfByte(Amt.getStart()),
8397 /*IsStringLocation*/true, R,
8398 FixItHint::CreateRemoval(R));
8399 }
8400 }
8401
8402 if (!FS.consumesDataArgument()) {
8403 // FIXME: Technically specifying a precision or field width here
8404 // makes no sense. Worth issuing a warning at some point.
8405 return true;
8406 }
8407
8408 // Consume the argument.
8409 unsigned argIndex = FS.getArgIndex();
8410 if (argIndex < NumDataArgs) {
8411 // The check to see if the argIndex is valid will come later.
8412 // We set the bit here because we may exit early from this
8413 // function if we encounter some other error.
8414 CoveredArgs.set(argIndex);
8415 }
8416
8417 // Check that the length modifier is valid with the given conversion specifier.
8418 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
8419 S.getLangOpts()))
8420 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
8421 diag::warn_format_nonsensical_length);
8422 else if (!FS.hasStandardLengthModifier())
8423 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
8424 else if (!FS.hasStandardLengthConversionCombination())
8425 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
8426 diag::warn_format_non_standard_conversion_spec);
8427
8428 if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
8429 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
8430
8431 // The remaining checks depend on the data arguments.
8432 if (HasVAListArg)
8433 return true;
8434
8435 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
8436 return false;
8437
8438 // Check that the argument type matches the format specifier.
8439 const Expr *Ex = getDataArg(argIndex); 8440 if (!Ex) 8441 return true; 8442 8443 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 8444 8445 if (!AT.isValid()) { 8446 return true; 8447 } 8448 8449 analyze_format_string::ArgType::MatchKind Match = 8450 AT.matchesType(S.Context, Ex->getType()); 8451 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 8452 if (Match == analyze_format_string::ArgType::Match) 8453 return true; 8454 8455 ScanfSpecifier fixedFS = FS; 8456 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 8457 S.getLangOpts(), S.Context); 8458 8459 unsigned Diag = 8460 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 8461 : diag::warn_format_conversion_argument_type_mismatch; 8462 8463 if (Success) { 8464 // Get the fix string from the fixed format specifier. 8465 SmallString<128> buf; 8466 llvm::raw_svector_ostream os(buf); 8467 fixedFS.toString(os); 8468 8469 EmitFormatDiagnostic( 8470 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 8471 << Ex->getType() << false << Ex->getSourceRange(), 8472 Ex->getBeginLoc(), 8473 /*IsStringLocation*/ false, 8474 getSpecifierRange(startSpecifier, specifierLen), 8475 FixItHint::CreateReplacement( 8476 getSpecifierRange(startSpecifier, specifierLen), os.str())); 8477 } else { 8478 EmitFormatDiagnostic(S.PDiag(Diag) 8479 << AT.getRepresentativeTypeName(S.Context) 8480 << Ex->getType() << false << Ex->getSourceRange(), 8481 Ex->getBeginLoc(), 8482 /*IsStringLocation*/ false, 8483 getSpecifierRange(startSpecifier, specifierLen)); 8484 } 8485 8486 return true; 8487 } 8488 8489 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 8490 const Expr *OrigFormatExpr, 8491 ArrayRef<const Expr *> Args, 8492 bool HasVAListArg, unsigned format_idx, 8493 unsigned firstDataArg, 8494 Sema::FormatStringType Type, 8495 bool inFunctionCall, 8496 Sema::VariadicCallType CallType, 8497 llvm::SmallBitVector &CheckedVarArgs, 8498 UncoveredArgHandler &UncoveredArg) { 8499 // CHECK: is the format string a wide literal? 8500 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 8501 CheckFormatHandler::EmitFormatDiagnostic( 8502 S, inFunctionCall, Args[format_idx], 8503 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 8504 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 8505 return; 8506 } 8507 8508 // Str - The format string. NOTE: this is NOT null-terminated! 8509 StringRef StrRef = FExpr->getString(); 8510 const char *Str = StrRef.data(); 8511 // Account for cases where the string literal is truncated in a declaration. 8512 const ConstantArrayType *T = 8513 S.Context.getAsConstantArrayType(FExpr->getType()); 8514 assert(T && "String literal not of constant array type!"); 8515 size_t TypeSize = T->getSize().getZExtValue(); 8516 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 8517 const unsigned numDataArgs = Args.size() - firstDataArg; 8518 8519 // Emit a warning if the string literal is truncated and does not contain an 8520 // embedded null character. 8521 if (TypeSize <= StrRef.size() && 8522 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) { 8523 CheckFormatHandler::EmitFormatDiagnostic( 8524 S, inFunctionCall, Args[format_idx], 8525 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 8526 FExpr->getBeginLoc(), 8527 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 8528 return; 8529 } 8530 8531 // CHECK: empty format string? 
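// e.g. (illustrative): 'printf("", 42);' is diagnosed just below, since an
// empty format string can never consume its data arguments.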
8532 if (StrLen == 0 && numDataArgs > 0) {
8533 CheckFormatHandler::EmitFormatDiagnostic(
8534 S, inFunctionCall, Args[format_idx],
8535 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
8536 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
8537 return;
8538 }
8539
8540 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
8541 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
8542 Type == Sema::FST_OSTrace) {
8543 CheckPrintfHandler H(
8544 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
8545 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
8546 HasVAListArg, Args, format_idx, inFunctionCall, CallType,
8547 CheckedVarArgs, UncoveredArg);
8548
8549 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
8550 S.getLangOpts(),
8551 S.Context.getTargetInfo(),
8552 Type == Sema::FST_FreeBSDKPrintf))
8553 H.DoneProcessing();
8554 } else if (Type == Sema::FST_Scanf) {
8555 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
8556 numDataArgs, Str, HasVAListArg, Args, format_idx,
8557 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
8558
8559 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
8560 S.getLangOpts(),
8561 S.Context.getTargetInfo()))
8562 H.DoneProcessing();
8563 } // TODO: handle other formats
8564 }
8565
8566 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
8567 // Str - The format string. NOTE: this is NOT null-terminated!
8568 StringRef StrRef = FExpr->getString();
8569 const char *Str = StrRef.data();
8570 // Account for cases where the string literal is truncated in a declaration.
8571 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
8572 assert(T && "String literal not of constant array type!");
8573 size_t TypeSize = T->getSize().getZExtValue();
8574 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
8575 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
8576 getLangOpts(),
8577 Context.getTargetInfo());
8578 }
8579
8580 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
8581
8582 // Returns the related absolute value function that is larger, or 0 if one
8583 // does not exist.
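// An illustrative walk through the chain encoded below:
//   __builtin_abs -> __builtin_labs -> __builtin_llabs -> 0
//   fabsf         -> fabs           -> fabsl           -> 0
// getBestAbsFunction() follows this chain until it reaches a function whose
// parameter type is at least as wide as the argument type.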
8584 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 8585 switch (AbsFunction) { 8586 default: 8587 return 0; 8588 8589 case Builtin::BI__builtin_abs: 8590 return Builtin::BI__builtin_labs; 8591 case Builtin::BI__builtin_labs: 8592 return Builtin::BI__builtin_llabs; 8593 case Builtin::BI__builtin_llabs: 8594 return 0; 8595 8596 case Builtin::BI__builtin_fabsf: 8597 return Builtin::BI__builtin_fabs; 8598 case Builtin::BI__builtin_fabs: 8599 return Builtin::BI__builtin_fabsl; 8600 case Builtin::BI__builtin_fabsl: 8601 return 0; 8602 8603 case Builtin::BI__builtin_cabsf: 8604 return Builtin::BI__builtin_cabs; 8605 case Builtin::BI__builtin_cabs: 8606 return Builtin::BI__builtin_cabsl; 8607 case Builtin::BI__builtin_cabsl: 8608 return 0; 8609 8610 case Builtin::BIabs: 8611 return Builtin::BIlabs; 8612 case Builtin::BIlabs: 8613 return Builtin::BIllabs; 8614 case Builtin::BIllabs: 8615 return 0; 8616 8617 case Builtin::BIfabsf: 8618 return Builtin::BIfabs; 8619 case Builtin::BIfabs: 8620 return Builtin::BIfabsl; 8621 case Builtin::BIfabsl: 8622 return 0; 8623 8624 case Builtin::BIcabsf: 8625 return Builtin::BIcabs; 8626 case Builtin::BIcabs: 8627 return Builtin::BIcabsl; 8628 case Builtin::BIcabsl: 8629 return 0; 8630 } 8631 } 8632 8633 // Returns the argument type of the absolute value function. 8634 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 8635 unsigned AbsType) { 8636 if (AbsType == 0) 8637 return QualType(); 8638 8639 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 8640 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 8641 if (Error != ASTContext::GE_None) 8642 return QualType(); 8643 8644 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 8645 if (!FT) 8646 return QualType(); 8647 8648 if (FT->getNumParams() != 1) 8649 return QualType(); 8650 8651 return FT->getParamType(0); 8652 } 8653 8654 // Returns the best absolute value function, or zero, based on type and 8655 // current absolute value function. 8656 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 8657 unsigned AbsFunctionKind) { 8658 unsigned BestKind = 0; 8659 uint64_t ArgSize = Context.getTypeSize(ArgType); 8660 for (unsigned Kind = AbsFunctionKind; Kind != 0; 8661 Kind = getLargerAbsoluteValueFunction(Kind)) { 8662 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 8663 if (Context.getTypeSize(ParamType) >= ArgSize) { 8664 if (BestKind == 0) 8665 BestKind = Kind; 8666 else if (Context.hasSameType(ParamType, ArgType)) { 8667 BestKind = Kind; 8668 break; 8669 } 8670 } 8671 } 8672 return BestKind; 8673 } 8674 8675 enum AbsoluteValueKind { 8676 AVK_Integer, 8677 AVK_Floating, 8678 AVK_Complex 8679 }; 8680 8681 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 8682 if (T->isIntegralOrEnumerationType()) 8683 return AVK_Integer; 8684 if (T->isRealFloatingType()) 8685 return AVK_Floating; 8686 if (T->isAnyComplexType()) 8687 return AVK_Complex; 8688 8689 llvm_unreachable("Type not integer, floating, or complex"); 8690 } 8691 8692 // Changes the absolute value function to a different type. Preserves whether 8693 // the function is a builtin. 
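// For example (illustrative): changeAbsFunction(Builtin::BIfabsf, AVK_Integer)
// yields Builtin::BIabs, and changeAbsFunction(Builtin::BI__builtin_labs,
// AVK_Floating) yields Builtin::BI__builtin_fabsf; the caller then runs
// getBestAbsFunction() to widen the result to fit the argument.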
8694 static unsigned changeAbsFunction(unsigned AbsKind, 8695 AbsoluteValueKind ValueKind) { 8696 switch (ValueKind) { 8697 case AVK_Integer: 8698 switch (AbsKind) { 8699 default: 8700 return 0; 8701 case Builtin::BI__builtin_fabsf: 8702 case Builtin::BI__builtin_fabs: 8703 case Builtin::BI__builtin_fabsl: 8704 case Builtin::BI__builtin_cabsf: 8705 case Builtin::BI__builtin_cabs: 8706 case Builtin::BI__builtin_cabsl: 8707 return Builtin::BI__builtin_abs; 8708 case Builtin::BIfabsf: 8709 case Builtin::BIfabs: 8710 case Builtin::BIfabsl: 8711 case Builtin::BIcabsf: 8712 case Builtin::BIcabs: 8713 case Builtin::BIcabsl: 8714 return Builtin::BIabs; 8715 } 8716 case AVK_Floating: 8717 switch (AbsKind) { 8718 default: 8719 return 0; 8720 case Builtin::BI__builtin_abs: 8721 case Builtin::BI__builtin_labs: 8722 case Builtin::BI__builtin_llabs: 8723 case Builtin::BI__builtin_cabsf: 8724 case Builtin::BI__builtin_cabs: 8725 case Builtin::BI__builtin_cabsl: 8726 return Builtin::BI__builtin_fabsf; 8727 case Builtin::BIabs: 8728 case Builtin::BIlabs: 8729 case Builtin::BIllabs: 8730 case Builtin::BIcabsf: 8731 case Builtin::BIcabs: 8732 case Builtin::BIcabsl: 8733 return Builtin::BIfabsf; 8734 } 8735 case AVK_Complex: 8736 switch (AbsKind) { 8737 default: 8738 return 0; 8739 case Builtin::BI__builtin_abs: 8740 case Builtin::BI__builtin_labs: 8741 case Builtin::BI__builtin_llabs: 8742 case Builtin::BI__builtin_fabsf: 8743 case Builtin::BI__builtin_fabs: 8744 case Builtin::BI__builtin_fabsl: 8745 return Builtin::BI__builtin_cabsf; 8746 case Builtin::BIabs: 8747 case Builtin::BIlabs: 8748 case Builtin::BIllabs: 8749 case Builtin::BIfabsf: 8750 case Builtin::BIfabs: 8751 case Builtin::BIfabsl: 8752 return Builtin::BIcabsf; 8753 } 8754 } 8755 llvm_unreachable("Unable to convert function"); 8756 } 8757 8758 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 8759 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 8760 if (!FnInfo) 8761 return 0; 8762 8763 switch (FDecl->getBuiltinID()) { 8764 default: 8765 return 0; 8766 case Builtin::BI__builtin_abs: 8767 case Builtin::BI__builtin_fabs: 8768 case Builtin::BI__builtin_fabsf: 8769 case Builtin::BI__builtin_fabsl: 8770 case Builtin::BI__builtin_labs: 8771 case Builtin::BI__builtin_llabs: 8772 case Builtin::BI__builtin_cabs: 8773 case Builtin::BI__builtin_cabsf: 8774 case Builtin::BI__builtin_cabsl: 8775 case Builtin::BIabs: 8776 case Builtin::BIlabs: 8777 case Builtin::BIllabs: 8778 case Builtin::BIfabs: 8779 case Builtin::BIfabsf: 8780 case Builtin::BIfabsl: 8781 case Builtin::BIcabs: 8782 case Builtin::BIcabsf: 8783 case Builtin::BIcabsl: 8784 return FDecl->getBuiltinID(); 8785 } 8786 llvm_unreachable("Unknown Builtin type"); 8787 } 8788 8789 // If the replacement is valid, emit a note with replacement function. 8790 // Additionally, suggest including the proper header if not already included. 
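// A sketch of the user-visible effect in C++ (assuming no suitable 'std::abs'
// declaration is in scope):
//   double d = -1.5;
//   int i = abs(d);  // warned about by the caller; the notes emitted here
//                    // suggest using 'std::abs' and including <cmath>.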
8791 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 8792 unsigned AbsKind, QualType ArgType) { 8793 bool EmitHeaderHint = true; 8794 const char *HeaderName = nullptr; 8795 const char *FunctionName = nullptr; 8796 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 8797 FunctionName = "std::abs"; 8798 if (ArgType->isIntegralOrEnumerationType()) { 8799 HeaderName = "cstdlib"; 8800 } else if (ArgType->isRealFloatingType()) { 8801 HeaderName = "cmath"; 8802 } else { 8803 llvm_unreachable("Invalid Type"); 8804 } 8805 8806 // Lookup all std::abs 8807 if (NamespaceDecl *Std = S.getStdNamespace()) { 8808 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 8809 R.suppressDiagnostics(); 8810 S.LookupQualifiedName(R, Std); 8811 8812 for (const auto *I : R) { 8813 const FunctionDecl *FDecl = nullptr; 8814 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 8815 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 8816 } else { 8817 FDecl = dyn_cast<FunctionDecl>(I); 8818 } 8819 if (!FDecl) 8820 continue; 8821 8822 // Found std::abs(), check that they are the right ones. 8823 if (FDecl->getNumParams() != 1) 8824 continue; 8825 8826 // Check that the parameter type can handle the argument. 8827 QualType ParamType = FDecl->getParamDecl(0)->getType(); 8828 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 8829 S.Context.getTypeSize(ArgType) <= 8830 S.Context.getTypeSize(ParamType)) { 8831 // Found a function, don't need the header hint. 8832 EmitHeaderHint = false; 8833 break; 8834 } 8835 } 8836 } 8837 } else { 8838 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 8839 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 8840 8841 if (HeaderName) { 8842 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 8843 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 8844 R.suppressDiagnostics(); 8845 S.LookupName(R, S.getCurScope()); 8846 8847 if (R.isSingleResult()) { 8848 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 8849 if (FD && FD->getBuiltinID() == AbsKind) { 8850 EmitHeaderHint = false; 8851 } else { 8852 return; 8853 } 8854 } else if (!R.empty()) { 8855 return; 8856 } 8857 } 8858 } 8859 8860 S.Diag(Loc, diag::note_replace_abs_function) 8861 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 8862 8863 if (!HeaderName) 8864 return; 8865 8866 if (!EmitHeaderHint) 8867 return; 8868 8869 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 8870 << FunctionName; 8871 } 8872 8873 template <std::size_t StrLen> 8874 static bool IsStdFunction(const FunctionDecl *FDecl, 8875 const char (&Str)[StrLen]) { 8876 if (!FDecl) 8877 return false; 8878 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 8879 return false; 8880 if (!FDecl->isInStdNamespace()) 8881 return false; 8882 8883 return true; 8884 } 8885 8886 // Warn when using the wrong abs() function. 8887 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 8888 const FunctionDecl *FDecl) { 8889 if (Call->getNumArgs() != 1) 8890 return; 8891 8892 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 8893 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 8894 if (AbsKind == 0 && !IsStdAbs) 8895 return; 8896 8897 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 8898 QualType ParamType = Call->getArg(0)->getType(); 8899 8900 // Unsigned types cannot be negative. Suggest removing the absolute value 8901 // function call. 
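  // e.g. (illustrative):
  //   unsigned int u = 0;
  //   if (abs(u) > 5) { }  // warns; the fix-it note removes the 'abs' call,
  //                        // leaving 'if ((u) > 5) { }'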
8902 if (ArgType->isUnsignedIntegerType()) { 8903 const char *FunctionName = 8904 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind); 8905 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType; 8906 Diag(Call->getExprLoc(), diag::note_remove_abs) 8907 << FunctionName 8908 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()); 8909 return; 8910 } 8911 8912 // Taking the absolute value of a pointer is very suspicious, they probably 8913 // wanted to index into an array, dereference a pointer, call a function, etc. 8914 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) { 8915 unsigned DiagType = 0; 8916 if (ArgType->isFunctionType()) 8917 DiagType = 1; 8918 else if (ArgType->isArrayType()) 8919 DiagType = 2; 8920 8921 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType; 8922 return; 8923 } 8924 8925 // std::abs has overloads which prevent most of the absolute value problems 8926 // from occurring. 8927 if (IsStdAbs) 8928 return; 8929 8930 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType); 8931 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType); 8932 8933 // The argument and parameter are the same kind. Check if they are the right 8934 // size. 8935 if (ArgValueKind == ParamValueKind) { 8936 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType)) 8937 return; 8938 8939 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind); 8940 Diag(Call->getExprLoc(), diag::warn_abs_too_small) 8941 << FDecl << ArgType << ParamType; 8942 8943 if (NewAbsKind == 0) 8944 return; 8945 8946 emitReplacement(*this, Call->getExprLoc(), 8947 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 8948 return; 8949 } 8950 8951 // ArgValueKind != ParamValueKind 8952 // The wrong type of absolute value function was used. Attempt to find the 8953 // proper one. 8954 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind); 8955 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind); 8956 if (NewAbsKind == 0) 8957 return; 8958 8959 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type) 8960 << FDecl << ParamValueKind << ArgValueKind; 8961 8962 emitReplacement(*this, Call->getExprLoc(), 8963 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 8964 } 8965 8966 //===--- CHECK: Warn on use of std::max and unsigned zero. r---------------===// 8967 void Sema::CheckMaxUnsignedZero(const CallExpr *Call, 8968 const FunctionDecl *FDecl) { 8969 if (!Call || !FDecl) return; 8970 8971 // Ignore template specializations and macros. 8972 if (inTemplateInstantiation()) return; 8973 if (Call->getExprLoc().isMacroID()) return; 8974 8975 // Only care about the one template argument, two function parameter std::max 8976 if (Call->getNumArgs() != 2) return; 8977 if (!IsStdFunction(FDecl, "max")) return; 8978 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 8979 if (!ArgList) return; 8980 if (ArgList->size() != 1) return; 8981 8982 // Check that template type argument is unsigned integer. 8983 const auto& TA = ArgList->get(0); 8984 if (TA.getKind() != TemplateArgument::Type) return; 8985 QualType ArgType = TA.getAsType(); 8986 if (!ArgType->isUnsignedIntegerType()) return; 8987 8988 // See if either argument is a literal zero. 
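  // e.g. (illustrative):
  //   unsigned n = 0;
  //   std::max(0u, n);  // warned below; the note's fix-its reduce the call to
  //                     // '(n)', i.e. "std::max(0u, foo)" becomes "(foo)".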
8989 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 8990 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 8991 if (!MTE) return false; 8992 const auto *Num = dyn_cast<IntegerLiteral>(MTE->GetTemporaryExpr()); 8993 if (!Num) return false; 8994 if (Num->getValue() != 0) return false; 8995 return true; 8996 }; 8997 8998 const Expr *FirstArg = Call->getArg(0); 8999 const Expr *SecondArg = Call->getArg(1); 9000 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 9001 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 9002 9003 // Only warn when exactly one argument is zero. 9004 if (IsFirstArgZero == IsSecondArgZero) return; 9005 9006 SourceRange FirstRange = FirstArg->getSourceRange(); 9007 SourceRange SecondRange = SecondArg->getSourceRange(); 9008 9009 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 9010 9011 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 9012 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 9013 9014 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 9015 SourceRange RemovalRange; 9016 if (IsFirstArgZero) { 9017 RemovalRange = SourceRange(FirstRange.getBegin(), 9018 SecondRange.getBegin().getLocWithOffset(-1)); 9019 } else { 9020 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 9021 SecondRange.getEnd()); 9022 } 9023 9024 Diag(Call->getExprLoc(), diag::note_remove_max_call) 9025 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 9026 << FixItHint::CreateRemoval(RemovalRange); 9027 } 9028 9029 //===--- CHECK: Standard memory functions ---------------------------------===// 9030 9031 /// Takes the expression passed to the size_t parameter of functions 9032 /// such as memcmp, strncat, etc and warns if it's a comparison. 9033 /// 9034 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 9035 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 9036 IdentifierInfo *FnName, 9037 SourceLocation FnLoc, 9038 SourceLocation RParenLoc) { 9039 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 9040 if (!Size) 9041 return false; 9042 9043 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 9044 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 9045 return false; 9046 9047 SourceRange SizeRange = Size->getSourceRange(); 9048 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 9049 << SizeRange << FnName; 9050 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 9051 << FnName 9052 << FixItHint::CreateInsertion( 9053 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 9054 << FixItHint::CreateRemoval(RParenLoc); 9055 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 9056 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 9057 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 9058 ")"); 9059 9060 return true; 9061 } 9062 9063 /// Determine whether the given type is or contains a dynamic class type 9064 /// (e.g., whether it has a vtable). 9065 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 9066 bool &IsContained) { 9067 // Look through array types while ignoring qualifiers. 9068 const Type *Ty = T->getBaseElementTypeUnsafe(); 9069 IsContained = false; 9070 9071 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 9072 RD = RD ? RD->getDefinition() : nullptr; 9073 if (!RD || RD->isInvalidDecl()) 9074 return nullptr; 9075 9076 if (RD->isDynamicClass()) 9077 return RD; 9078 9079 // Check all the fields. 
If any bases were dynamic, the class is dynamic. 9080 // It's impossible for a class to transitively contain itself by value, so 9081 // infinite recursion is impossible. 9082 for (auto *FD : RD->fields()) { 9083 bool SubContained; 9084 if (const CXXRecordDecl *ContainedRD = 9085 getContainedDynamicClass(FD->getType(), SubContained)) { 9086 IsContained = true; 9087 return ContainedRD; 9088 } 9089 } 9090 9091 return nullptr; 9092 } 9093 9094 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 9095 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 9096 if (Unary->getKind() == UETT_SizeOf) 9097 return Unary; 9098 return nullptr; 9099 } 9100 9101 /// If E is a sizeof expression, returns its argument expression, 9102 /// otherwise returns NULL. 9103 static const Expr *getSizeOfExprArg(const Expr *E) { 9104 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9105 if (!SizeOf->isArgumentType()) 9106 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 9107 return nullptr; 9108 } 9109 9110 /// If E is a sizeof expression, returns its argument type. 9111 static QualType getSizeOfArgType(const Expr *E) { 9112 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9113 return SizeOf->getTypeOfArgument(); 9114 return QualType(); 9115 } 9116 9117 namespace { 9118 9119 struct SearchNonTrivialToInitializeField 9120 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 9121 using Super = 9122 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 9123 9124 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 9125 9126 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 9127 SourceLocation SL) { 9128 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9129 asDerived().visitArray(PDIK, AT, SL); 9130 return; 9131 } 9132 9133 Super::visitWithKind(PDIK, FT, SL); 9134 } 9135 9136 void visitARCStrong(QualType FT, SourceLocation SL) { 9137 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9138 } 9139 void visitARCWeak(QualType FT, SourceLocation SL) { 9140 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9141 } 9142 void visitStruct(QualType FT, SourceLocation SL) { 9143 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9144 visit(FD->getType(), FD->getLocation()); 9145 } 9146 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 9147 const ArrayType *AT, SourceLocation SL) { 9148 visit(getContext().getBaseElementType(AT), SL); 9149 } 9150 void visitTrivial(QualType FT, SourceLocation SL) {} 9151 9152 static void diag(QualType RT, const Expr *E, Sema &S) { 9153 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 9154 } 9155 9156 ASTContext &getContext() { return S.getASTContext(); } 9157 9158 const Expr *E; 9159 Sema &S; 9160 }; 9161 9162 struct SearchNonTrivialToCopyField 9163 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 9164 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 9165 9166 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 9167 9168 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 9169 SourceLocation SL) { 9170 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9171 asDerived().visitArray(PCK, AT, SL); 9172 return; 9173 } 9174 9175 Super::visitWithKind(PCK, FT, SL); 9176 } 9177 9178 void visitARCStrong(QualType FT, SourceLocation SL) { 9179 S.DiagRuntimeBehavior(SL, E, 
S.PDiag(diag::note_nontrivial_field) << 0); 9180 } 9181 void visitARCWeak(QualType FT, SourceLocation SL) { 9182 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 9183 } 9184 void visitStruct(QualType FT, SourceLocation SL) { 9185 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9186 visit(FD->getType(), FD->getLocation()); 9187 } 9188 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 9189 SourceLocation SL) { 9190 visit(getContext().getBaseElementType(AT), SL); 9191 } 9192 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 9193 SourceLocation SL) {} 9194 void visitTrivial(QualType FT, SourceLocation SL) {} 9195 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 9196 9197 static void diag(QualType RT, const Expr *E, Sema &S) { 9198 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 9199 } 9200 9201 ASTContext &getContext() { return S.getASTContext(); } 9202 9203 const Expr *E; 9204 Sema &S; 9205 }; 9206 9207 } 9208 9209 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 9210 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 9211 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 9212 9213 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 9214 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 9215 return false; 9216 9217 return doesExprLikelyComputeSize(BO->getLHS()) || 9218 doesExprLikelyComputeSize(BO->getRHS()); 9219 } 9220 9221 return getAsSizeOfExpr(SizeofExpr) != nullptr; 9222 } 9223 9224 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 9225 /// 9226 /// \code 9227 /// #define MACRO 0 9228 /// foo(MACRO); 9229 /// foo(0); 9230 /// \endcode 9231 /// 9232 /// This should return true for the first call to foo, but not for the second 9233 /// (regardless of whether foo is a macro or function). 9234 static bool isArgumentExpandedFromMacro(SourceManager &SM, 9235 SourceLocation CallLoc, 9236 SourceLocation ArgLoc) { 9237 if (!CallLoc.isMacroID()) 9238 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 9239 9240 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 9241 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 9242 } 9243 9244 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 9245 /// last two arguments transposed. 9246 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 9247 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 9248 return; 9249 9250 const Expr *SizeArg = 9251 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 9252 9253 auto isLiteralZero = [](const Expr *E) { 9254 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 9255 }; 9256 9257 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 9258 SourceLocation CallLoc = Call->getRParenLoc(); 9259 SourceManager &SM = S.getSourceManager(); 9260 if (isLiteralZero(SizeArg) && 9261 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 9262 9263 SourceLocation DiagLoc = SizeArg->getExprLoc(); 9264 9265 // Some platforms #define bzero to __builtin_memset. See if this is the 9266 // case, and if so, emit a better diagnostic. 
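    // e.g. (illustrative):
    //   char buf[32];
    //   memset(buf, sizeof(buf), 0);  // a zero-byte memset: the size and the
    //                                 // fill value were almost certainly
    //                                 // transposed, which is diagnosed below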
9267 if (BId == Builtin::BIbzero || 9268 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 9269 CallLoc, SM, S.getLangOpts()) == "bzero")) { 9270 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 9271 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 9272 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 9273 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 9274 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 9275 } 9276 return; 9277 } 9278 9279 // If the second argument to a memset is a sizeof expression and the third 9280 // isn't, this is also likely an error. This should catch 9281 // 'memset(buf, sizeof(buf), 0xff)'. 9282 if (BId == Builtin::BImemset && 9283 doesExprLikelyComputeSize(Call->getArg(1)) && 9284 !doesExprLikelyComputeSize(Call->getArg(2))) { 9285 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 9286 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 9287 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 9288 return; 9289 } 9290 } 9291 9292 /// Check for dangerous or invalid arguments to memset(). 9293 /// 9294 /// This issues warnings on known problematic, dangerous or unspecified 9295 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 9296 /// function calls. 9297 /// 9298 /// \param Call The call expression to diagnose. 9299 void Sema::CheckMemaccessArguments(const CallExpr *Call, 9300 unsigned BId, 9301 IdentifierInfo *FnName) { 9302 assert(BId != 0); 9303 9304 // It is possible to have a non-standard definition of memset. Validate 9305 // we have enough arguments, and if not, abort further checking. 9306 unsigned ExpectedNumArgs = 9307 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 9308 if (Call->getNumArgs() < ExpectedNumArgs) 9309 return; 9310 9311 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 9312 BId == Builtin::BIstrndup ? 1 : 2); 9313 unsigned LenArg = 9314 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 9315 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 9316 9317 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 9318 Call->getBeginLoc(), Call->getRParenLoc())) 9319 return; 9320 9321 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 9322 CheckMemaccessSize(*this, BId, Call); 9323 9324 // We have special checking when the length is a sizeof expression. 9325 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 9326 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 9327 llvm::FoldingSetNodeID SizeOfArgID; 9328 9329 // Although widely used, 'bzero' is not a standard function. Be more strict 9330 // with the argument types before allowing diagnostics and only allow the 9331 // form bzero(ptr, sizeof(...)). 9332 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9333 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 9334 return; 9335 9336 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 9337 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 9338 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 9339 9340 QualType DestTy = Dest->getType(); 9341 QualType PointeeTy; 9342 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 9343 PointeeTy = DestPtrTy->getPointeeType(); 9344 9345 // Never warn about void type pointers. This can be used to suppress 9346 // false positives. 
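      // e.g. (illustrative): an explicit cast such as
      //   memset((void *)&obj, 0, sizeof(obj));
      // silences the checks in the rest of this loop, matching the '(void*)'
      // fix-it suggested at the bottom.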
9347 if (PointeeTy->isVoidType()) 9348 continue; 9349 9350 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 9351 // actually comparing the expressions for equality. Because computing the 9352 // expression IDs can be expensive, we only do this if the diagnostic is 9353 // enabled. 9354 if (SizeOfArg && 9355 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 9356 SizeOfArg->getExprLoc())) { 9357 // We only compute IDs for expressions if the warning is enabled, and 9358 // cache the sizeof arg's ID. 9359 if (SizeOfArgID == llvm::FoldingSetNodeID()) 9360 SizeOfArg->Profile(SizeOfArgID, Context, true); 9361 llvm::FoldingSetNodeID DestID; 9362 Dest->Profile(DestID, Context, true); 9363 if (DestID == SizeOfArgID) { 9364 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 9365 // over sizeof(src) as well. 9366 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 9367 StringRef ReadableName = FnName->getName(); 9368 9369 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 9370 if (UnaryOp->getOpcode() == UO_AddrOf) 9371 ActionIdx = 1; // If its an address-of operator, just remove it. 9372 if (!PointeeTy->isIncompleteType() && 9373 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 9374 ActionIdx = 2; // If the pointee's size is sizeof(char), 9375 // suggest an explicit length. 9376 9377 // If the function is defined as a builtin macro, do not show macro 9378 // expansion. 9379 SourceLocation SL = SizeOfArg->getExprLoc(); 9380 SourceRange DSR = Dest->getSourceRange(); 9381 SourceRange SSR = SizeOfArg->getSourceRange(); 9382 SourceManager &SM = getSourceManager(); 9383 9384 if (SM.isMacroArgExpansion(SL)) { 9385 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 9386 SL = SM.getSpellingLoc(SL); 9387 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 9388 SM.getSpellingLoc(DSR.getEnd())); 9389 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 9390 SM.getSpellingLoc(SSR.getEnd())); 9391 } 9392 9393 DiagRuntimeBehavior(SL, SizeOfArg, 9394 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 9395 << ReadableName 9396 << PointeeTy 9397 << DestTy 9398 << DSR 9399 << SSR); 9400 DiagRuntimeBehavior(SL, SizeOfArg, 9401 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 9402 << ActionIdx 9403 << SSR); 9404 9405 break; 9406 } 9407 } 9408 9409 // Also check for cases where the sizeof argument is the exact same 9410 // type as the memory argument, and where it points to a user-defined 9411 // record type. 9412 if (SizeOfArgTy != QualType()) { 9413 if (PointeeTy->isRecordType() && 9414 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 9415 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 9416 PDiag(diag::warn_sizeof_pointer_type_memaccess) 9417 << FnName << SizeOfArgTy << ArgIdx 9418 << PointeeTy << Dest->getSourceRange() 9419 << LenExpr->getSourceRange()); 9420 break; 9421 } 9422 } 9423 } else if (DestTy->isArrayType()) { 9424 PointeeTy = DestTy; 9425 } 9426 9427 if (PointeeTy == QualType()) 9428 continue; 9429 9430 // Always complain about dynamic classes. 9431 bool IsContained; 9432 if (const CXXRecordDecl *ContainedRD = 9433 getContainedDynamicClass(PointeeTy, IsContained)) { 9434 9435 unsigned OperationType = 0; 9436 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 9437 // "overwritten" if we're warning about the destination for any call 9438 // but memcmp; otherwise a verb appropriate to the call. 
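      // e.g. (illustrative): for a dynamic class such as
      //   struct Base { virtual ~Base(); };
      //   Base b;
      //   memset(&b, 0, sizeof(b));  // diagnosed: this would clobber the
      //                              // vtable pointer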
9439 if (ArgIdx != 0 || IsCmp) { 9440 if (BId == Builtin::BImemcpy) 9441 OperationType = 1; 9442 else if(BId == Builtin::BImemmove) 9443 OperationType = 2; 9444 else if (IsCmp) 9445 OperationType = 3; 9446 } 9447 9448 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9449 PDiag(diag::warn_dyn_class_memaccess) 9450 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName 9451 << IsContained << ContainedRD << OperationType 9452 << Call->getCallee()->getSourceRange()); 9453 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 9454 BId != Builtin::BImemset) 9455 DiagRuntimeBehavior( 9456 Dest->getExprLoc(), Dest, 9457 PDiag(diag::warn_arc_object_memaccess) 9458 << ArgIdx << FnName << PointeeTy 9459 << Call->getCallee()->getSourceRange()); 9460 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 9461 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 9462 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 9463 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9464 PDiag(diag::warn_cstruct_memaccess) 9465 << ArgIdx << FnName << PointeeTy << 0); 9466 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 9467 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 9468 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 9469 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9470 PDiag(diag::warn_cstruct_memaccess) 9471 << ArgIdx << FnName << PointeeTy << 1); 9472 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 9473 } else { 9474 continue; 9475 } 9476 } else 9477 continue; 9478 9479 DiagRuntimeBehavior( 9480 Dest->getExprLoc(), Dest, 9481 PDiag(diag::note_bad_memaccess_silence) 9482 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 9483 break; 9484 } 9485 } 9486 9487 // A little helper routine: ignore addition and subtraction of integer literals. 9488 // This intentionally does not ignore all integer constant expressions because 9489 // we don't want to remove sizeof(). 9490 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 9491 Ex = Ex->IgnoreParenCasts(); 9492 9493 while (true) { 9494 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 9495 if (!BO || !BO->isAdditiveOp()) 9496 break; 9497 9498 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 9499 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 9500 9501 if (isa<IntegerLiteral>(RHS)) 9502 Ex = LHS; 9503 else if (isa<IntegerLiteral>(LHS)) 9504 Ex = RHS; 9505 else 9506 break; 9507 } 9508 9509 return Ex; 9510 } 9511 9512 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 9513 ASTContext &Context) { 9514 // Only handle constant-sized or VLAs, but not flexible members. 9515 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 9516 // Only issue the FIXIT for arrays of size > 1. 9517 if (CAT->getSize().getSExtValue() <= 1) 9518 return false; 9519 } else if (!Ty->isVariableArrayType()) { 9520 return false; 9521 } 9522 return true; 9523 } 9524 9525 // Warn if the user has made the 'size' argument to strlcpy or strlcat 9526 // be the size of the source, instead of the destination. 
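// e.g. (illustrative):
//   char dst[16], src[32];
//   strlcpy(dst, src, sizeof(src));  // warned below; when the destination is
//                                    // a constant-size array, the note's
//                                    // fix-it suggests 'sizeof(dst)' instead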
9527 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 9528 IdentifierInfo *FnName) { 9529 9530 // Don't crash if the user has the wrong number of arguments 9531 unsigned NumArgs = Call->getNumArgs(); 9532 if ((NumArgs != 3) && (NumArgs != 4)) 9533 return; 9534 9535 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 9536 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 9537 const Expr *CompareWithSrc = nullptr; 9538 9539 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 9540 Call->getBeginLoc(), Call->getRParenLoc())) 9541 return; 9542 9543 // Look for 'strlcpy(dst, x, sizeof(x))' 9544 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 9545 CompareWithSrc = Ex; 9546 else { 9547 // Look for 'strlcpy(dst, x, strlen(x))' 9548 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 9549 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 9550 SizeCall->getNumArgs() == 1) 9551 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 9552 } 9553 } 9554 9555 if (!CompareWithSrc) 9556 return; 9557 9558 // Determine if the argument to sizeof/strlen is equal to the source 9559 // argument. In principle there's all kinds of things you could do 9560 // here, for instance creating an == expression and evaluating it with 9561 // EvaluateAsBooleanCondition, but this uses a more direct technique: 9562 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 9563 if (!SrcArgDRE) 9564 return; 9565 9566 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 9567 if (!CompareWithSrcDRE || 9568 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 9569 return; 9570 9571 const Expr *OriginalSizeArg = Call->getArg(2); 9572 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 9573 << OriginalSizeArg->getSourceRange() << FnName; 9574 9575 // Output a FIXIT hint if the destination is an array (rather than a 9576 // pointer to an array). This could be enhanced to handle some 9577 // pointers if we know the actual size, like if DstArg is 'array+2' 9578 // we could say 'sizeof(array)-2'. 9579 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 9580 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 9581 return; 9582 9583 SmallString<128> sizeString; 9584 llvm::raw_svector_ostream OS(sizeString); 9585 OS << "sizeof("; 9586 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9587 OS << ")"; 9588 9589 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 9590 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 9591 OS.str()); 9592 } 9593 9594 /// Check if two expressions refer to the same declaration. 9595 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 9596 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 9597 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 9598 return D1->getDecl() == D2->getDecl(); 9599 return false; 9600 } 9601 9602 static const Expr *getStrlenExprArg(const Expr *E) { 9603 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 9604 const FunctionDecl *FD = CE->getDirectCallee(); 9605 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 9606 return nullptr; 9607 return CE->getArg(0)->IgnoreParenCasts(); 9608 } 9609 return nullptr; 9610 } 9611 9612 // Warn on anti-patterns as the 'size' argument to strncat. 
9613 // The correct size argument should look like following: 9614 // strncat(dst, src, sizeof(dst) - strlen(dest) - 1); 9615 void Sema::CheckStrncatArguments(const CallExpr *CE, 9616 IdentifierInfo *FnName) { 9617 // Don't crash if the user has the wrong number of arguments. 9618 if (CE->getNumArgs() < 3) 9619 return; 9620 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts(); 9621 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts(); 9622 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts(); 9623 9624 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(), 9625 CE->getRParenLoc())) 9626 return; 9627 9628 // Identify common expressions, which are wrongly used as the size argument 9629 // to strncat and may lead to buffer overflows. 9630 unsigned PatternType = 0; 9631 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) { 9632 // - sizeof(dst) 9633 if (referToTheSameDecl(SizeOfArg, DstArg)) 9634 PatternType = 1; 9635 // - sizeof(src) 9636 else if (referToTheSameDecl(SizeOfArg, SrcArg)) 9637 PatternType = 2; 9638 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) { 9639 if (BE->getOpcode() == BO_Sub) { 9640 const Expr *L = BE->getLHS()->IgnoreParenCasts(); 9641 const Expr *R = BE->getRHS()->IgnoreParenCasts(); 9642 // - sizeof(dst) - strlen(dst) 9643 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) && 9644 referToTheSameDecl(DstArg, getStrlenExprArg(R))) 9645 PatternType = 1; 9646 // - sizeof(src) - (anything) 9647 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L))) 9648 PatternType = 2; 9649 } 9650 } 9651 9652 if (PatternType == 0) 9653 return; 9654 9655 // Generate the diagnostic. 9656 SourceLocation SL = LenArg->getBeginLoc(); 9657 SourceRange SR = LenArg->getSourceRange(); 9658 SourceManager &SM = getSourceManager(); 9659 9660 // If the function is defined as a builtin macro, do not show macro expansion. 9661 if (SM.isMacroArgExpansion(SL)) { 9662 SL = SM.getSpellingLoc(SL); 9663 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()), 9664 SM.getSpellingLoc(SR.getEnd())); 9665 } 9666 9667 // Check if the destination is an array (rather than a pointer to an array). 9668 QualType DstTy = DstArg->getType(); 9669 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 9670 Context); 9671 if (!isKnownSizeArray) { 9672 if (PatternType == 1) 9673 Diag(SL, diag::warn_strncat_wrong_size) << SR; 9674 else 9675 Diag(SL, diag::warn_strncat_src_size) << SR; 9676 return; 9677 } 9678 9679 if (PatternType == 1) 9680 Diag(SL, diag::warn_strncat_large_size) << SR; 9681 else 9682 Diag(SL, diag::warn_strncat_src_size) << SR; 9683 9684 SmallString<128> sizeString; 9685 llvm::raw_svector_ostream OS(sizeString); 9686 OS << "sizeof("; 9687 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9688 OS << ") - "; 9689 OS << "strlen("; 9690 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9691 OS << ") - 1"; 9692 9693 Diag(SL, diag::note_strncat_wrong_size) 9694 << FixItHint::CreateReplacement(SR, OS.str()); 9695 } 9696 9697 void 9698 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 9699 SourceLocation ReturnLoc, 9700 bool isObjCMethod, 9701 const AttrVec *Attrs, 9702 const FunctionDecl *FD) { 9703 // Check if the return value is null but should not be. 9704 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 9705 (!isObjCMethod && isNonNullType(Context, lhsType))) && 9706 CheckNonNullExpr(*this, RetValExp)) 9707 Diag(ReturnLoc, diag::warn_null_ret) 9708 << (isObjCMethod ? 
1 : 0) << RetValExp->getSourceRange();
9709
9710 // C++11 [basic.stc.dynamic.allocation]p4:
9711 // If an allocation function declared with a non-throwing
9712 // exception-specification fails to allocate storage, it shall return
9713 // a null pointer. Any other allocation function that fails to allocate
9714 // storage shall indicate failure only by throwing an exception [...]
9715 if (FD) {
9716 OverloadedOperatorKind Op = FD->getOverloadedOperator();
9717 if (Op == OO_New || Op == OO_Array_New) {
9718 const FunctionProtoType *Proto
9719 = FD->getType()->castAs<FunctionProtoType>();
9720 if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
9721 CheckNonNullExpr(*this, RetValExp))
9722 Diag(ReturnLoc, diag::warn_operator_new_returns_null)
9723 << FD << getLangOpts().CPlusPlus11;
9724 }
9725 }
9726 }
9727
9728 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
9729
9730 /// Check for comparisons of floating point operands using != and ==.
9731 /// Issue a warning if these are not self-comparisons, as they are not likely
9732 /// to do what the programmer intended.
9733 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
9734 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
9735 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
9736
9737 // Special case: check for x == x (which is OK).
9738 // Do not emit warnings for such cases.
9739 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
9740 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
9741 if (DRL->getDecl() == DRR->getDecl())
9742 return;
9743
9744 // Special case: check for comparisons against literals that can be exactly
9745 // represented by APFloat. In such cases, do not emit a warning. This
9746 // is a heuristic: often comparisons against such literals are used to
9747 // detect if a value in a variable has not changed. This clearly can
9748 // lead to false negatives.
9749 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
9750 if (FLL->isExact())
9751 return;
9752 } else
9753 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
9754 if (FLR->isExact())
9755 return;
9756
9757 // Check for comparisons with builtin function calls.
9758 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
9759 if (CL->getBuiltinCallee())
9760 return;
9761
9762 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
9763 if (CR->getBuiltinCallee())
9764 return;
9765
9766 // Emit the diagnostic.
9767 Diag(Loc, diag::warn_floatingpoint_eq)
9768 << LHS->getSourceRange() << RHS->getSourceRange();
9769 }
9770
9771 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
9772 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
9773
9774 namespace {
9775
9776 /// Structure recording the 'active' range of an integer-valued
9777 /// expression.
9778 struct IntRange {
9779 /// The number of bits active in the int.
9780 unsigned Width;
9781
9782 /// True if the int is known not to have negative values.
9783 bool NonNegative;
9784
9785 IntRange(unsigned Width, bool NonNegative)
9786 : Width(Width), NonNegative(NonNegative) {}
9787
9788 /// Returns the range of the bool type.
9789 static IntRange forBoolType() {
9790 return IntRange(1, true);
9791 }
9792
9793 /// Returns the range of an opaque value of the given integral type.
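  /// For example (illustrative): in C++, an enum whose enumerators are 0..3
  /// with no fixed underlying type yields a 2-bit, non-negative range, while a
  /// plain 'int' yields a 32-bit signed range on typical targets.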
9794   static IntRange forValueOfType(ASTContext &C, QualType T) {
9795     return forValueOfCanonicalType(C,
9796                           T->getCanonicalTypeInternal().getTypePtr());
9797   }
9798 
9799   /// Returns the range of an opaque value of a canonical integral type.
9800   static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
9801     assert(T->isCanonicalUnqualified());
9802 
9803     if (const VectorType *VT = dyn_cast<VectorType>(T))
9804       T = VT->getElementType().getTypePtr();
9805     if (const ComplexType *CT = dyn_cast<ComplexType>(T))
9806       T = CT->getElementType().getTypePtr();
9807     if (const AtomicType *AT = dyn_cast<AtomicType>(T))
9808       T = AT->getValueType().getTypePtr();
9809 
9810     if (!C.getLangOpts().CPlusPlus) {
9811       // For enum types in C code, use the underlying datatype.
9812       if (const EnumType *ET = dyn_cast<EnumType>(T))
9813         T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
9814     } else if (const EnumType *ET = dyn_cast<EnumType>(T)) {
9815       // For enum types in C++, use the known bit width of the enumerators.
9816       EnumDecl *Enum = ET->getDecl();
9817       // In C++11, enums can have a fixed underlying type. Use this type to
9818       // compute the range.
9819       if (Enum->isFixed()) {
9820         return IntRange(C.getIntWidth(QualType(T, 0)),
9821                         !ET->isSignedIntegerOrEnumerationType());
9822       }
9823 
9824       unsigned NumPositive = Enum->getNumPositiveBits();
9825       unsigned NumNegative = Enum->getNumNegativeBits();
9826 
9827       if (NumNegative == 0)
9828         return IntRange(NumPositive, true/*NonNegative*/);
9829       else
9830         return IntRange(std::max(NumPositive + 1, NumNegative),
9831                         false/*NonNegative*/);
9832     }
9833 
9834     const BuiltinType *BT = cast<BuiltinType>(T);
9835     assert(BT->isInteger());
9836 
9837     return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
9838   }
9839 
9840   /// Returns the "target" range of a canonical integral type, i.e.
9841   /// the range of values expressible in the type.
9842   ///
9843   /// This matches forValueOfCanonicalType except that enums have the
9844   /// full range of their type, not the range of their enumerators.
9845   static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
9846     assert(T->isCanonicalUnqualified());
9847 
9848     if (const VectorType *VT = dyn_cast<VectorType>(T))
9849       T = VT->getElementType().getTypePtr();
9850     if (const ComplexType *CT = dyn_cast<ComplexType>(T))
9851       T = CT->getElementType().getTypePtr();
9852     if (const AtomicType *AT = dyn_cast<AtomicType>(T))
9853       T = AT->getValueType().getTypePtr();
9854     if (const EnumType *ET = dyn_cast<EnumType>(T))
9855       T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();
9856 
9857     const BuiltinType *BT = cast<BuiltinType>(T);
9858     assert(BT->isInteger());
9859 
9860     return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
9861   }
9862 
9863   /// Returns the supremum of two ranges: i.e. their conservative merge.
9864   static IntRange join(IntRange L, IntRange R) {
9865     return IntRange(std::max(L.Width, R.Width),
9866                     L.NonNegative && R.NonNegative);
9867   }
9868 
9869   /// Returns the infimum of two ranges: i.e. their aggressive merge.
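  /// (Illustrative note, not part of the original source: meeting
  /// {Width=32, NonNegative=false} with {Width=8, NonNegative=true} yields
  /// {Width=8, NonNegative=true} -- the minimum width, non-negative if
  /// either input is.)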
9870 static IntRange meet(IntRange L, IntRange R) { 9871 return IntRange(std::min(L.Width, R.Width), 9872 L.NonNegative || R.NonNegative); 9873 } 9874 }; 9875 9876 } // namespace 9877 9878 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 9879 unsigned MaxWidth) { 9880 if (value.isSigned() && value.isNegative()) 9881 return IntRange(value.getMinSignedBits(), false); 9882 9883 if (value.getBitWidth() > MaxWidth) 9884 value = value.trunc(MaxWidth); 9885 9886 // isNonNegative() just checks the sign bit without considering 9887 // signedness. 9888 return IntRange(value.getActiveBits(), true); 9889 } 9890 9891 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 9892 unsigned MaxWidth) { 9893 if (result.isInt()) 9894 return GetValueRange(C, result.getInt(), MaxWidth); 9895 9896 if (result.isVector()) { 9897 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 9898 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 9899 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 9900 R = IntRange::join(R, El); 9901 } 9902 return R; 9903 } 9904 9905 if (result.isComplexInt()) { 9906 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 9907 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 9908 return IntRange::join(R, I); 9909 } 9910 9911 // This can happen with lossless casts to intptr_t of "based" lvalues. 9912 // Assume it might use arbitrary bits. 9913 // FIXME: The only reason we need to pass the type in here is to get 9914 // the sign right on this one case. It would be nice if APValue 9915 // preserved this. 9916 assert(result.isLValue() || result.isAddrLabelDiff()); 9917 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 9918 } 9919 9920 static QualType GetExprType(const Expr *E) { 9921 QualType Ty = E->getType(); 9922 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 9923 Ty = AtomicRHS->getValueType(); 9924 return Ty; 9925 } 9926 9927 /// Pseudo-evaluate the given integer expression, estimating the 9928 /// range of values it might take. 9929 /// 9930 /// \param MaxWidth - the width to which the value will be truncated 9931 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 9932 bool InConstantContext) { 9933 E = E->IgnoreParens(); 9934 9935 // Try a full evaluation first. 9936 Expr::EvalResult result; 9937 if (E->EvaluateAsRValue(result, C, InConstantContext)) 9938 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 9939 9940 // I think we only want to look through implicit casts here; if the 9941 // user has an explicit widening cast, we should treat the value as 9942 // being of the new, wider type. 9943 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 9944 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 9945 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext); 9946 9947 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 9948 9949 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 9950 CE->getCastKind() == CK_BooleanToSignedIntegral; 9951 9952 // Assume that non-integer casts can span the full range of the type. 9953 if (!isIntegerCast) 9954 return OutputTypeRange; 9955 9956 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 9957 std::min(MaxWidth, OutputTypeRange.Width), 9958 InConstantContext); 9959 9960 // Bail out if the subexpr's range is as wide as the cast type. 
9961 if (SubRange.Width >= OutputTypeRange.Width) 9962 return OutputTypeRange; 9963 9964 // Otherwise, we take the smaller width, and we're non-negative if 9965 // either the output type or the subexpr is. 9966 return IntRange(SubRange.Width, 9967 SubRange.NonNegative || OutputTypeRange.NonNegative); 9968 } 9969 9970 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 9971 // If we can fold the condition, just take that operand. 9972 bool CondResult; 9973 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 9974 return GetExprRange(C, 9975 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 9976 MaxWidth, InConstantContext); 9977 9978 // Otherwise, conservatively merge. 9979 IntRange L = 9980 GetExprRange(C, CO->getTrueExpr(), MaxWidth, InConstantContext); 9981 IntRange R = 9982 GetExprRange(C, CO->getFalseExpr(), MaxWidth, InConstantContext); 9983 return IntRange::join(L, R); 9984 } 9985 9986 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 9987 switch (BO->getOpcode()) { 9988 case BO_Cmp: 9989 llvm_unreachable("builtin <=> should have class type"); 9990 9991 // Boolean-valued operations are single-bit and positive. 9992 case BO_LAnd: 9993 case BO_LOr: 9994 case BO_LT: 9995 case BO_GT: 9996 case BO_LE: 9997 case BO_GE: 9998 case BO_EQ: 9999 case BO_NE: 10000 return IntRange::forBoolType(); 10001 10002 // The type of the assignments is the type of the LHS, so the RHS 10003 // is not necessarily the same type. 10004 case BO_MulAssign: 10005 case BO_DivAssign: 10006 case BO_RemAssign: 10007 case BO_AddAssign: 10008 case BO_SubAssign: 10009 case BO_XorAssign: 10010 case BO_OrAssign: 10011 // TODO: bitfields? 10012 return IntRange::forValueOfType(C, GetExprType(E)); 10013 10014 // Simple assignments just pass through the RHS, which will have 10015 // been coerced to the LHS type. 10016 case BO_Assign: 10017 // TODO: bitfields? 10018 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10019 10020 // Operations with opaque sources are black-listed. 10021 case BO_PtrMemD: 10022 case BO_PtrMemI: 10023 return IntRange::forValueOfType(C, GetExprType(E)); 10024 10025 // Bitwise-and uses the *infinum* of the two source ranges. 10026 case BO_And: 10027 case BO_AndAssign: 10028 return IntRange::meet( 10029 GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext), 10030 GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext)); 10031 10032 // Left shift gets black-listed based on a judgement call. 10033 case BO_Shl: 10034 // ...except that we want to treat '1 << (blah)' as logically 10035 // positive. It's an important idiom. 10036 if (IntegerLiteral *I 10037 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 10038 if (I->getValue() == 1) { 10039 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 10040 return IntRange(R.Width, /*NonNegative*/ true); 10041 } 10042 } 10043 LLVM_FALLTHROUGH; 10044 10045 case BO_ShlAssign: 10046 return IntRange::forValueOfType(C, GetExprType(E)); 10047 10048 // Right shift by a constant can narrow its left argument. 10049 case BO_Shr: 10050 case BO_ShrAssign: { 10051 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext); 10052 10053 // If the shift amount is a positive constant, drop the width by 10054 // that much. 10055 llvm::APSInt shift; 10056 if (BO->getRHS()->isIntegerConstantExpr(shift, C) && 10057 shift.isNonNegative()) { 10058 unsigned zext = shift.getZExtValue(); 10059 if (zext >= L.Width) 10060 L.Width = (L.NonNegative ? 
0 : 1); 10061 else 10062 L.Width -= zext; 10063 } 10064 10065 return L; 10066 } 10067 10068 // Comma acts as its right operand. 10069 case BO_Comma: 10070 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10071 10072 // Black-list pointer subtractions. 10073 case BO_Sub: 10074 if (BO->getLHS()->getType()->isPointerType()) 10075 return IntRange::forValueOfType(C, GetExprType(E)); 10076 break; 10077 10078 // The width of a division result is mostly determined by the size 10079 // of the LHS. 10080 case BO_Div: { 10081 // Don't 'pre-truncate' the operands. 10082 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10083 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext); 10084 10085 // If the divisor is constant, use that. 10086 llvm::APSInt divisor; 10087 if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) { 10088 unsigned log2 = divisor.logBase2(); // floor(log_2(divisor)) 10089 if (log2 >= L.Width) 10090 L.Width = (L.NonNegative ? 0 : 1); 10091 else 10092 L.Width = std::min(L.Width - log2, MaxWidth); 10093 return L; 10094 } 10095 10096 // Otherwise, just use the LHS's width. 10097 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext); 10098 return IntRange(L.Width, L.NonNegative && R.NonNegative); 10099 } 10100 10101 // The result of a remainder can't be larger than the result of 10102 // either side. 10103 case BO_Rem: { 10104 // Don't 'pre-truncate' the operands. 10105 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10106 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext); 10107 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext); 10108 10109 IntRange meet = IntRange::meet(L, R); 10110 meet.Width = std::min(meet.Width, MaxWidth); 10111 return meet; 10112 } 10113 10114 // The default behavior is okay for these. 10115 case BO_Mul: 10116 case BO_Add: 10117 case BO_Xor: 10118 case BO_Or: 10119 break; 10120 } 10121 10122 // The default case is to treat the operation as if it were closed 10123 // on the narrowest type that encompasses both operands. 10124 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext); 10125 IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10126 return IntRange::join(L, R); 10127 } 10128 10129 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 10130 switch (UO->getOpcode()) { 10131 // Boolean-valued operations are white-listed. 10132 case UO_LNot: 10133 return IntRange::forBoolType(); 10134 10135 // Operations with opaque sources are black-listed. 10136 case UO_Deref: 10137 case UO_AddrOf: // should be impossible 10138 return IntRange::forValueOfType(C, GetExprType(E)); 10139 10140 default: 10141 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext); 10142 } 10143 } 10144 10145 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 10146 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext); 10147 10148 if (const auto *BitField = E->getSourceBitField()) 10149 return IntRange(BitField->getBitWidthValue(C), 10150 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 10151 10152 return IntRange::forValueOfType(C, GetExprType(E)); 10153 } 10154 10155 static IntRange GetExprRange(ASTContext &C, const Expr *E, 10156 bool InConstantContext) { 10157 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext); 10158 } 10159 10160 /// Checks whether the given value, which currently has the given 10161 /// source semantics, has the same value when coerced through the 10162 /// target semantics. 
10163 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 10164 const llvm::fltSemantics &Src, 10165 const llvm::fltSemantics &Tgt) { 10166 llvm::APFloat truncated = value; 10167 10168 bool ignored; 10169 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 10170 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 10171 10172 return truncated.bitwiseIsEqual(value); 10173 } 10174 10175 /// Checks whether the given value, which currently has the given 10176 /// source semantics, has the same value when coerced through the 10177 /// target semantics. 10178 /// 10179 /// The value might be a vector of floats (or a complex number). 10180 static bool IsSameFloatAfterCast(const APValue &value, 10181 const llvm::fltSemantics &Src, 10182 const llvm::fltSemantics &Tgt) { 10183 if (value.isFloat()) 10184 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 10185 10186 if (value.isVector()) { 10187 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 10188 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 10189 return false; 10190 return true; 10191 } 10192 10193 assert(value.isComplexFloat()); 10194 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 10195 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 10196 } 10197 10198 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC); 10199 10200 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 10201 // Suppress cases where we are comparing against an enum constant. 10202 if (const DeclRefExpr *DR = 10203 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 10204 if (isa<EnumConstantDecl>(DR->getDecl())) 10205 return true; 10206 10207 // Suppress cases where the value is expanded from a macro, unless that macro 10208 // is how a language represents a boolean literal. This is the case in both C 10209 // and Objective-C. 10210 SourceLocation BeginLoc = E->getBeginLoc(); 10211 if (BeginLoc.isMacroID()) { 10212 StringRef MacroName = Lexer::getImmediateMacroName( 10213 BeginLoc, S.getSourceManager(), S.getLangOpts()); 10214 return MacroName != "YES" && MacroName != "NO" && 10215 MacroName != "true" && MacroName != "false"; 10216 } 10217 10218 return false; 10219 } 10220 10221 static bool isKnownToHaveUnsignedValue(Expr *E) { 10222 return E->getType()->isIntegerType() && 10223 (!E->getType()->isSignedIntegerType() || 10224 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 10225 } 10226 10227 namespace { 10228 /// The promoted range of values of a type. In general this has the 10229 /// following structure: 10230 /// 10231 /// |-----------| . . . |-----------| 10232 /// ^ ^ ^ ^ 10233 /// Min HoleMin HoleMax Max 10234 /// 10235 /// ... where there is only a hole if a signed type is promoted to unsigned 10236 /// (in which case Min and Max are the smallest and largest representable 10237 /// values). 10238 struct PromotedRange { 10239 // Min, or HoleMax if there is a hole. 10240 llvm::APSInt PromotedMin; 10241 // Max, or HoleMin if there is a hole. 10242 llvm::APSInt PromotedMax; 10243 10244 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 10245 if (R.Width == 0) 10246 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 10247 else if (R.Width >= BitWidth && !Unsigned) { 10248 // Promotion made the type *narrower*. This happens when promoting 10249 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 10250 // Treat all values of 'signed int' as being in range for now. 
10251 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 10252 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 10253 } else { 10254 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 10255 .extOrTrunc(BitWidth); 10256 PromotedMin.setIsUnsigned(Unsigned); 10257 10258 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 10259 .extOrTrunc(BitWidth); 10260 PromotedMax.setIsUnsigned(Unsigned); 10261 } 10262 } 10263 10264 // Determine whether this range is contiguous (has no hole). 10265 bool isContiguous() const { return PromotedMin <= PromotedMax; } 10266 10267 // Where a constant value is within the range. 10268 enum ComparisonResult { 10269 LT = 0x1, 10270 LE = 0x2, 10271 GT = 0x4, 10272 GE = 0x8, 10273 EQ = 0x10, 10274 NE = 0x20, 10275 InRangeFlag = 0x40, 10276 10277 Less = LE | LT | NE, 10278 Min = LE | InRangeFlag, 10279 InRange = InRangeFlag, 10280 Max = GE | InRangeFlag, 10281 Greater = GE | GT | NE, 10282 10283 OnlyValue = LE | GE | EQ | InRangeFlag, 10284 InHole = NE 10285 }; 10286 10287 ComparisonResult compare(const llvm::APSInt &Value) const { 10288 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 10289 Value.isUnsigned() == PromotedMin.isUnsigned()); 10290 if (!isContiguous()) { 10291 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 10292 if (Value.isMinValue()) return Min; 10293 if (Value.isMaxValue()) return Max; 10294 if (Value >= PromotedMin) return InRange; 10295 if (Value <= PromotedMax) return InRange; 10296 return InHole; 10297 } 10298 10299 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 10300 case -1: return Less; 10301 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 10302 case 1: 10303 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 10304 case -1: return InRange; 10305 case 0: return Max; 10306 case 1: return Greater; 10307 } 10308 } 10309 10310 llvm_unreachable("impossible compare result"); 10311 } 10312 10313 static llvm::Optional<StringRef> 10314 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 10315 if (Op == BO_Cmp) { 10316 ComparisonResult LTFlag = LT, GTFlag = GT; 10317 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 10318 10319 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 10320 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 10321 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 10322 return llvm::None; 10323 } 10324 10325 ComparisonResult TrueFlag, FalseFlag; 10326 if (Op == BO_EQ) { 10327 TrueFlag = EQ; 10328 FalseFlag = NE; 10329 } else if (Op == BO_NE) { 10330 TrueFlag = NE; 10331 FalseFlag = EQ; 10332 } else { 10333 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 10334 TrueFlag = LT; 10335 FalseFlag = GE; 10336 } else { 10337 TrueFlag = GT; 10338 FalseFlag = LE; 10339 } 10340 if (Op == BO_GE || Op == BO_LE) 10341 std::swap(TrueFlag, FalseFlag); 10342 } 10343 if (R & TrueFlag) 10344 return StringRef("true"); 10345 if (R & FalseFlag) 10346 return StringRef("false"); 10347 return llvm::None; 10348 } 10349 }; 10350 } 10351 10352 static bool HasEnumType(Expr *E) { 10353 // Strip off implicit integral promotions. 
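  // (Illustrative note, not part of the original source: in an expression
  // such as 'e == 0', an operand 'e' of enumeration type is usually wrapped
  // in an implicit integral promotion to 'int'; only CK_IntegralCast and
  // CK_NoOp casts are looked through here.)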
10354   while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
10355     if (ICE->getCastKind() != CK_IntegralCast &&
10356         ICE->getCastKind() != CK_NoOp)
10357       break;
10358     E = ICE->getSubExpr();
10359   }
10360 
10361   return E->getType()->isEnumeralType();
10362 }
10363 
10364 static int classifyConstantValue(Expr *Constant) {
10365   // The values of this enumeration are used in the diagnostics
10366   // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
10367   enum ConstantValueKind {
10368     Miscellaneous = 0,
10369     LiteralTrue,
10370     LiteralFalse
10371   };
10372   if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
10373     return BL->getValue() ? ConstantValueKind::LiteralTrue
10374                           : ConstantValueKind::LiteralFalse;
10375   return ConstantValueKind::Miscellaneous;
10376 }
10377 
10378 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
10379                                         Expr *Constant, Expr *Other,
10380                                         const llvm::APSInt &Value,
10381                                         bool RhsConstant) {
10382   if (S.inTemplateInstantiation())
10383     return false;
10384 
10385   Expr *OriginalOther = Other;
10386 
10387   Constant = Constant->IgnoreParenImpCasts();
10388   Other = Other->IgnoreParenImpCasts();
10389 
10390   // Suppress warnings on tautological comparisons between values of the same
10391   // enumeration type. There are only two ways we could warn on this:
10392   //  - If the constant is outside the range of representable values of
10393   //    the enumeration. In such a case, we should warn about the cast
10394   //    to enumeration type, not about the comparison.
10395   //  - If the constant is the maximum / minimum in-range value. For an
10396   //    enumeration type, such comparisons can be meaningful and useful.
10397   if (Constant->getType()->isEnumeralType() &&
10398       S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
10399     return false;
10400 
10401   // TODO: Investigate using GetExprRange() to get tighter bounds
10402   // on the bit ranges.
10403   QualType OtherT = Other->getType();
10404   if (const auto *AT = OtherT->getAs<AtomicType>())
10405     OtherT = AT->getValueType();
10406   IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT);
10407 
10408   // Special case for ObjC BOOL on targets where it's a typedef for a signed char
10409   // (namely, macOS).
10410   bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
10411                               S.NSAPIObj->isObjCBOOLType(OtherT) &&
10412                               OtherT->isSpecificBuiltinType(BuiltinType::SChar);
10413 
10414   // Whether we're treating Other as being a bool because of the form of
10415   // expression despite it having another type (typically 'int' in C).
10416   bool OtherIsBooleanDespiteType =
10417       !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
10418   if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
10419     OtherRange = IntRange::forBoolType();
10420 
10421   // Determine the promoted range of the other type and see if a comparison of
10422   // the constant against that range is tautological.
10423   PromotedRange OtherPromotedRange(OtherRange, Value.getBitWidth(),
10424                                    Value.isUnsigned());
10425   auto Cmp = OtherPromotedRange.compare(Value);
10426   auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
10427   if (!Result)
10428     return false;
10429 
10430   // Suppress the diagnostic for an in-range comparison if the constant comes
10431   // from a macro or enumerator. We don't want to diagnose
10432   //
10433   //   some_long_value <= INT_MAX
10434   //
10435   // when sizeof(int) == sizeof(long).
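  // (Illustrative note, not part of the original source: an in-range
  // comparison against an enumerator, e.g. 'x <= SomeEnum_Max' for a
  // hypothetical enumerator SomeEnum_Max, is suppressed the same way via
  // IsEnumConstOrFromMacro().)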
10436 bool InRange = Cmp & PromotedRange::InRangeFlag; 10437 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 10438 return false; 10439 10440 // If this is a comparison to an enum constant, include that 10441 // constant in the diagnostic. 10442 const EnumConstantDecl *ED = nullptr; 10443 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 10444 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 10445 10446 // Should be enough for uint128 (39 decimal digits) 10447 SmallString<64> PrettySourceValue; 10448 llvm::raw_svector_ostream OS(PrettySourceValue); 10449 if (ED) { 10450 OS << '\'' << *ED << "' (" << Value << ")"; 10451 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 10452 Constant->IgnoreParenImpCasts())) { 10453 OS << (BL->getValue() ? "YES" : "NO"); 10454 } else { 10455 OS << Value; 10456 } 10457 10458 if (IsObjCSignedCharBool) { 10459 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 10460 S.PDiag(diag::warn_tautological_compare_objc_bool) 10461 << OS.str() << *Result); 10462 return true; 10463 } 10464 10465 // FIXME: We use a somewhat different formatting for the in-range cases and 10466 // cases involving boolean values for historical reasons. We should pick a 10467 // consistent way of presenting these diagnostics. 10468 if (!InRange || Other->isKnownToHaveBooleanValue()) { 10469 10470 S.DiagRuntimeBehavior( 10471 E->getOperatorLoc(), E, 10472 S.PDiag(!InRange ? diag::warn_out_of_range_compare 10473 : diag::warn_tautological_bool_compare) 10474 << OS.str() << classifyConstantValue(Constant) << OtherT 10475 << OtherIsBooleanDespiteType << *Result 10476 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 10477 } else { 10478 unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 10479 ? (HasEnumType(OriginalOther) 10480 ? diag::warn_unsigned_enum_always_true_comparison 10481 : diag::warn_unsigned_always_true_comparison) 10482 : diag::warn_tautological_constant_compare; 10483 10484 S.Diag(E->getOperatorLoc(), Diag) 10485 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 10486 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 10487 } 10488 10489 return true; 10490 } 10491 10492 /// Analyze the operands of the given comparison. Implements the 10493 /// fallback case from AnalyzeComparison. 10494 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 10495 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 10496 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 10497 } 10498 10499 /// Implements -Wsign-compare. 10500 /// 10501 /// \param E the binary operator to check for warnings 10502 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 10503 // The type the comparison is being performed in. 10504 QualType T = E->getLHS()->getType(); 10505 10506 // Only analyze comparison operators where both sides have been converted to 10507 // the same type. 10508 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 10509 return AnalyzeImpConvsInComparison(S, E); 10510 10511 // Don't analyze value-dependent comparisons directly. 
10512 if (E->isValueDependent()) 10513 return AnalyzeImpConvsInComparison(S, E); 10514 10515 Expr *LHS = E->getLHS(); 10516 Expr *RHS = E->getRHS(); 10517 10518 if (T->isIntegralType(S.Context)) { 10519 llvm::APSInt RHSValue; 10520 llvm::APSInt LHSValue; 10521 10522 bool IsRHSIntegralLiteral = RHS->isIntegerConstantExpr(RHSValue, S.Context); 10523 bool IsLHSIntegralLiteral = LHS->isIntegerConstantExpr(LHSValue, S.Context); 10524 10525 // We don't care about expressions whose result is a constant. 10526 if (IsRHSIntegralLiteral && IsLHSIntegralLiteral) 10527 return AnalyzeImpConvsInComparison(S, E); 10528 10529 // We only care about expressions where just one side is literal 10530 if (IsRHSIntegralLiteral ^ IsLHSIntegralLiteral) { 10531 // Is the constant on the RHS or LHS? 10532 const bool RhsConstant = IsRHSIntegralLiteral; 10533 Expr *Const = RhsConstant ? RHS : LHS; 10534 Expr *Other = RhsConstant ? LHS : RHS; 10535 const llvm::APSInt &Value = RhsConstant ? RHSValue : LHSValue; 10536 10537 // Check whether an integer constant comparison results in a value 10538 // of 'true' or 'false'. 10539 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 10540 return AnalyzeImpConvsInComparison(S, E); 10541 } 10542 } 10543 10544 if (!T->hasUnsignedIntegerRepresentation()) { 10545 // We don't do anything special if this isn't an unsigned integral 10546 // comparison: we're only interested in integral comparisons, and 10547 // signed comparisons only happen in cases we don't care to warn about. 10548 return AnalyzeImpConvsInComparison(S, E); 10549 } 10550 10551 LHS = LHS->IgnoreParenImpCasts(); 10552 RHS = RHS->IgnoreParenImpCasts(); 10553 10554 if (!S.getLangOpts().CPlusPlus) { 10555 // Avoid warning about comparison of integers with different signs when 10556 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 10557 // the type of `E`. 10558 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 10559 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 10560 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 10561 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 10562 } 10563 10564 // Check to see if one of the (unmodified) operands is of different 10565 // signedness. 10566 Expr *signedOperand, *unsignedOperand; 10567 if (LHS->getType()->hasSignedIntegerRepresentation()) { 10568 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 10569 "unsigned comparison between two signed integer expressions?"); 10570 signedOperand = LHS; 10571 unsignedOperand = RHS; 10572 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 10573 signedOperand = RHS; 10574 unsignedOperand = LHS; 10575 } else { 10576 return AnalyzeImpConvsInComparison(S, E); 10577 } 10578 10579 // Otherwise, calculate the effective range of the signed operand. 10580 IntRange signedRange = 10581 GetExprRange(S.Context, signedOperand, S.isConstantEvaluated()); 10582 10583 // Go ahead and analyze implicit conversions in the operands. Note 10584 // that we skip the implicit conversions on both sides. 10585 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 10586 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 10587 10588 // If the signed range is non-negative, -Wsign-compare won't fire. 
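  // (Illustrative note, not part of the original source: for 'unsigned u'
  // and 'int x', a comparison like 'u < (x & 0xff)' is not diagnosed, since
  // GetExprRange() proves the masked operand is non-negative.)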
10589 if (signedRange.NonNegative) 10590 return; 10591 10592 // For (in)equality comparisons, if the unsigned operand is a 10593 // constant which cannot collide with a overflowed signed operand, 10594 // then reinterpreting the signed operand as unsigned will not 10595 // change the result of the comparison. 10596 if (E->isEqualityOp()) { 10597 unsigned comparisonWidth = S.Context.getIntWidth(T); 10598 IntRange unsignedRange = 10599 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated()); 10600 10601 // We should never be unable to prove that the unsigned operand is 10602 // non-negative. 10603 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 10604 10605 if (unsignedRange.Width < comparisonWidth) 10606 return; 10607 } 10608 10609 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 10610 S.PDiag(diag::warn_mixed_sign_comparison) 10611 << LHS->getType() << RHS->getType() 10612 << LHS->getSourceRange() << RHS->getSourceRange()); 10613 } 10614 10615 /// Analyzes an attempt to assign the given value to a bitfield. 10616 /// 10617 /// Returns true if there was something fishy about the attempt. 10618 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 10619 SourceLocation InitLoc) { 10620 assert(Bitfield->isBitField()); 10621 if (Bitfield->isInvalidDecl()) 10622 return false; 10623 10624 // White-list bool bitfields. 10625 QualType BitfieldType = Bitfield->getType(); 10626 if (BitfieldType->isBooleanType()) 10627 return false; 10628 10629 if (BitfieldType->isEnumeralType()) { 10630 EnumDecl *BitfieldEnumDecl = BitfieldType->getAs<EnumType>()->getDecl(); 10631 // If the underlying enum type was not explicitly specified as an unsigned 10632 // type and the enum contain only positive values, MSVC++ will cause an 10633 // inconsistency by storing this as a signed type. 10634 if (S.getLangOpts().CPlusPlus11 && 10635 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 10636 BitfieldEnumDecl->getNumPositiveBits() > 0 && 10637 BitfieldEnumDecl->getNumNegativeBits() == 0) { 10638 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 10639 << BitfieldEnumDecl->getNameAsString(); 10640 } 10641 } 10642 10643 if (Bitfield->getType()->isBooleanType()) 10644 return false; 10645 10646 // Ignore value- or type-dependent expressions. 10647 if (Bitfield->getBitWidth()->isValueDependent() || 10648 Bitfield->getBitWidth()->isTypeDependent() || 10649 Init->isValueDependent() || 10650 Init->isTypeDependent()) 10651 return false; 10652 10653 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 10654 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 10655 10656 Expr::EvalResult Result; 10657 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 10658 Expr::SE_AllowSideEffects)) { 10659 // The RHS is not constant. If the RHS has an enum type, make sure the 10660 // bitfield is wide enough to hold all the values of the enum without 10661 // truncation. 10662 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 10663 EnumDecl *ED = EnumTy->getDecl(); 10664 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 10665 10666 // Enum types are implicitly signed on Windows, so check if there are any 10667 // negative enumerators to see if the enum was intended to be signed or 10668 // not. 10669 bool SignedEnum = ED->getNumNegativeBits() > 0; 10670 10671 // Check for surprising sign changes when assigning enum values to a 10672 // bitfield of different signedness. 
If the bitfield is signed and we 10673 // have exactly the right number of bits to store this unsigned enum, 10674 // suggest changing the enum to an unsigned type. This typically happens 10675 // on Windows where unfixed enums always use an underlying type of 'int'. 10676 unsigned DiagID = 0; 10677 if (SignedEnum && !SignedBitfield) { 10678 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 10679 } else if (SignedBitfield && !SignedEnum && 10680 ED->getNumPositiveBits() == FieldWidth) { 10681 DiagID = diag::warn_signed_bitfield_enum_conversion; 10682 } 10683 10684 if (DiagID) { 10685 S.Diag(InitLoc, DiagID) << Bitfield << ED; 10686 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 10687 SourceRange TypeRange = 10688 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 10689 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 10690 << SignedEnum << TypeRange; 10691 } 10692 10693 // Compute the required bitwidth. If the enum has negative values, we need 10694 // one more bit than the normal number of positive bits to represent the 10695 // sign bit. 10696 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 10697 ED->getNumNegativeBits()) 10698 : ED->getNumPositiveBits(); 10699 10700 // Check the bitwidth. 10701 if (BitsNeeded > FieldWidth) { 10702 Expr *WidthExpr = Bitfield->getBitWidth(); 10703 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 10704 << Bitfield << ED; 10705 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 10706 << BitsNeeded << ED << WidthExpr->getSourceRange(); 10707 } 10708 } 10709 10710 return false; 10711 } 10712 10713 llvm::APSInt Value = Result.Val.getInt(); 10714 10715 unsigned OriginalWidth = Value.getBitWidth(); 10716 10717 if (!Value.isSigned() || Value.isNegative()) 10718 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 10719 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 10720 OriginalWidth = Value.getMinSignedBits(); 10721 10722 if (OriginalWidth <= FieldWidth) 10723 return false; 10724 10725 // Compute the value which the bitfield will contain. 10726 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 10727 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 10728 10729 // Check whether the stored value is equal to the original value. 10730 TruncatedValue = TruncatedValue.extend(OriginalWidth); 10731 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 10732 return false; 10733 10734 // Special-case bitfields of width 1: booleans are naturally 0/1, and 10735 // therefore don't strictly fit into a signed bitfield of width 1. 10736 if (FieldWidth == 1 && Value == 1) 10737 return false; 10738 10739 std::string PrettyValue = Value.toString(10); 10740 std::string PrettyTrunc = TruncatedValue.toString(10); 10741 10742 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 10743 << PrettyValue << PrettyTrunc << OriginalInit->getType() 10744 << Init->getSourceRange(); 10745 10746 return true; 10747 } 10748 10749 /// Analyze the given simple or compound assignment for warning-worthy 10750 /// operations. 10751 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 10752 // Just recurse on the LHS. 10753 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 10754 10755 // We want to recurse on the RHS as normal unless we're assigning to 10756 // a bitfield. 
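  // (Illustrative note, not part of the original source: for a declaration
  // like 'struct S { int x : 3; };', the constant assignment 's.x = 8' is
  // diagnosed by AnalyzeBitFieldAssignment() below because 8 truncates to 0
  // in a 3-bit field.)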
10757 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 10758 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 10759 E->getOperatorLoc())) { 10760 // Recurse, ignoring any implicit conversions on the RHS. 10761 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 10762 E->getOperatorLoc()); 10763 } 10764 } 10765 10766 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 10767 10768 // Diagnose implicitly sequentially-consistent atomic assignment. 10769 if (E->getLHS()->getType()->isAtomicType()) 10770 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 10771 } 10772 10773 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 10774 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 10775 SourceLocation CContext, unsigned diag, 10776 bool pruneControlFlow = false) { 10777 if (pruneControlFlow) { 10778 S.DiagRuntimeBehavior(E->getExprLoc(), E, 10779 S.PDiag(diag) 10780 << SourceType << T << E->getSourceRange() 10781 << SourceRange(CContext)); 10782 return; 10783 } 10784 S.Diag(E->getExprLoc(), diag) 10785 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 10786 } 10787 10788 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 10789 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 10790 SourceLocation CContext, 10791 unsigned diag, bool pruneControlFlow = false) { 10792 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 10793 } 10794 10795 /// Diagnose an implicit cast from a floating point value to an integer value. 10796 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 10797 SourceLocation CContext) { 10798 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 10799 const bool PruneWarnings = S.inTemplateInstantiation(); 10800 10801 Expr *InnerE = E->IgnoreParenImpCasts(); 10802 // We also want to warn on, e.g., "int i = -1.234" 10803 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 10804 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 10805 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 10806 10807 const bool IsLiteral = 10808 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 10809 10810 llvm::APFloat Value(0.0); 10811 bool IsConstant = 10812 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 10813 if (!IsConstant) { 10814 return DiagnoseImpCast(S, E, T, CContext, 10815 diag::warn_impcast_float_integer, PruneWarnings); 10816 } 10817 10818 bool isExact = false; 10819 10820 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 10821 T->hasUnsignedIntegerRepresentation()); 10822 llvm::APFloat::opStatus Result = Value.convertToInteger( 10823 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 10824 10825 if (Result == llvm::APFloat::opOK && isExact) { 10826 if (IsLiteral) return; 10827 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 10828 PruneWarnings); 10829 } 10830 10831 // Conversion of a floating-point value to a non-bool integer where the 10832 // integral part cannot be represented by the integer type is undefined. 10833 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 10834 return DiagnoseImpCast( 10835 S, E, T, CContext, 10836 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 10837 : diag::warn_impcast_float_to_integer_out_of_range, 10838 PruneWarnings); 10839 10840 unsigned DiagID = 0; 10841 if (IsLiteral) { 10842 // Warn on floating point literal to integer. 
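    // (Illustrative note, not part of the original source: e.g. 'int i = 1.5;'
    // takes this path -- the literal is constant but has no exact integer
    // representation.)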
10843 DiagID = diag::warn_impcast_literal_float_to_integer; 10844 } else if (IntegerValue == 0) { 10845 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 10846 return DiagnoseImpCast(S, E, T, CContext, 10847 diag::warn_impcast_float_integer, PruneWarnings); 10848 } 10849 // Warn on non-zero to zero conversion. 10850 DiagID = diag::warn_impcast_float_to_integer_zero; 10851 } else { 10852 if (IntegerValue.isUnsigned()) { 10853 if (!IntegerValue.isMaxValue()) { 10854 return DiagnoseImpCast(S, E, T, CContext, 10855 diag::warn_impcast_float_integer, PruneWarnings); 10856 } 10857 } else { // IntegerValue.isSigned() 10858 if (!IntegerValue.isMaxSignedValue() && 10859 !IntegerValue.isMinSignedValue()) { 10860 return DiagnoseImpCast(S, E, T, CContext, 10861 diag::warn_impcast_float_integer, PruneWarnings); 10862 } 10863 } 10864 // Warn on evaluatable floating point expression to integer conversion. 10865 DiagID = diag::warn_impcast_float_to_integer; 10866 } 10867 10868 // FIXME: Force the precision of the source value down so we don't print 10869 // digits which are usually useless (we don't really care here if we 10870 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 10871 // would automatically print the shortest representation, but it's a bit 10872 // tricky to implement. 10873 SmallString<16> PrettySourceValue; 10874 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 10875 precision = (precision * 59 + 195) / 196; 10876 Value.toString(PrettySourceValue, precision); 10877 10878 SmallString<16> PrettyTargetValue; 10879 if (IsBool) 10880 PrettyTargetValue = Value.isZero() ? "false" : "true"; 10881 else 10882 IntegerValue.toString(PrettyTargetValue); 10883 10884 if (PruneWarnings) { 10885 S.DiagRuntimeBehavior(E->getExprLoc(), E, 10886 S.PDiag(DiagID) 10887 << E->getType() << T.getUnqualifiedType() 10888 << PrettySourceValue << PrettyTargetValue 10889 << E->getSourceRange() << SourceRange(CContext)); 10890 } else { 10891 S.Diag(E->getExprLoc(), DiagID) 10892 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 10893 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 10894 } 10895 } 10896 10897 /// Analyze the given compound assignment for the possible losing of 10898 /// floating-point precision. 10899 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 10900 assert(isa<CompoundAssignOperator>(E) && 10901 "Must be compound assignment operation"); 10902 // Recurse on the LHS and RHS in here 10903 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 10904 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 10905 10906 if (E->getLHS()->getType()->isAtomicType()) 10907 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 10908 10909 // Now check the outermost expression 10910 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 10911 const auto *RBT = cast<CompoundAssignOperator>(E) 10912 ->getComputationResultType() 10913 ->getAs<BuiltinType>(); 10914 10915 // The below checks assume source is floating point. 10916 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 10917 10918 // If source is floating point but target is an integer. 
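  // (Illustrative note, not part of the original source: for 'int i;', the
  // compound assignment 'i += 0.5;' computes in floating point and converts
  // the result back to 'int', which is what gets diagnosed here.)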
10919 if (ResultBT->isInteger()) 10920 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 10921 E->getExprLoc(), diag::warn_impcast_float_integer); 10922 10923 if (!ResultBT->isFloatingPoint()) 10924 return; 10925 10926 // If both source and target are floating points, warn about losing precision. 10927 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 10928 QualType(ResultBT, 0), QualType(RBT, 0)); 10929 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 10930 // warn about dropping FP rank. 10931 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 10932 diag::warn_impcast_float_result_precision); 10933 } 10934 10935 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 10936 IntRange Range) { 10937 if (!Range.Width) return "0"; 10938 10939 llvm::APSInt ValueInRange = Value; 10940 ValueInRange.setIsSigned(!Range.NonNegative); 10941 ValueInRange = ValueInRange.trunc(Range.Width); 10942 return ValueInRange.toString(10); 10943 } 10944 10945 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 10946 if (!isa<ImplicitCastExpr>(Ex)) 10947 return false; 10948 10949 Expr *InnerE = Ex->IgnoreParenImpCasts(); 10950 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 10951 const Type *Source = 10952 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 10953 if (Target->isDependentType()) 10954 return false; 10955 10956 const BuiltinType *FloatCandidateBT = 10957 dyn_cast<BuiltinType>(ToBool ? Source : Target); 10958 const Type *BoolCandidateType = ToBool ? Target : Source; 10959 10960 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 10961 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 10962 } 10963 10964 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 10965 SourceLocation CC) { 10966 unsigned NumArgs = TheCall->getNumArgs(); 10967 for (unsigned i = 0; i < NumArgs; ++i) { 10968 Expr *CurrA = TheCall->getArg(i); 10969 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 10970 continue; 10971 10972 bool IsSwapped = ((i > 0) && 10973 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 10974 IsSwapped |= ((i < (NumArgs - 1)) && 10975 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 10976 if (IsSwapped) { 10977 // Warn on this floating-point to bool conversion. 10978 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 10979 CurrA->getType(), CC, 10980 diag::warn_impcast_floating_point_to_bool); 10981 } 10982 } 10983 } 10984 10985 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 10986 SourceLocation CC) { 10987 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 10988 E->getExprLoc())) 10989 return; 10990 10991 // Don't warn on functions which have return type nullptr_t. 10992 if (isa<CallExpr>(E)) 10993 return; 10994 10995 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 10996 const Expr::NullPointerConstantKind NullKind = 10997 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 10998 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 10999 return; 11000 11001 // Return if target type is a safe conversion. 
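  // (Illustrative note, not part of the original source: 'int fd = NULL;' is
  // the kind of code diagnosed below, while 'void *p = NULL;' returns here
  // because the target is a pointer type.)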
11002 if (T->isAnyPointerType() || T->isBlockPointerType() || 11003 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 11004 return; 11005 11006 SourceLocation Loc = E->getSourceRange().getBegin(); 11007 11008 // Venture through the macro stacks to get to the source of macro arguments. 11009 // The new location is a better location than the complete location that was 11010 // passed in. 11011 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 11012 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 11013 11014 // __null is usually wrapped in a macro. Go up a macro if that is the case. 11015 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 11016 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 11017 Loc, S.SourceMgr, S.getLangOpts()); 11018 if (MacroName == "NULL") 11019 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 11020 } 11021 11022 // Only warn if the null and context location are in the same macro expansion. 11023 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 11024 return; 11025 11026 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 11027 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 11028 << FixItHint::CreateReplacement(Loc, 11029 S.getFixItZeroLiteralForType(T, Loc)); 11030 } 11031 11032 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11033 ObjCArrayLiteral *ArrayLiteral); 11034 11035 static void 11036 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 11037 ObjCDictionaryLiteral *DictionaryLiteral); 11038 11039 /// Check a single element within a collection literal against the 11040 /// target element type. 11041 static void checkObjCCollectionLiteralElement(Sema &S, 11042 QualType TargetElementType, 11043 Expr *Element, 11044 unsigned ElementKind) { 11045 // Skip a bitcast to 'id' or qualified 'id'. 11046 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 11047 if (ICE->getCastKind() == CK_BitCast && 11048 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 11049 Element = ICE->getSubExpr(); 11050 } 11051 11052 QualType ElementType = Element->getType(); 11053 ExprResult ElementResult(Element); 11054 if (ElementType->getAs<ObjCObjectPointerType>() && 11055 S.CheckSingleAssignmentConstraints(TargetElementType, 11056 ElementResult, 11057 false, false) 11058 != Sema::Compatible) { 11059 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 11060 << ElementType << ElementKind << TargetElementType 11061 << Element->getSourceRange(); 11062 } 11063 11064 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 11065 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 11066 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 11067 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 11068 } 11069 11070 /// Check an Objective-C array literal being converted to the given 11071 /// target type. 
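/// (Illustrative note, not part of the original source: assigning the
/// literal '@[@1]' to a variable of type 'NSArray<NSString *> *' warns that
/// the NSNumber element does not match the specialized element type.)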
11072 static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
11073                                   ObjCArrayLiteral *ArrayLiteral) {
11074   if (!S.NSArrayDecl)
11075     return;
11076 
11077   const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
11078   if (!TargetObjCPtr)
11079     return;
11080 
11081   if (TargetObjCPtr->isUnspecialized() ||
11082       TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
11083         != S.NSArrayDecl->getCanonicalDecl())
11084     return;
11085 
11086   auto TypeArgs = TargetObjCPtr->getTypeArgs();
11087   if (TypeArgs.size() != 1)
11088     return;
11089 
11090   QualType TargetElementType = TypeArgs[0];
11091   for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) {
11092     checkObjCCollectionLiteralElement(S, TargetElementType,
11093                                       ArrayLiteral->getElement(I),
11094                                       0);
11095   }
11096 }
11097 
11098 /// Check an Objective-C dictionary literal being converted to the given
11099 /// target type.
11100 static void
11101 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
11102                            ObjCDictionaryLiteral *DictionaryLiteral) {
11103   if (!S.NSDictionaryDecl)
11104     return;
11105 
11106   const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
11107   if (!TargetObjCPtr)
11108     return;
11109 
11110   if (TargetObjCPtr->isUnspecialized() ||
11111       TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
11112         != S.NSDictionaryDecl->getCanonicalDecl())
11113     return;
11114 
11115   auto TypeArgs = TargetObjCPtr->getTypeArgs();
11116   if (TypeArgs.size() != 2)
11117     return;
11118 
11119   QualType TargetKeyType = TypeArgs[0];
11120   QualType TargetObjectType = TypeArgs[1];
11121   for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
11122     auto Element = DictionaryLiteral->getKeyValueElement(I);
11123     checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
11124     checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
11125   }
11126 }
11127 
11128 // Helper function to filter out cases for constant width constant conversion.
11129 // Don't warn on char array initialization or for non-decimal values.
11130 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
11131                                           SourceLocation CC) {
11132   // If initializing from a constant, and the constant starts with '0',
11133   // then it is a binary, octal, or hexadecimal. Allow these constants
11134   // to fill all the bits, even if there is a sign change.
11135   if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
11136     const char FirstLiteralCharacter =
11137         S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
11138     if (FirstLiteralCharacter == '0')
11139       return false;
11140   }
11141 
11142   // If the CC location points to a '{', and the type is char, then assume
11143   // it is an array initialization.
11144 if (CC.isValid() && T->isCharType()) { 11145 const char FirstContextCharacter = 11146 S.getSourceManager().getCharacterData(CC)[0]; 11147 if (FirstContextCharacter == '{') 11148 return false; 11149 } 11150 11151 return true; 11152 } 11153 11154 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 11155 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 11156 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 11157 } 11158 11159 static void 11160 CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC, 11161 bool *ICContext = nullptr) { 11162 if (E->isTypeDependent() || E->isValueDependent()) return; 11163 11164 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 11165 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 11166 if (Source == Target) return; 11167 if (Target->isDependentType()) return; 11168 11169 // If the conversion context location is invalid don't complain. We also 11170 // don't want to emit a warning if the issue occurs from the expansion of 11171 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 11172 // delay this check as long as possible. Once we detect we are in that 11173 // scenario, we just return. 11174 if (CC.isInvalid()) 11175 return; 11176 11177 if (Source->isAtomicType()) 11178 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 11179 11180 // Diagnose implicit casts to bool. 11181 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 11182 if (isa<StringLiteral>(E)) 11183 // Warn on string literal to bool. Checks for string literals in logical 11184 // and expressions, for instance, assert(0 && "error here"), are 11185 // prevented by a check in AnalyzeImplicitConversions(). 11186 return DiagnoseImpCast(S, E, T, CC, 11187 diag::warn_impcast_string_literal_to_bool); 11188 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 11189 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 11190 // This covers the literal expressions that evaluate to Objective-C 11191 // objects. 11192 return DiagnoseImpCast(S, E, T, CC, 11193 diag::warn_impcast_objective_c_literal_to_bool); 11194 } 11195 if (Source->isPointerType() || Source->canDecayToPointerType()) { 11196 // Warn on pointer to bool conversion that is always true. 11197 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 11198 SourceRange(CC)); 11199 } 11200 } 11201 11202 // If the we're converting a constant to an ObjC BOOL on a platform where BOOL 11203 // is a typedef for signed char (macOS), then that constant value has to be 1 11204 // or 0. 11205 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 11206 Expr::EvalResult Result; 11207 if (E->EvaluateAsInt(Result, S.getASTContext(), 11208 Expr::SE_AllowSideEffects) && 11209 Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 11210 auto Builder = S.Diag(CC, diag::warn_impcast_constant_int_to_objc_bool) 11211 << Result.Val.getInt().toString(10); 11212 Expr *Ignored = E->IgnoreImplicit(); 11213 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 11214 isa<BinaryOperator>(Ignored) || 11215 isa<CXXOperatorCallExpr>(Ignored); 11216 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 11217 if (NeedsParens) 11218 Builder << FixItHint::CreateInsertion(E->getBeginLoc(), "(") 11219 << FixItHint::CreateInsertion(EndLoc, ")"); 11220 Builder << FixItHint::CreateInsertion(EndLoc, " ? 
YES : NO"); 11221 return; 11222 } 11223 } 11224 11225 // Check implicit casts from Objective-C collection literals to specialized 11226 // collection types, e.g., NSArray<NSString *> *. 11227 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 11228 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 11229 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 11230 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 11231 11232 // Strip vector types. 11233 if (isa<VectorType>(Source)) { 11234 if (!isa<VectorType>(Target)) { 11235 if (S.SourceMgr.isInSystemMacro(CC)) 11236 return; 11237 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 11238 } 11239 11240 // If the vector cast is cast between two vectors of the same size, it is 11241 // a bitcast, not a conversion. 11242 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 11243 return; 11244 11245 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 11246 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 11247 } 11248 if (auto VecTy = dyn_cast<VectorType>(Target)) 11249 Target = VecTy->getElementType().getTypePtr(); 11250 11251 // Strip complex types. 11252 if (isa<ComplexType>(Source)) { 11253 if (!isa<ComplexType>(Target)) { 11254 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 11255 return; 11256 11257 return DiagnoseImpCast(S, E, T, CC, 11258 S.getLangOpts().CPlusPlus 11259 ? diag::err_impcast_complex_scalar 11260 : diag::warn_impcast_complex_scalar); 11261 } 11262 11263 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 11264 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 11265 } 11266 11267 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 11268 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 11269 11270 // If the source is floating point... 11271 if (SourceBT && SourceBT->isFloatingPoint()) { 11272 // ...and the target is floating point... 11273 if (TargetBT && TargetBT->isFloatingPoint()) { 11274 // ...then warn if we're dropping FP rank. 11275 11276 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 11277 QualType(SourceBT, 0), QualType(TargetBT, 0)); 11278 if (Order > 0) { 11279 // Don't warn about float constants that are precisely 11280 // representable in the target type. 11281 Expr::EvalResult result; 11282 if (E->EvaluateAsRValue(result, S.Context)) { 11283 // Value might be a float, a float vector, or a float complex. 11284 if (IsSameFloatAfterCast(result.Val, 11285 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 11286 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 11287 return; 11288 } 11289 11290 if (S.SourceMgr.isInSystemMacro(CC)) 11291 return; 11292 11293 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 11294 } 11295 // ... or possibly if we're increasing rank, too 11296 else if (Order < 0) { 11297 if (S.SourceMgr.isInSystemMacro(CC)) 11298 return; 11299 11300 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 11301 } 11302 return; 11303 } 11304 11305 // If the target is integral, always warn. 
11306 if (TargetBT && TargetBT->isInteger()) { 11307 if (S.SourceMgr.isInSystemMacro(CC)) 11308 return; 11309 11310 DiagnoseFloatingImpCast(S, E, T, CC); 11311 } 11312 11313 // Detect the case where a call result is converted from floating-point to 11314 // to bool, and the final argument to the call is converted from bool, to 11315 // discover this typo: 11316 // 11317 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 11318 // 11319 // FIXME: This is an incredibly special case; is there some more general 11320 // way to detect this class of misplaced-parentheses bug? 11321 if (Target->isBooleanType() && isa<CallExpr>(E)) { 11322 // Check last argument of function call to see if it is an 11323 // implicit cast from a type matching the type the result 11324 // is being cast to. 11325 CallExpr *CEx = cast<CallExpr>(E); 11326 if (unsigned NumArgs = CEx->getNumArgs()) { 11327 Expr *LastA = CEx->getArg(NumArgs - 1); 11328 Expr *InnerE = LastA->IgnoreParenImpCasts(); 11329 if (isa<ImplicitCastExpr>(LastA) && 11330 InnerE->getType()->isBooleanType()) { 11331 // Warn on this floating-point to bool conversion 11332 DiagnoseImpCast(S, E, T, CC, 11333 diag::warn_impcast_floating_point_to_bool); 11334 } 11335 } 11336 } 11337 return; 11338 } 11339 11340 // Valid casts involving fixed point types should be accounted for here. 11341 if (Source->isFixedPointType()) { 11342 if (Target->isUnsaturatedFixedPointType()) { 11343 Expr::EvalResult Result; 11344 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 11345 S.isConstantEvaluated())) { 11346 APFixedPoint Value = Result.Val.getFixedPoint(); 11347 APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 11348 APFixedPoint MinVal = S.Context.getFixedPointMin(T); 11349 if (Value > MaxVal || Value < MinVal) { 11350 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11351 S.PDiag(diag::warn_impcast_fixed_point_range) 11352 << Value.toString() << T 11353 << E->getSourceRange() 11354 << clang::SourceRange(CC)); 11355 return; 11356 } 11357 } 11358 } else if (Target->isIntegerType()) { 11359 Expr::EvalResult Result; 11360 if (!S.isConstantEvaluated() && 11361 E->EvaluateAsFixedPoint(Result, S.Context, 11362 Expr::SE_AllowSideEffects)) { 11363 APFixedPoint FXResult = Result.Val.getFixedPoint(); 11364 11365 bool Overflowed; 11366 llvm::APSInt IntResult = FXResult.convertToInt( 11367 S.Context.getIntWidth(T), 11368 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 11369 11370 if (Overflowed) { 11371 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11372 S.PDiag(diag::warn_impcast_fixed_point_range) 11373 << FXResult.toString() << T 11374 << E->getSourceRange() 11375 << clang::SourceRange(CC)); 11376 return; 11377 } 11378 } 11379 } 11380 } else if (Target->isUnsaturatedFixedPointType()) { 11381 if (Source->isIntegerType()) { 11382 Expr::EvalResult Result; 11383 if (!S.isConstantEvaluated() && 11384 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 11385 llvm::APSInt Value = Result.Val.getInt(); 11386 11387 bool Overflowed; 11388 APFixedPoint IntResult = APFixedPoint::getFromIntValue( 11389 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 11390 11391 if (Overflowed) { 11392 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11393 S.PDiag(diag::warn_impcast_fixed_point_range) 11394 << Value.toString(/*Radix=*/10) << T 11395 << E->getSourceRange() 11396 << clang::SourceRange(CC)); 11397 return; 11398 } 11399 } 11400 } 11401 } 11402 11403 DiagnoseNullConversion(S, E, T, CC); 11404 11405 S.DiscardMisalignedMemberAddress(Target, 
E); 11406 11407 if (!Source->isIntegerType() || !Target->isIntegerType()) 11408 return; 11409 11410 // TODO: remove this early return once the false positives for constant->bool 11411 // in templates, macros, etc, are reduced or removed. 11412 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 11413 return; 11414 11415 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated()); 11416 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 11417 11418 if (SourceRange.Width > TargetRange.Width) { 11419 // If the source is a constant, use a default-on diagnostic. 11420 // TODO: this should happen for bitfield stores, too. 11421 Expr::EvalResult Result; 11422 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 11423 S.isConstantEvaluated())) { 11424 llvm::APSInt Value(32); 11425 Value = Result.Val.getInt(); 11426 11427 if (S.SourceMgr.isInSystemMacro(CC)) 11428 return; 11429 11430 std::string PrettySourceValue = Value.toString(10); 11431 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 11432 11433 S.DiagRuntimeBehavior( 11434 E->getExprLoc(), E, 11435 S.PDiag(diag::warn_impcast_integer_precision_constant) 11436 << PrettySourceValue << PrettyTargetValue << E->getType() << T 11437 << E->getSourceRange() << clang::SourceRange(CC)); 11438 return; 11439 } 11440 11441 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 11442 if (S.SourceMgr.isInSystemMacro(CC)) 11443 return; 11444 11445 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 11446 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 11447 /* pruneControlFlow */ true); 11448 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 11449 } 11450 11451 if (TargetRange.Width > SourceRange.Width) { 11452 if (auto *UO = dyn_cast<UnaryOperator>(E)) 11453 if (UO->getOpcode() == UO_Minus) 11454 if (Source->isUnsignedIntegerType()) { 11455 if (Target->isUnsignedIntegerType()) 11456 return DiagnoseImpCast(S, E, T, CC, 11457 diag::warn_impcast_high_order_zero_bits); 11458 if (Target->isSignedIntegerType()) 11459 return DiagnoseImpCast(S, E, T, CC, 11460 diag::warn_impcast_nonnegative_result); 11461 } 11462 } 11463 11464 if (TargetRange.Width == SourceRange.Width && !TargetRange.NonNegative && 11465 SourceRange.NonNegative && Source->isSignedIntegerType()) { 11466 // Warn when doing a signed to signed conversion, warn if the positive 11467 // source value is exactly the width of the target type, which will 11468 // cause a negative value to be stored. 11469 11470 Expr::EvalResult Result; 11471 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 11472 !S.SourceMgr.isInSystemMacro(CC)) { 11473 llvm::APSInt Value = Result.Val.getInt(); 11474 if (isSameWidthConstantConversion(S, E, T, CC)) { 11475 std::string PrettySourceValue = Value.toString(10); 11476 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 11477 11478 S.DiagRuntimeBehavior( 11479 E->getExprLoc(), E, 11480 S.PDiag(diag::warn_impcast_integer_precision_constant) 11481 << PrettySourceValue << PrettyTargetValue << E->getType() << T 11482 << E->getSourceRange() << clang::SourceRange(CC)); 11483 return; 11484 } 11485 } 11486 11487 // Fall through for non-constants to give a sign conversion warning. 
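    // An illustrative trigger for the constant case above (assuming
    // -Wconstant-conversion is enabled):
    //
    //   signed char SC = 128;  // 128 is non-negative and exactly fills 8 bits,
    //                          // so the stored value becomes -128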
11488 } 11489 11490 if ((TargetRange.NonNegative && !SourceRange.NonNegative) || 11491 (!TargetRange.NonNegative && SourceRange.NonNegative && 11492 SourceRange.Width == TargetRange.Width)) { 11493 if (S.SourceMgr.isInSystemMacro(CC)) 11494 return; 11495 11496 unsigned DiagID = diag::warn_impcast_integer_sign; 11497 11498 // Traditionally, gcc has warned about this under -Wsign-compare. 11499 // We also want to warn about it in -Wconversion. 11500 // So if -Wconversion is off, use a completely identical diagnostic 11501 // in the sign-compare group. 11502 // The conditional-checking code will 11503 if (ICContext) { 11504 DiagID = diag::warn_impcast_integer_sign_conditional; 11505 *ICContext = true; 11506 } 11507 11508 return DiagnoseImpCast(S, E, T, CC, DiagID); 11509 } 11510 11511 // Diagnose conversions between different enumeration types. 11512 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 11513 // type, to give us better diagnostics. 11514 QualType SourceType = E->getType(); 11515 if (!S.getLangOpts().CPlusPlus) { 11516 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 11517 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 11518 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 11519 SourceType = S.Context.getTypeDeclType(Enum); 11520 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 11521 } 11522 } 11523 11524 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 11525 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 11526 if (SourceEnum->getDecl()->hasNameForLinkage() && 11527 TargetEnum->getDecl()->hasNameForLinkage() && 11528 SourceEnum != TargetEnum) { 11529 if (S.SourceMgr.isInSystemMacro(CC)) 11530 return; 11531 11532 return DiagnoseImpCast(S, E, SourceType, T, CC, 11533 diag::warn_impcast_different_enum_types); 11534 } 11535 } 11536 11537 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E, 11538 SourceLocation CC, QualType T); 11539 11540 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 11541 SourceLocation CC, bool &ICContext) { 11542 E = E->IgnoreParenImpCasts(); 11543 11544 if (isa<ConditionalOperator>(E)) 11545 return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T); 11546 11547 AnalyzeImplicitConversions(S, E, CC); 11548 if (E->getType() != T) 11549 return CheckImplicitConversion(S, E, T, CC, &ICContext); 11550 } 11551 11552 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E, 11553 SourceLocation CC, QualType T) { 11554 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 11555 11556 bool Suspicious = false; 11557 CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious); 11558 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 11559 11560 // If -Wconversion would have warned about either of the candidates 11561 // for a signedness conversion to the context type... 11562 if (!Suspicious) return; 11563 11564 // ...but it's currently ignored... 11565 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 11566 return; 11567 11568 // ...then check whether it would have warned about either of the 11569 // candidates for a signedness conversion to the condition type. 
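  // For example (illustrative): in `unsigned R = Cond ? -1 : SomeUnsigned;`,
  // it is the signedness of `-1` relative to the conditional's own type that
  // gets re-examined below.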
11570 if (E->getType() == T) return; 11571 11572 Suspicious = false; 11573 CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(), 11574 E->getType(), CC, &Suspicious); 11575 if (!Suspicious) 11576 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 11577 E->getType(), CC, &Suspicious); 11578 } 11579 11580 /// Check conversion of given expression to boolean. 11581 /// Input argument E is a logical expression. 11582 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 11583 if (S.getLangOpts().Bool) 11584 return; 11585 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 11586 return; 11587 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 11588 } 11589 11590 /// AnalyzeImplicitConversions - Find and report any interesting 11591 /// implicit conversions in the given expression. There are a couple 11592 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 11593 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, 11594 SourceLocation CC) { 11595 QualType T = OrigE->getType(); 11596 Expr *E = OrigE->IgnoreParenImpCasts(); 11597 11598 if (E->isTypeDependent() || E->isValueDependent()) 11599 return; 11600 11601 // For conditional operators, we analyze the arguments as if they 11602 // were being fed directly into the output. 11603 if (isa<ConditionalOperator>(E)) { 11604 ConditionalOperator *CO = cast<ConditionalOperator>(E); 11605 CheckConditionalOperator(S, CO, CC, T); 11606 return; 11607 } 11608 11609 // Check implicit argument conversions for function calls. 11610 if (CallExpr *Call = dyn_cast<CallExpr>(E)) 11611 CheckImplicitArgumentConversions(S, Call, CC); 11612 11613 // Go ahead and check any implicit conversions we might have skipped. 11614 // The non-canonical typecheck is just an optimization; 11615 // CheckImplicitConversion will filter out dead implicit conversions. 11616 if (E->getType() != T) 11617 CheckImplicitConversion(S, E, T, CC); 11618 11619 // Now continue drilling into this expression. 11620 11621 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 11622 // The bound subexpressions in a PseudoObjectExpr are not reachable 11623 // as transitive children. 11624 // FIXME: Use a more uniform representation for this. 11625 for (auto *SE : POE->semantics()) 11626 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 11627 AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC); 11628 } 11629 11630 // Skip past explicit casts. 11631 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 11632 E = CE->getSubExpr()->IgnoreParenImpCasts(); 11633 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 11634 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 11635 return AnalyzeImplicitConversions(S, E, CC); 11636 } 11637 11638 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 11639 // Do a somewhat different check with comparison operators. 11640 if (BO->isComparisonOp()) 11641 return AnalyzeComparison(S, BO); 11642 11643 // And with simple assignments. 11644 if (BO->getOpcode() == BO_Assign) 11645 return AnalyzeAssignment(S, BO); 11646 // And with compound assignments. 11647 if (BO->isAssignmentOp()) 11648 return AnalyzeCompoundAssignment(S, BO); 11649 } 11650 11651 // These break the otherwise-useful invariant below. Fortunately, 11652 // we don't really need to recurse into them, because any internal 11653 // expressions should have been analyzed already when they were 11654 // built into statements. 
11655 if (isa<StmtExpr>(E)) return; 11656 11657 // Don't descend into unevaluated contexts. 11658 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 11659 11660 // Now just recurse over the expression's children. 11661 CC = E->getExprLoc(); 11662 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 11663 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 11664 for (Stmt *SubStmt : E->children()) { 11665 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 11666 if (!ChildExpr) 11667 continue; 11668 11669 if (IsLogicalAndOperator && 11670 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 11671 // Ignore checking string literals that are in logical and operators. 11672 // This is a common pattern for asserts. 11673 continue; 11674 AnalyzeImplicitConversions(S, ChildExpr, CC); 11675 } 11676 11677 if (BO && BO->isLogicalOp()) { 11678 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 11679 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 11680 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 11681 11682 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 11683 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 11684 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 11685 } 11686 11687 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 11688 if (U->getOpcode() == UO_LNot) { 11689 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 11690 } else if (U->getOpcode() != UO_AddrOf) { 11691 if (U->getSubExpr()->getType()->isAtomicType()) 11692 S.Diag(U->getSubExpr()->getBeginLoc(), 11693 diag::warn_atomic_implicit_seq_cst); 11694 } 11695 } 11696 } 11697 11698 /// Diagnose integer type and any valid implicit conversion to it. 11699 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) { 11700 // Taking into account implicit conversions, 11701 // allow any integer. 11702 if (!E->getType()->isIntegerType()) { 11703 S.Diag(E->getBeginLoc(), 11704 diag::err_opencl_enqueue_kernel_invalid_local_size_type); 11705 return true; 11706 } 11707 // Potentially emit standard warnings for implicit conversions if enabled 11708 // using -Wconversion. 11709 CheckImplicitConversion(S, E, IntT, E->getBeginLoc()); 11710 return false; 11711 } 11712 11713 // Helper function for Sema::DiagnoseAlwaysNonNullPointer. 11714 // Returns true when emitting a warning about taking the address of a reference. 11715 static bool CheckForReference(Sema &SemaRef, const Expr *E, 11716 const PartialDiagnostic &PD) { 11717 E = E->IgnoreParenImpCasts(); 11718 11719 const FunctionDecl *FD = nullptr; 11720 11721 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { 11722 if (!DRE->getDecl()->getType()->isReferenceType()) 11723 return false; 11724 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) { 11725 if (!M->getMemberDecl()->getType()->isReferenceType()) 11726 return false; 11727 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) { 11728 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType()) 11729 return false; 11730 FD = Call->getDirectCallee(); 11731 } else { 11732 return false; 11733 } 11734 11735 SemaRef.Diag(E->getExprLoc(), PD); 11736 11737 // If possible, point to location of function. 11738 if (FD) { 11739 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD; 11740 } 11741 11742 return true; 11743 } 11744 11745 // Returns true if the SourceLocation is expanded from any macro body. 
11746 // Returns false if the SourceLocation is invalid, is not in a macro 11747 // expansion, or is expanded from a top-level macro argument. 11748 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 11749 if (Loc.isInvalid()) 11750 return false; 11751 11752 while (Loc.isMacroID()) { 11753 if (SM.isMacroBodyExpansion(Loc)) 11754 return true; 11755 Loc = SM.getImmediateMacroCallerLoc(Loc); 11756 } 11757 11758 return false; 11759 } 11760 11761 /// Diagnose pointers that are always non-null. 11762 /// \param E the expression containing the pointer 11763 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is 11764 /// compared to a null pointer 11765 /// \param IsEqual True when the comparison is equal to a null pointer 11766 /// \param Range Extra SourceRange to highlight in the diagnostic 11767 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E, 11768 Expr::NullPointerConstantKind NullKind, 11769 bool IsEqual, SourceRange Range) { 11770 if (!E) 11771 return; 11772 11773 // Don't warn inside macros. 11774 if (E->getExprLoc().isMacroID()) { 11775 const SourceManager &SM = getSourceManager(); 11776 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 11777 IsInAnyMacroBody(SM, Range.getBegin())) 11778 return; 11779 } 11780 E = E->IgnoreImpCasts(); 11781 11782 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 11783 11784 if (isa<CXXThisExpr>(E)) { 11785 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 11786 : diag::warn_this_bool_conversion; 11787 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 11788 return; 11789 } 11790 11791 bool IsAddressOf = false; 11792 11793 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 11794 if (UO->getOpcode() != UO_AddrOf) 11795 return; 11796 IsAddressOf = true; 11797 E = UO->getSubExpr(); 11798 } 11799 11800 if (IsAddressOf) { 11801 unsigned DiagID = IsCompare 11802 ? diag::warn_address_of_reference_null_compare 11803 : diag::warn_address_of_reference_bool_conversion; 11804 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 11805 << IsEqual; 11806 if (CheckForReference(*this, E, PD)) { 11807 return; 11808 } 11809 } 11810 11811 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 11812 bool IsParam = isa<NonNullAttr>(NonnullAttr); 11813 std::string Str; 11814 llvm::raw_string_ostream S(Str); 11815 E->printPretty(S, nullptr, getPrintingPolicy()); 11816 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 11817 : diag::warn_cast_nonnull_to_bool; 11818 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 11819 << E->getSourceRange() << Range << IsEqual; 11820 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 11821 }; 11822 11823 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 11824 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 11825 if (auto *Callee = Call->getDirectCallee()) { 11826 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 11827 ComplainAboutNonnullParamOrCall(A); 11828 return; 11829 } 11830 } 11831 } 11832 11833 // Expect to find a single Decl. Skip anything more complicated. 11834 ValueDecl *D = nullptr; 11835 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 11836 D = R->getDecl(); 11837 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 11838 D = M->getMemberDecl(); 11839 } 11840 11841 // Weak Decls can be null.
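  // An illustrative case for the parameter checks below (the function and
  // parameter names are hypothetical):
  //
  //   void use(int *P __attribute__((nonnull))) {
  //     if (P) { /* ... */ }  // flagged: 'P' is declared nonnull, so the test
  //   }                       // is always true unless 'P' was reassigned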
11842 if (!D || D->isWeak()) 11843 return; 11844 11845 // Check for parameter decl with nonnull attribute 11846 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 11847 if (getCurFunction() && 11848 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 11849 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 11850 ComplainAboutNonnullParamOrCall(A); 11851 return; 11852 } 11853 11854 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 11855 // Skip function template not specialized yet. 11856 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 11857 return; 11858 auto ParamIter = llvm::find(FD->parameters(), PV); 11859 assert(ParamIter != FD->param_end()); 11860 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter); 11861 11862 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) { 11863 if (!NonNull->args_size()) { 11864 ComplainAboutNonnullParamOrCall(NonNull); 11865 return; 11866 } 11867 11868 for (const ParamIdx &ArgNo : NonNull->args()) { 11869 if (ArgNo.getASTIndex() == ParamNo) { 11870 ComplainAboutNonnullParamOrCall(NonNull); 11871 return; 11872 } 11873 } 11874 } 11875 } 11876 } 11877 } 11878 11879 QualType T = D->getType(); 11880 const bool IsArray = T->isArrayType(); 11881 const bool IsFunction = T->isFunctionType(); 11882 11883 // Address of function is used to silence the function warning. 11884 if (IsAddressOf && IsFunction) { 11885 return; 11886 } 11887 11888 // Found nothing. 11889 if (!IsAddressOf && !IsFunction && !IsArray) 11890 return; 11891 11892 // Pretty print the expression for the diagnostic. 11893 std::string Str; 11894 llvm::raw_string_ostream S(Str); 11895 E->printPretty(S, nullptr, getPrintingPolicy()); 11896 11897 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare 11898 : diag::warn_impcast_pointer_to_bool; 11899 enum { 11900 AddressOf, 11901 FunctionPointer, 11902 ArrayPointer 11903 } DiagType; 11904 if (IsAddressOf) 11905 DiagType = AddressOf; 11906 else if (IsFunction) 11907 DiagType = FunctionPointer; 11908 else if (IsArray) 11909 DiagType = ArrayPointer; 11910 else 11911 llvm_unreachable("Could not determine diagnostic."); 11912 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange() 11913 << Range << IsEqual; 11914 11915 if (!IsFunction) 11916 return; 11917 11918 // Suggest '&' to silence the function warning. 11919 Diag(E->getExprLoc(), diag::note_function_warning_silence) 11920 << FixItHint::CreateInsertion(E->getBeginLoc(), "&"); 11921 11922 // Check to see if '()' fixit should be emitted. 11923 QualType ReturnType; 11924 UnresolvedSet<4> NonTemplateOverloads; 11925 tryExprAsCall(*E, ReturnType, NonTemplateOverloads); 11926 if (ReturnType.isNull()) 11927 return; 11928 11929 if (IsCompare) { 11930 // There are two cases here. If the null is a proper null pointer constant, 11931 // only suggest the fixit for a pointer return type. If the null is 0, then 11932 // suggest it if the return type is a pointer or an integer type. 11933 if (!ReturnType->isPointerType()) { 11934 if (NullKind == Expr::NPCK_ZeroExpression || 11935 NullKind == Expr::NPCK_ZeroLiteral) { 11936 if (!ReturnType->isIntegerType()) 11937 return; 11938 } else { 11939 return; 11940 } 11941 } 11942 } else { // !IsCompare 11943 // For function to bool, only suggest if the function pointer has bool 11944 // return type.
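  // e.g. (illustrative): given `bool isReady();`, writing `if (isReady) ...`
  // is diagnosed as always true, and because the return type is bool the '()'
  // fix-it below is offered as well.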
11945 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool)) 11946 return; 11947 } 11948 Diag(E->getExprLoc(), diag::note_function_to_function_call) 11949 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()"); 11950 } 11951 11952 /// Diagnoses "dangerous" implicit conversions within the given 11953 /// expression (which is a full expression). Implements -Wconversion 11954 /// and -Wsign-compare. 11955 /// 11956 /// \param CC the "context" location of the implicit conversion, i.e. 11957 /// the location of the syntactic entity requiring the implicit 11958 /// conversion 11959 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) { 11960 // Don't diagnose in unevaluated contexts. 11961 if (isUnevaluatedContext()) 11962 return; 11963 11964 // Don't diagnose for value- or type-dependent expressions. 11965 if (E->isTypeDependent() || E->isValueDependent()) 11966 return; 11967 11968 // Check for array bounds violations in cases where the check isn't triggered 11969 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 11970 // ArraySubscriptExpr is on the RHS of a variable initialization. 11971 CheckArrayAccess(E); 11972 11973 // This is not the right CC for (e.g.) a variable initialization. 11974 AnalyzeImplicitConversions(*this, E, CC); 11975 } 11976 11977 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 11978 /// Input argument E is a logical expression. 11979 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 11980 ::CheckBoolLikeConversion(*this, E, CC); 11981 } 11982 11983 /// Diagnose when an expression is an integer constant expression and its 11984 /// evaluation results in an integer overflow. 11985 void Sema::CheckForIntOverflow (Expr *E) { 11986 // Use a work list to deal with nested struct initializers. 11987 SmallVector<Expr *, 2> Exprs(1, E); 11988 11989 do { 11990 Expr *OriginalE = Exprs.pop_back_val(); 11991 Expr *E = OriginalE->IgnoreParenCasts(); 11992 11993 if (isa<BinaryOperator>(E)) { 11994 E->EvaluateForOverflow(Context); 11995 continue; 11996 } 11997 11998 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 11999 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 12000 else if (isa<ObjCBoxedExpr>(OriginalE)) 12001 E->EvaluateForOverflow(Context); 12002 else if (auto Call = dyn_cast<CallExpr>(E)) 12003 Exprs.append(Call->arg_begin(), Call->arg_end()); 12004 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 12005 Exprs.append(Message->arg_begin(), Message->arg_end()); 12006 } while (!Exprs.empty()); 12007 } 12008 12009 namespace { 12010 12011 /// Visitor for expressions which looks for unsequenced operations on the 12012 /// same object. 12013 class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> { 12014 using Base = EvaluatedExprVisitor<SequenceChecker>; 12015 12016 /// A tree of sequenced regions within an expression. Two regions are 12017 /// unsequenced if one is an ancestor or a descendant of the other. When we 12018 /// finish processing an expression with sequencing, such as a comma 12019 /// expression, we fold its tree nodes into its parent, since they are 12020 /// unsequenced with respect to nodes we will visit later.
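  // A classic expression the enclosing SequenceChecker flags (illustrative):
  //
  //   int I = 0;
  //   int N = I++ + I++;  // warning: multiple unsequenced modifications to 'I'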
12021 class SequenceTree { 12022 struct Value { 12023 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 12024 unsigned Parent : 31; 12025 unsigned Merged : 1; 12026 }; 12027 SmallVector<Value, 8> Values; 12028 12029 public: 12030 /// A region within an expression which may be sequenced with respect 12031 /// to some other region. 12032 class Seq { 12033 friend class SequenceTree; 12034 12035 unsigned Index; 12036 12037 explicit Seq(unsigned N) : Index(N) {} 12038 12039 public: 12040 Seq() : Index(0) {} 12041 }; 12042 12043 SequenceTree() { Values.push_back(Value(0)); } 12044 Seq root() const { return Seq(0); } 12045 12046 /// Create a new sequence of operations, which is an unsequenced 12047 /// subset of \p Parent. This sequence of operations is sequenced with 12048 /// respect to other children of \p Parent. 12049 Seq allocate(Seq Parent) { 12050 Values.push_back(Value(Parent.Index)); 12051 return Seq(Values.size() - 1); 12052 } 12053 12054 /// Merge a sequence of operations into its parent. 12055 void merge(Seq S) { 12056 Values[S.Index].Merged = true; 12057 } 12058 12059 /// Determine whether two operations are unsequenced. This operation 12060 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 12061 /// should have been merged into its parent as appropriate. 12062 bool isUnsequenced(Seq Cur, Seq Old) { 12063 unsigned C = representative(Cur.Index); 12064 unsigned Target = representative(Old.Index); 12065 while (C >= Target) { 12066 if (C == Target) 12067 return true; 12068 C = Values[C].Parent; 12069 } 12070 return false; 12071 } 12072 12073 private: 12074 /// Pick a representative for a sequence. 12075 unsigned representative(unsigned K) { 12076 if (Values[K].Merged) 12077 // Perform path compression as we go. 12078 return Values[K].Parent = representative(Values[K].Parent); 12079 return K; 12080 } 12081 }; 12082 12083 /// An object for which we can track unsequenced uses. 12084 using Object = NamedDecl *; 12085 12086 /// Different flavors of object usage which we track. We only track the 12087 /// least-sequenced usage of each kind. 12088 enum UsageKind { 12089 /// A read of an object. Multiple unsequenced reads are OK. 12090 UK_Use, 12091 12092 /// A modification of an object which is sequenced before the value 12093 /// computation of the expression, such as ++n in C++. 12094 UK_ModAsValue, 12095 12096 /// A modification of an object which is not sequenced before the value 12097 /// computation of the expression, such as n++. 12098 UK_ModAsSideEffect, 12099 12100 UK_Count = UK_ModAsSideEffect + 1 12101 }; 12102 12103 struct Usage { 12104 Expr *Use; 12105 SequenceTree::Seq Seq; 12106 12107 Usage() : Use(nullptr), Seq() {} 12108 }; 12109 12110 struct UsageInfo { 12111 Usage Uses[UK_Count]; 12112 12113 /// Have we issued a diagnostic for this variable already? 12114 bool Diagnosed; 12115 12116 UsageInfo() : Uses(), Diagnosed(false) {} 12117 }; 12118 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 12119 12120 Sema &SemaRef; 12121 12122 /// Sequenced regions within the expression. 12123 SequenceTree Tree; 12124 12125 /// Declaration modifications and references which we have seen. 12126 UsageInfoMap UsageMap; 12127 12128 /// The region we are currently within. 12129 SequenceTree::Seq Region; 12130 12131 /// Filled in with declarations which were modified as a side-effect 12132 /// (that is, post-increment operations). 
12133 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 12134 12135 /// Expressions to check later. We defer checking these to reduce 12136 /// stack usage. 12137 SmallVectorImpl<Expr *> &WorkList; 12138 12139 /// RAII object wrapping the visitation of a sequenced subexpression of an 12140 /// expression. At the end of this process, the side-effects of the evaluation 12141 /// become sequenced with respect to the value computation of the result, so 12142 /// we downgrade any UK_ModAsSideEffect within the evaluation to 12143 /// UK_ModAsValue. 12144 struct SequencedSubexpression { 12145 SequencedSubexpression(SequenceChecker &Self) 12146 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 12147 Self.ModAsSideEffect = &ModAsSideEffect; 12148 } 12149 12150 ~SequencedSubexpression() { 12151 for (auto &M : llvm::reverse(ModAsSideEffect)) { 12152 UsageInfo &U = Self.UsageMap[M.first]; 12153 auto &SideEffectUsage = U.Uses[UK_ModAsSideEffect]; 12154 Self.addUsage(U, M.first, SideEffectUsage.Use, UK_ModAsValue); 12155 SideEffectUsage = M.second; 12156 } 12157 Self.ModAsSideEffect = OldModAsSideEffect; 12158 } 12159 12160 SequenceChecker &Self; 12161 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 12162 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 12163 }; 12164 12165 /// RAII object wrapping the visitation of a subexpression which we might 12166 /// choose to evaluate as a constant. If any subexpression is evaluated and 12167 /// found to be non-constant, this allows us to suppress the evaluation of 12168 /// the outer expression. 12169 class EvaluationTracker { 12170 public: 12171 EvaluationTracker(SequenceChecker &Self) 12172 : Self(Self), Prev(Self.EvalTracker) { 12173 Self.EvalTracker = this; 12174 } 12175 12176 ~EvaluationTracker() { 12177 Self.EvalTracker = Prev; 12178 if (Prev) 12179 Prev->EvalOK &= EvalOK; 12180 } 12181 12182 bool evaluate(const Expr *E, bool &Result) { 12183 if (!EvalOK || E->isValueDependent()) 12184 return false; 12185 EvalOK = E->EvaluateAsBooleanCondition( 12186 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 12187 return EvalOK; 12188 } 12189 12190 private: 12191 SequenceChecker &Self; 12192 EvaluationTracker *Prev; 12193 bool EvalOK = true; 12194 } *EvalTracker = nullptr; 12195 12196 /// Find the object which is produced by the specified expression, 12197 /// if any. 12198 Object getObject(Expr *E, bool Mod) const { 12199 E = E->IgnoreParenCasts(); 12200 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 12201 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 12202 return getObject(UO->getSubExpr(), Mod); 12203 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 12204 if (BO->getOpcode() == BO_Comma) 12205 return getObject(BO->getRHS(), Mod); 12206 if (Mod && BO->isAssignmentOp()) 12207 return getObject(BO->getLHS(), Mod); 12208 } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 12209 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 12210 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 12211 return ME->getMemberDecl(); 12212 } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 12213 // FIXME: If this is a reference, map through to its value. 12214 return DRE->getDecl(); 12215 return nullptr; 12216 } 12217 12218 /// Note that an object was modified or used by an expression. 
12219 void addUsage(UsageInfo &UI, Object O, Expr *Ref, UsageKind UK) { 12220 Usage &U = UI.Uses[UK]; 12221 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) { 12222 if (UK == UK_ModAsSideEffect && ModAsSideEffect) 12223 ModAsSideEffect->push_back(std::make_pair(O, U)); 12224 U.Use = Ref; 12225 U.Seq = Region; 12226 } 12227 } 12228 12229 /// Check whether a modification or use conflicts with a prior usage. 12230 void checkUsage(Object O, UsageInfo &UI, Expr *Ref, UsageKind OtherKind, 12231 bool IsModMod) { 12232 if (UI.Diagnosed) 12233 return; 12234 12235 const Usage &U = UI.Uses[OtherKind]; 12236 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) 12237 return; 12238 12239 Expr *Mod = U.Use; 12240 Expr *ModOrUse = Ref; 12241 if (OtherKind == UK_Use) 12242 std::swap(Mod, ModOrUse); 12243 12244 SemaRef.DiagRuntimeBehavior( 12245 Mod->getExprLoc(), {Mod, ModOrUse}, 12246 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod 12247 : diag::warn_unsequenced_mod_use) 12248 << O << SourceRange(ModOrUse->getExprLoc())); 12249 UI.Diagnosed = true; 12250 } 12251 12252 void notePreUse(Object O, Expr *Use) { 12253 UsageInfo &U = UsageMap[O]; 12254 // Uses conflict with other modifications. 12255 checkUsage(O, U, Use, UK_ModAsValue, false); 12256 } 12257 12258 void notePostUse(Object O, Expr *Use) { 12259 UsageInfo &U = UsageMap[O]; 12260 checkUsage(O, U, Use, UK_ModAsSideEffect, false); 12261 addUsage(U, O, Use, UK_Use); 12262 } 12263 12264 void notePreMod(Object O, Expr *Mod) { 12265 UsageInfo &U = UsageMap[O]; 12266 // Modifications conflict with other modifications and with uses. 12267 checkUsage(O, U, Mod, UK_ModAsValue, true); 12268 checkUsage(O, U, Mod, UK_Use, false); 12269 } 12270 12271 void notePostMod(Object O, Expr *Use, UsageKind UK) { 12272 UsageInfo &U = UsageMap[O]; 12273 checkUsage(O, U, Use, UK_ModAsSideEffect, true); 12274 addUsage(U, O, Use, UK); 12275 } 12276 12277 public: 12278 SequenceChecker(Sema &S, Expr *E, SmallVectorImpl<Expr *> &WorkList) 12279 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 12280 Visit(E); 12281 } 12282 12283 void VisitStmt(Stmt *S) { 12284 // Skip all statements which aren't expressions for now. 12285 } 12286 12287 void VisitExpr(Expr *E) { 12288 // By default, just recurse to evaluated subexpressions. 12289 Base::VisitStmt(E); 12290 } 12291 12292 void VisitCastExpr(CastExpr *E) { 12293 Object O = Object(); 12294 if (E->getCastKind() == CK_LValueToRValue) 12295 O = getObject(E->getSubExpr(), false); 12296 12297 if (O) 12298 notePreUse(O, E); 12299 VisitExpr(E); 12300 if (O) 12301 notePostUse(O, E); 12302 } 12303 12304 void VisitSequencedExpressions(Expr *SequencedBefore, Expr *SequencedAfter) { 12305 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 12306 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 12307 SequenceTree::Seq OldRegion = Region; 12308 12309 { 12310 SequencedSubexpression SeqBefore(*this); 12311 Region = BeforeRegion; 12312 Visit(SequencedBefore); 12313 } 12314 12315 Region = AfterRegion; 12316 Visit(SequencedAfter); 12317 12318 Region = OldRegion; 12319 12320 Tree.merge(BeforeRegion); 12321 Tree.merge(AfterRegion); 12322 } 12323 12324 void VisitArraySubscriptExpr(ArraySubscriptExpr *ASE) { 12325 // C++17 [expr.sub]p1: 12326 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 12327 // expression E1 is sequenced before the expression E2. 
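    // Illustrative consequence: in `A[I++][I++]` the two increments are
    // unsequenced (and diagnosed) before C++17, but sequenced left-to-right
    // in C++17, so only the pre-C++17 path below reports it.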
12328 if (SemaRef.getLangOpts().CPlusPlus17) 12329 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 12330 else 12331 Base::VisitStmt(ASE); 12332 } 12333 12334 void VisitBinComma(BinaryOperator *BO) { 12335 // C++11 [expr.comma]p1: 12336 // Every value computation and side effect associated with the left 12337 // expression is sequenced before every value computation and side 12338 // effect associated with the right expression. 12339 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 12340 } 12341 12342 void VisitBinAssign(BinaryOperator *BO) { 12343 // The modification is sequenced after the value computation of the LHS 12344 // and RHS, so check it before inspecting the operands and update the 12345 // map afterwards. 12346 Object O = getObject(BO->getLHS(), true); 12347 if (!O) 12348 return VisitExpr(BO); 12349 12350 notePreMod(O, BO); 12351 12352 // C++11 [expr.ass]p7: 12353 // E1 op= E2 is equivalent to E1 = E1 op E2, except that E1 is evaluated 12354 // only once. 12355 // 12356 // Therefore, for a compound assignment operator, O is considered used 12357 // everywhere except within the evaluation of E1 itself. 12358 if (isa<CompoundAssignOperator>(BO)) 12359 notePreUse(O, BO); 12360 12361 Visit(BO->getLHS()); 12362 12363 if (isa<CompoundAssignOperator>(BO)) 12364 notePostUse(O, BO); 12365 12366 Visit(BO->getRHS()); 12367 12368 // C++11 [expr.ass]p1: 12369 // the assignment is sequenced [...] before the value computation of the 12370 // assignment expression. 12371 // C11 6.5.16/3 has no such rule. 12372 notePostMod(O, BO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 12373 : UK_ModAsSideEffect); 12374 } 12375 12376 void VisitCompoundAssignOperator(CompoundAssignOperator *CAO) { 12377 VisitBinAssign(CAO); 12378 } 12379 12380 void VisitUnaryPreInc(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 12381 void VisitUnaryPreDec(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 12382 void VisitUnaryPreIncDec(UnaryOperator *UO) { 12383 Object O = getObject(UO->getSubExpr(), true); 12384 if (!O) 12385 return VisitExpr(UO); 12386 12387 notePreMod(O, UO); 12388 Visit(UO->getSubExpr()); 12389 // C++11 [expr.pre.incr]p1: 12390 // the expression ++x is equivalent to x+=1 12391 notePostMod(O, UO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 12392 : UK_ModAsSideEffect); 12393 } 12394 12395 void VisitUnaryPostInc(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 12396 void VisitUnaryPostDec(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 12397 void VisitUnaryPostIncDec(UnaryOperator *UO) { 12398 Object O = getObject(UO->getSubExpr(), true); 12399 if (!O) 12400 return VisitExpr(UO); 12401 12402 notePreMod(O, UO); 12403 Visit(UO->getSubExpr()); 12404 notePostMod(O, UO, UK_ModAsSideEffect); 12405 } 12406 12407 /// Don't visit the RHS of '&&' or '||' if it might not be evaluated. 12408 void VisitBinLOr(BinaryOperator *BO) { 12409 // The side-effects of the LHS of an '&&' are sequenced before the 12410 // value computation of the RHS, and hence before the value computation 12411 // of the '&&' itself, unless the LHS evaluates to zero. We treat them 12412 // as if they were unconditionally sequenced. 12413 EvaluationTracker Eval(*this); 12414 { 12415 SequencedSubexpression Sequenced(*this); 12416 Visit(BO->getLHS()); 12417 } 12418 12419 bool Result; 12420 if (Eval.evaluate(BO->getLHS(), Result)) { 12421 if (!Result) 12422 Visit(BO->getRHS()); 12423 } else { 12424 // Check for unsequenced operations in the RHS, treating it as an 12425 // entirely separate evaluation. 
12426 // 12427 // FIXME: If there are operations in the RHS which are unsequenced 12428 // with respect to operations outside the RHS, and those operations 12429 // are unconditionally evaluated, diagnose them. 12430 WorkList.push_back(BO->getRHS()); 12431 } 12432 } 12433 void VisitBinLAnd(BinaryOperator *BO) { 12434 EvaluationTracker Eval(*this); 12435 { 12436 SequencedSubexpression Sequenced(*this); 12437 Visit(BO->getLHS()); 12438 } 12439 12440 bool Result; 12441 if (Eval.evaluate(BO->getLHS(), Result)) { 12442 if (Result) 12443 Visit(BO->getRHS()); 12444 } else { 12445 WorkList.push_back(BO->getRHS()); 12446 } 12447 } 12448 12449 // Only visit the condition, unless we can be sure which subexpression will 12450 // be chosen. 12451 void VisitAbstractConditionalOperator(AbstractConditionalOperator *CO) { 12452 EvaluationTracker Eval(*this); 12453 { 12454 SequencedSubexpression Sequenced(*this); 12455 Visit(CO->getCond()); 12456 } 12457 12458 bool Result; 12459 if (Eval.evaluate(CO->getCond(), Result)) 12460 Visit(Result ? CO->getTrueExpr() : CO->getFalseExpr()); 12461 else { 12462 WorkList.push_back(CO->getTrueExpr()); 12463 WorkList.push_back(CO->getFalseExpr()); 12464 } 12465 } 12466 12467 void VisitCallExpr(CallExpr *CE) { 12468 // C++11 [intro.execution]p15: 12469 // When calling a function [...], every value computation and side effect 12470 // associated with any argument expression, or with the postfix expression 12471 // designating the called function, is sequenced before execution of every 12472 // expression or statement in the body of the function [and thus before 12473 // the value computation of its result]. 12474 SequencedSubexpression Sequenced(*this); 12475 Base::VisitCallExpr(CE); 12476 12477 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions. 12478 } 12479 12480 void VisitCXXConstructExpr(CXXConstructExpr *CCE) { 12481 // This is a call, so all subexpressions are sequenced before the result. 12482 SequencedSubexpression Sequenced(*this); 12483 12484 if (!CCE->isListInitialization()) 12485 return VisitExpr(CCE); 12486 12487 // In C++11, list initializations are sequenced. 12488 SmallVector<SequenceTree::Seq, 32> Elts; 12489 SequenceTree::Seq Parent = Region; 12490 for (CXXConstructExpr::arg_iterator I = CCE->arg_begin(), 12491 E = CCE->arg_end(); 12492 I != E; ++I) { 12493 Region = Tree.allocate(Parent); 12494 Elts.push_back(Region); 12495 Visit(*I); 12496 } 12497 12498 // Forget that the initializers are sequenced. 12499 Region = Parent; 12500 for (unsigned I = 0; I < Elts.size(); ++I) 12501 Tree.merge(Elts[I]); 12502 } 12503 12504 void VisitInitListExpr(InitListExpr *ILE) { 12505 if (!SemaRef.getLangOpts().CPlusPlus11) 12506 return VisitExpr(ILE); 12507 12508 // In C++11, list initializations are sequenced. 12509 SmallVector<SequenceTree::Seq, 32> Elts; 12510 SequenceTree::Seq Parent = Region; 12511 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 12512 Expr *E = ILE->getInit(I); 12513 if (!E) continue; 12514 Region = Tree.allocate(Parent); 12515 Elts.push_back(Region); 12516 Visit(E); 12517 } 12518 12519 // Forget that the initializers are sequenced. 
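    // e.g. (illustrative) `int A[] = {I++, I++};` is well-formed in C++11 and
    // later: each element initializer is sequenced before the next, so no
    // -Wunsequenced diagnostic is produced for it here.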
12520 Region = Parent; 12521 for (unsigned I = 0; I < Elts.size(); ++I) 12522 Tree.merge(Elts[I]); 12523 } 12524 }; 12525 12526 } // namespace 12527 12528 void Sema::CheckUnsequencedOperations(Expr *E) { 12529 SmallVector<Expr *, 8> WorkList; 12530 WorkList.push_back(E); 12531 while (!WorkList.empty()) { 12532 Expr *Item = WorkList.pop_back_val(); 12533 SequenceChecker(*this, Item, WorkList); 12534 } 12535 } 12536 12537 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 12538 bool IsConstexpr) { 12539 llvm::SaveAndRestore<bool> ConstantContext( 12540 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 12541 CheckImplicitConversions(E, CheckLoc); 12542 if (!E->isInstantiationDependent()) 12543 CheckUnsequencedOperations(E); 12544 if (!IsConstexpr && !E->isValueDependent()) 12545 CheckForIntOverflow(E); 12546 DiagnoseMisalignedMembers(); 12547 } 12548 12549 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 12550 FieldDecl *BitField, 12551 Expr *Init) { 12552 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 12553 } 12554 12555 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 12556 SourceLocation Loc) { 12557 if (!PType->isVariablyModifiedType()) 12558 return; 12559 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 12560 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 12561 return; 12562 } 12563 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 12564 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 12565 return; 12566 } 12567 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 12568 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 12569 return; 12570 } 12571 12572 const ArrayType *AT = S.Context.getAsArrayType(PType); 12573 if (!AT) 12574 return; 12575 12576 if (AT->getSizeModifier() != ArrayType::Star) { 12577 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 12578 return; 12579 } 12580 12581 S.Diag(Loc, diag::err_array_star_in_function_definition); 12582 } 12583 12584 /// CheckParmsForFunctionDef - Check that the parameters of the given 12585 /// function are appropriate for the definition of a function. This 12586 /// takes care of any checks that cannot be performed on the 12587 /// declaration itself, e.g., that the types of each of the function 12588 /// parameters are complete. 12589 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 12590 bool CheckParameterNames) { 12591 bool HasInvalidParm = false; 12592 for (ParmVarDecl *Param : Parameters) { 12593 // C99 6.7.5.3p4: the parameters in a parameter type list in a 12594 // function declarator that is part of a function definition of 12595 // that function shall not have incomplete type. 12596 // 12597 // This is also C++ [dcl.fct]p6. 12598 if (!Param->isInvalidDecl() && 12599 RequireCompleteType(Param->getLocation(), Param->getType(), 12600 diag::err_typecheck_decl_incomplete_type)) { 12601 Param->setInvalidDecl(); 12602 HasInvalidParm = true; 12603 } 12604 12605 // C99 6.9.1p5: If the declarator includes a parameter type list, the 12606 // declaration of each parameter shall include an identifier. 
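    // e.g. (illustrative) defining `void f(int) { }` is an error in C because
    // the parameter has no identifier, while the same definition is valid C++.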
12607 if (CheckParameterNames && 12608 Param->getIdentifier() == nullptr && 12609 !Param->isImplicit() && 12610 !getLangOpts().CPlusPlus) 12611 Diag(Param->getLocation(), diag::err_parameter_name_omitted); 12612 12613 // C99 6.7.5.3p12: 12614 // If the function declarator is not part of a definition of that 12615 // function, parameters may have incomplete type and may use the [*] 12616 // notation in their sequences of declarator specifiers to specify 12617 // variable length array types. 12618 QualType PType = Param->getOriginalType(); 12619 // FIXME: This diagnostic should point the '[*]' if source-location 12620 // information is added for it. 12621 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 12622 12623 // If the parameter is a c++ class type and it has to be destructed in the 12624 // callee function, declare the destructor so that it can be called by the 12625 // callee function. Do not perform any direct access check on the dtor here. 12626 if (!Param->isInvalidDecl()) { 12627 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 12628 if (!ClassDecl->isInvalidDecl() && 12629 !ClassDecl->hasIrrelevantDestructor() && 12630 !ClassDecl->isDependentContext() && 12631 ClassDecl->isParamDestroyedInCallee()) { 12632 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 12633 MarkFunctionReferenced(Param->getLocation(), Destructor); 12634 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 12635 } 12636 } 12637 } 12638 12639 // Parameters with the pass_object_size attribute only need to be marked 12640 // constant at function definitions. Because we lack information about 12641 // whether we're on a declaration or definition when we're instantiating the 12642 // attribute, we need to check for constness here. 12643 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 12644 if (!Param->getType().isConstQualified()) 12645 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 12646 << Attr->getSpelling() << 1; 12647 12648 // Check for parameter names shadowing fields from the class. 12649 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 12650 // The owning context for the parameter should be the function, but we 12651 // want to see if this function's declaration context is a record. 12652 DeclContext *DC = Param->getDeclContext(); 12653 if (DC && DC->isFunctionOrMethod()) { 12654 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 12655 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 12656 RD, /*DeclIsField*/ false); 12657 } 12658 } 12659 } 12660 12661 return HasInvalidParm; 12662 } 12663 12664 /// A helper function to get the alignment of a Decl referred to by DeclRefExpr 12665 /// or MemberExpr. 12666 static CharUnits getDeclAlign(Expr *E, CharUnits TypeAlign, 12667 ASTContext &Context) { 12668 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) 12669 return Context.getDeclAlign(DRE->getDecl()); 12670 12671 if (const auto *ME = dyn_cast<MemberExpr>(E)) 12672 return Context.getDeclAlign(ME->getMemberDecl()); 12673 12674 return TypeAlign; 12675 } 12676 12677 /// CheckCastAlign - Implements -Wcast-align, which warns when a 12678 /// pointer cast increases the alignment requirements. 12679 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 12680 // This is actually a lot of work to potentially be doing on every 12681 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 
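  // An illustrative trigger, with -Wcast-align enabled (alignment values are
  // target-dependent):
  //
  //   char Buf[64];
  //   int *P = (int *)Buf;  // warns: required alignment increases, e.g. 1 -> 4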
12682 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 12683 return; 12684 12685 // Ignore dependent types. 12686 if (T->isDependentType() || Op->getType()->isDependentType()) 12687 return; 12688 12689 // Require that the destination be a pointer type. 12690 const PointerType *DestPtr = T->getAs<PointerType>(); 12691 if (!DestPtr) return; 12692 12693 // If the destination has alignment 1, we're done. 12694 QualType DestPointee = DestPtr->getPointeeType(); 12695 if (DestPointee->isIncompleteType()) return; 12696 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 12697 if (DestAlign.isOne()) return; 12698 12699 // Require that the source be a pointer type. 12700 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 12701 if (!SrcPtr) return; 12702 QualType SrcPointee = SrcPtr->getPointeeType(); 12703 12704 // Whitelist casts from cv void*. We already implicitly 12705 // whitelisted casts to cv void*, since they have alignment 1. 12706 // Also whitelist casts involving incomplete types, which implicitly 12707 // includes 'void'. 12708 if (SrcPointee->isIncompleteType()) return; 12709 12710 CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee); 12711 12712 if (auto *CE = dyn_cast<CastExpr>(Op)) { 12713 if (CE->getCastKind() == CK_ArrayToPointerDecay) 12714 SrcAlign = getDeclAlign(CE->getSubExpr(), SrcAlign, Context); 12715 } else if (auto *UO = dyn_cast<UnaryOperator>(Op)) { 12716 if (UO->getOpcode() == UO_AddrOf) 12717 SrcAlign = getDeclAlign(UO->getSubExpr(), SrcAlign, Context); 12718 } 12719 12720 if (SrcAlign >= DestAlign) return; 12721 12722 Diag(TRange.getBegin(), diag::warn_cast_align) 12723 << Op->getType() << T 12724 << static_cast<unsigned>(SrcAlign.getQuantity()) 12725 << static_cast<unsigned>(DestAlign.getQuantity()) 12726 << TRange << Op->getSourceRange(); 12727 } 12728 12729 /// Check whether this array fits the idiom of a size-one tail padded 12730 /// array member of a struct. 12731 /// 12732 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 12733 /// commonly used to emulate flexible arrays in C89 code. 12734 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 12735 const NamedDecl *ND) { 12736 if (Size != 1 || !ND) return false; 12737 12738 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 12739 if (!FD) return false; 12740 12741 // Don't consider sizes resulting from macro expansions or template argument 12742 // substitution to form C89 tail-padded arrays. 12743 12744 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 12745 while (TInfo) { 12746 TypeLoc TL = TInfo->getTypeLoc(); 12747 // Look through typedefs. 12748 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 12749 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 12750 TInfo = TDL->getTypeSourceInfo(); 12751 continue; 12752 } 12753 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 12754 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 12755 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 12756 return false; 12757 } 12758 break; 12759 } 12760 12761 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 12762 if (!RD) return false; 12763 if (RD->isUnion()) return false; 12764 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 12765 if (!CRD->isStandardLayout()) return false; 12766 } 12767 12768 // See if this is the last field decl in the record. 
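// The C89 idiom being exempted looks like this (illustrative):
//
//   struct Packet {
//     unsigned Length;
//     char Data[1];  // trailing size-one array, intentionally indexed past
//   };               // its declared bound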
12769 const Decl *D = FD; 12770 while ((D = D->getNextDeclInContext())) 12771 if (isa<FieldDecl>(D)) 12772 return false; 12773 return true; 12774 } 12775 12776 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 12777 const ArraySubscriptExpr *ASE, 12778 bool AllowOnePastEnd, bool IndexNegated) { 12779 // Already diagnosed by the constant evaluator. 12780 if (isConstantEvaluated()) 12781 return; 12782 12783 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 12784 if (IndexExpr->isValueDependent()) 12785 return; 12786 12787 const Type *EffectiveType = 12788 BaseExpr->getType()->getPointeeOrArrayElementType(); 12789 BaseExpr = BaseExpr->IgnoreParenCasts(); 12790 const ConstantArrayType *ArrayTy = 12791 Context.getAsConstantArrayType(BaseExpr->getType()); 12792 12793 if (!ArrayTy) 12794 return; 12795 12796 const Type *BaseType = ArrayTy->getElementType().getTypePtr(); 12797 if (EffectiveType->isDependentType() || BaseType->isDependentType()) 12798 return; 12799 12800 Expr::EvalResult Result; 12801 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 12802 return; 12803 12804 llvm::APSInt index = Result.Val.getInt(); 12805 if (IndexNegated) 12806 index = -index; 12807 12808 const NamedDecl *ND = nullptr; 12809 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 12810 ND = DRE->getDecl(); 12811 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 12812 ND = ME->getMemberDecl(); 12813 12814 if (index.isUnsigned() || !index.isNegative()) { 12815 // It is possible that the type of the base expression after 12816 // IgnoreParenCasts is incomplete, even though the type of the base 12817 // expression before IgnoreParenCasts is complete (see PR39746 for an 12818 // example). In this case we have no information about whether the array 12819 // access exceeds the array bounds. However we can still diagnose an array 12820 // access which precedes the array bounds. 12821 if (BaseType->isIncompleteType()) 12822 return; 12823 12824 llvm::APInt size = ArrayTy->getSize(); 12825 if (!size.isStrictlyPositive()) 12826 return; 12827 12828 if (BaseType != EffectiveType) { 12829 // Make sure we're comparing apples to apples when comparing index to size 12830 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 12831 uint64_t array_typesize = Context.getTypeSize(BaseType); 12832 // Handle ptrarith_typesize being zero, such as when casting to void* 12833 if (!ptrarith_typesize) ptrarith_typesize = 1; 12834 if (ptrarith_typesize != array_typesize) { 12835 // There's a cast to a different size type involved 12836 uint64_t ratio = array_typesize / ptrarith_typesize; 12837 // TODO: Be smarter about handling cases where array_typesize is not a 12838 // multiple of ptrarith_typesize 12839 if (ptrarith_typesize * ratio == array_typesize) 12840 size *= llvm::APInt(size.getBitWidth(), ratio); 12841 } 12842 } 12843 12844 if (size.getBitWidth() > index.getBitWidth()) 12845 index = index.zext(size.getBitWidth()); 12846 else if (size.getBitWidth() < index.getBitWidth()) 12847 size = size.zext(index.getBitWidth()); 12848 12849 // For array subscripting the index must be less than size, but for pointer 12850 // arithmetic also allow the index (offset) to be equal to size since 12851 // computing the next address after the end of the array is legal and 12852 // commonly done e.g. in C++ iterators and range-based for loops. 12853 if (AllowOnePastEnd ? 
index.ule(size) : index.ult(size)) 12854 return; 12855 12856 // Also don't warn for arrays of size 1 which are members of some 12857 // structure. These are often used to approximate flexible arrays in C89 12858 // code. 12859 if (IsTailPaddedMemberArray(*this, size, ND)) 12860 return; 12861 12862 // Suppress the warning if the subscript expression (as identified by the 12863 // ']' location) and the index expression are both from macro expansions 12864 // within a system header. 12865 if (ASE) { 12866 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 12867 ASE->getRBracketLoc()); 12868 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 12869 SourceLocation IndexLoc = 12870 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 12871 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 12872 return; 12873 } 12874 } 12875 12876 unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds; 12877 if (ASE) 12878 DiagID = diag::warn_array_index_exceeds_bounds; 12879 12880 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 12881 PDiag(DiagID) << index.toString(10, true) 12882 << size.toString(10, true) 12883 << (unsigned)size.getLimitedValue(~0U) 12884 << IndexExpr->getSourceRange()); 12885 } else { 12886 unsigned DiagID = diag::warn_array_index_precedes_bounds; 12887 if (!ASE) { 12888 DiagID = diag::warn_ptr_arith_precedes_bounds; 12889 if (index.isNegative()) index = -index; 12890 } 12891 12892 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 12893 PDiag(DiagID) << index.toString(10, true) 12894 << IndexExpr->getSourceRange()); 12895 } 12896 12897 if (!ND) { 12898 // Try harder to find a NamedDecl to point at in the note. 12899 while (const ArraySubscriptExpr *ASE = 12900 dyn_cast<ArraySubscriptExpr>(BaseExpr)) 12901 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 12902 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 12903 ND = DRE->getDecl(); 12904 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 12905 ND = ME->getMemberDecl(); 12906 } 12907 12908 if (ND) 12909 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 12910 PDiag(diag::note_array_index_out_of_bounds) 12911 << ND->getDeclName()); 12912 } 12913 12914 void Sema::CheckArrayAccess(const Expr *expr) { 12915 int AllowOnePastEnd = 0; 12916 while (expr) { 12917 expr = expr->IgnoreParenImpCasts(); 12918 switch (expr->getStmtClass()) { 12919 case Stmt::ArraySubscriptExprClass: { 12920 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 12921 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 12922 AllowOnePastEnd > 0); 12923 expr = ASE->getBase(); 12924 break; 12925 } 12926 case Stmt::MemberExprClass: { 12927 expr = cast<MemberExpr>(expr)->getBase(); 12928 break; 12929 } 12930 case Stmt::OMPArraySectionExprClass: { 12931 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 12932 if (ASE->getLowerBound()) 12933 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 12934 /*ASE=*/nullptr, AllowOnePastEnd > 0); 12935 return; 12936 } 12937 case Stmt::UnaryOperatorClass: { 12938 // Only unwrap the * and & unary operators 12939 const UnaryOperator *UO = cast<UnaryOperator>(expr); 12940 expr = UO->getSubExpr(); 12941 switch (UO->getOpcode()) { 12942 case UO_AddrOf: 12943 AllowOnePastEnd++; 12944 break; 12945 case UO_Deref: 12946 AllowOnePastEnd--; 12947 break; 12948 default: 12949 return; 12950 } 12951 break; 12952 } 12953 case Stmt::ConditionalOperatorClass: { 12954 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 12955 if (const Expr *lhs = cond->getLHS()) 12956 
CheckArrayAccess(lhs); 12957 if (const Expr *rhs = cond->getRHS()) 12958 CheckArrayAccess(rhs); 12959 return; 12960 } 12961 case Stmt::CXXOperatorCallExprClass: { 12962 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 12963 for (const auto *Arg : OCE->arguments()) 12964 CheckArrayAccess(Arg); 12965 return; 12966 } 12967 default: 12968 return; 12969 } 12970 } 12971 } 12972 12973 //===--- CHECK: Objective-C retain cycles ----------------------------------// 12974 12975 namespace { 12976 12977 struct RetainCycleOwner { 12978 VarDecl *Variable = nullptr; 12979 SourceRange Range; 12980 SourceLocation Loc; 12981 bool Indirect = false; 12982 12983 RetainCycleOwner() = default; 12984 12985 void setLocsFrom(Expr *e) { 12986 Loc = e->getExprLoc(); 12987 Range = e->getSourceRange(); 12988 } 12989 }; 12990 12991 } // namespace 12992 12993 /// Consider whether capturing the given variable can possibly lead to 12994 /// a retain cycle. 12995 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 12996 // In ARC, it's captured strongly iff the variable has __strong 12997 // lifetime. In MRR, it's captured strongly if the variable is 12998 // __block and has an appropriate type. 12999 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 13000 return false; 13001 13002 owner.Variable = var; 13003 if (ref) 13004 owner.setLocsFrom(ref); 13005 return true; 13006 } 13007 13008 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 13009 while (true) { 13010 e = e->IgnoreParens(); 13011 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 13012 switch (cast->getCastKind()) { 13013 case CK_BitCast: 13014 case CK_LValueBitCast: 13015 case CK_LValueToRValue: 13016 case CK_ARCReclaimReturnedObject: 13017 e = cast->getSubExpr(); 13018 continue; 13019 13020 default: 13021 return false; 13022 } 13023 } 13024 13025 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 13026 ObjCIvarDecl *ivar = ref->getDecl(); 13027 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 13028 return false; 13029 13030 // Try to find a retain cycle in the base. 13031 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 13032 return false; 13033 13034 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 13035 owner.Indirect = true; 13036 return true; 13037 } 13038 13039 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 13040 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 13041 if (!var) return false; 13042 return considerVariable(var, ref, owner); 13043 } 13044 13045 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 13046 if (member->isArrow()) return false; 13047 13048 // Don't count this as an indirect ownership. 13049 e = member->getBase(); 13050 continue; 13051 } 13052 13053 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 13054 // Only pay attention to pseudo-objects on property references. 
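      // For example (illustrative, hypothetical names), in a send such as
      //   [self.callbacks addHandler:^{ [self fire]; }];
      // the receiver 'self.callbacks' is a property reference whose
      // syntactic form is the ObjCPropertyRefExpr wrapped by this
      // PseudoObjectExpr; we look through it to reach the underlying
      // strongly-retained owner.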
13055 ObjCPropertyRefExpr *pre 13056 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 13057 ->IgnoreParens()); 13058 if (!pre) return false; 13059 if (pre->isImplicitProperty()) return false; 13060 ObjCPropertyDecl *property = pre->getExplicitProperty(); 13061 if (!property->isRetaining() && 13062 !(property->getPropertyIvarDecl() && 13063 property->getPropertyIvarDecl()->getType() 13064 .getObjCLifetime() == Qualifiers::OCL_Strong)) 13065 return false; 13066 13067 owner.Indirect = true; 13068 if (pre->isSuperReceiver()) { 13069 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 13070 if (!owner.Variable) 13071 return false; 13072 owner.Loc = pre->getLocation(); 13073 owner.Range = pre->getSourceRange(); 13074 return true; 13075 } 13076 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 13077 ->getSourceExpr()); 13078 continue; 13079 } 13080 13081 // Array ivars? 13082 13083 return false; 13084 } 13085 } 13086 13087 namespace { 13088 13089 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 13090 ASTContext &Context; 13091 VarDecl *Variable; 13092 Expr *Capturer = nullptr; 13093 bool VarWillBeReased = false; 13094 13095 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 13096 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 13097 Context(Context), Variable(variable) {} 13098 13099 void VisitDeclRefExpr(DeclRefExpr *ref) { 13100 if (ref->getDecl() == Variable && !Capturer) 13101 Capturer = ref; 13102 } 13103 13104 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 13105 if (Capturer) return; 13106 Visit(ref->getBase()); 13107 if (Capturer && ref->isFreeIvar()) 13108 Capturer = ref; 13109 } 13110 13111 void VisitBlockExpr(BlockExpr *block) { 13112 // Look inside nested blocks 13113 if (block->getBlockDecl()->capturesVariable(Variable)) 13114 Visit(block->getBlockDecl()->getBody()); 13115 } 13116 13117 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 13118 if (Capturer) return; 13119 if (OVE->getSourceExpr()) 13120 Visit(OVE->getSourceExpr()); 13121 } 13122 13123 void VisitBinaryOperator(BinaryOperator *BinOp) { 13124 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 13125 return; 13126 Expr *LHS = BinOp->getLHS(); 13127 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 13128 if (DRE->getDecl() != Variable) 13129 return; 13130 if (Expr *RHS = BinOp->getRHS()) { 13131 RHS = RHS->IgnoreParenCasts(); 13132 llvm::APSInt Value; 13133 VarWillBeReased = 13134 (RHS && RHS->isIntegerConstantExpr(Value, Context) && Value == 0); 13135 } 13136 } 13137 } 13138 }; 13139 13140 } // namespace 13141 13142 /// Check whether the given argument is a block which captures a 13143 /// variable. 13144 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 13145 assert(owner.Variable && owner.Loc.isValid()); 13146 13147 e = e->IgnoreParenCasts(); 13148 13149 // Look through [^{...} copy] and Block_copy(^{...}). 
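  // For instance (illustrative), either spelling below should still be
  // inspected for a capture of the owner:
  //
  //   owner.handler = [^{ [owner fire]; } copy];
  //   owner.handler = Block_copy(^{ [owner fire]; });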
13150 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 13151 Selector Cmd = ME->getSelector(); 13152 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 13153 e = ME->getInstanceReceiver(); 13154 if (!e) 13155 return nullptr; 13156 e = e->IgnoreParenCasts(); 13157 } 13158 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 13159 if (CE->getNumArgs() == 1) { 13160 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 13161 if (Fn) { 13162 const IdentifierInfo *FnI = Fn->getIdentifier(); 13163 if (FnI && FnI->isStr("_Block_copy")) { 13164 e = CE->getArg(0)->IgnoreParenCasts(); 13165 } 13166 } 13167 } 13168 } 13169 13170 BlockExpr *block = dyn_cast<BlockExpr>(e); 13171 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 13172 return nullptr; 13173 13174 FindCaptureVisitor visitor(S.Context, owner.Variable); 13175 visitor.Visit(block->getBlockDecl()->getBody()); 13176 return visitor.VarWillBeReased ? nullptr : visitor.Capturer; 13177 } 13178 13179 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 13180 RetainCycleOwner &owner) { 13181 assert(capturer); 13182 assert(owner.Variable && owner.Loc.isValid()); 13183 13184 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 13185 << owner.Variable << capturer->getSourceRange(); 13186 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 13187 << owner.Indirect << owner.Range; 13188 } 13189 13190 /// Check for a keyword selector that starts with the word 'add' or 13191 /// 'set'. 13192 static bool isSetterLikeSelector(Selector sel) { 13193 if (sel.isUnarySelector()) return false; 13194 13195 StringRef str = sel.getNameForSlot(0); 13196 while (!str.empty() && str.front() == '_') str = str.substr(1); 13197 if (str.startswith("set")) 13198 str = str.substr(3); 13199 else if (str.startswith("add")) { 13200 // Specially whitelist 'addOperationWithBlock:'. 
13201 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 13202 return false; 13203 str = str.substr(3); 13204 } 13205 else 13206 return false; 13207 13208 if (str.empty()) return true; 13209 return !isLowercase(str.front()); 13210 } 13211 13212 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 13213 ObjCMessageExpr *Message) { 13214 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 13215 Message->getReceiverInterface(), 13216 NSAPI::ClassId_NSMutableArray); 13217 if (!IsMutableArray) { 13218 return None; 13219 } 13220 13221 Selector Sel = Message->getSelector(); 13222 13223 Optional<NSAPI::NSArrayMethodKind> MKOpt = 13224 S.NSAPIObj->getNSArrayMethodKind(Sel); 13225 if (!MKOpt) { 13226 return None; 13227 } 13228 13229 NSAPI::NSArrayMethodKind MK = *MKOpt; 13230 13231 switch (MK) { 13232 case NSAPI::NSMutableArr_addObject: 13233 case NSAPI::NSMutableArr_insertObjectAtIndex: 13234 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 13235 return 0; 13236 case NSAPI::NSMutableArr_replaceObjectAtIndex: 13237 return 1; 13238 13239 default: 13240 return None; 13241 } 13242 13243 return None; 13244 } 13245 13246 static 13247 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 13248 ObjCMessageExpr *Message) { 13249 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 13250 Message->getReceiverInterface(), 13251 NSAPI::ClassId_NSMutableDictionary); 13252 if (!IsMutableDictionary) { 13253 return None; 13254 } 13255 13256 Selector Sel = Message->getSelector(); 13257 13258 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 13259 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 13260 if (!MKOpt) { 13261 return None; 13262 } 13263 13264 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 13265 13266 switch (MK) { 13267 case NSAPI::NSMutableDict_setObjectForKey: 13268 case NSAPI::NSMutableDict_setValueForKey: 13269 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 13270 return 0; 13271 13272 default: 13273 return None; 13274 } 13275 13276 return None; 13277 } 13278 13279 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 13280 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 13281 Message->getReceiverInterface(), 13282 NSAPI::ClassId_NSMutableSet); 13283 13284 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 13285 Message->getReceiverInterface(), 13286 NSAPI::ClassId_NSMutableOrderedSet); 13287 if (!IsMutableSet && !IsMutableOrderedSet) { 13288 return None; 13289 } 13290 13291 Selector Sel = Message->getSelector(); 13292 13293 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 13294 if (!MKOpt) { 13295 return None; 13296 } 13297 13298 NSAPI::NSSetMethodKind MK = *MKOpt; 13299 13300 switch (MK) { 13301 case NSAPI::NSMutableSet_addObject: 13302 case NSAPI::NSOrderedSet_setObjectAtIndex: 13303 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 13304 case NSAPI::NSOrderedSet_insertObjectAtIndex: 13305 return 0; 13306 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 13307 return 1; 13308 } 13309 13310 return None; 13311 } 13312 13313 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 13314 if (!Message->isInstanceMessage()) { 13315 return; 13316 } 13317 13318 Optional<int> ArgOpt; 13319 13320 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 13321 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 13322 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 13323 return; 13324 } 13325 13326 int ArgIndex = *ArgOpt; 13327 13328 Expr *Arg = 
Message->getArg(ArgIndex)->IgnoreImpCasts(); 13329 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 13330 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 13331 } 13332 13333 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 13334 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 13335 if (ArgRE->isObjCSelfExpr()) { 13336 Diag(Message->getSourceRange().getBegin(), 13337 diag::warn_objc_circular_container) 13338 << ArgRE->getDecl() << StringRef("'super'"); 13339 } 13340 } 13341 } else { 13342 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 13343 13344 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 13345 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 13346 } 13347 13348 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 13349 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 13350 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 13351 ValueDecl *Decl = ReceiverRE->getDecl(); 13352 Diag(Message->getSourceRange().getBegin(), 13353 diag::warn_objc_circular_container) 13354 << Decl << Decl; 13355 if (!ArgRE->isObjCSelfExpr()) { 13356 Diag(Decl->getLocation(), 13357 diag::note_objc_circular_container_declared_here) 13358 << Decl; 13359 } 13360 } 13361 } 13362 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 13363 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 13364 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 13365 ObjCIvarDecl *Decl = IvarRE->getDecl(); 13366 Diag(Message->getSourceRange().getBegin(), 13367 diag::warn_objc_circular_container) 13368 << Decl << Decl; 13369 Diag(Decl->getLocation(), 13370 diag::note_objc_circular_container_declared_here) 13371 << Decl; 13372 } 13373 } 13374 } 13375 } 13376 } 13377 13378 /// Check a message send to see if it's likely to cause a retain cycle. 13379 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 13380 // Only check instance methods whose selector looks like a setter. 13381 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 13382 return; 13383 13384 // Try to find a variable that the receiver is strongly owned by. 13385 RetainCycleOwner owner; 13386 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 13387 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 13388 return; 13389 } else { 13390 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 13391 owner.Variable = getCurMethodDecl()->getSelfDecl(); 13392 owner.Loc = msg->getSuperLoc(); 13393 owner.Range = msg->getSuperLoc(); 13394 } 13395 13396 // Check whether the receiver is captured by any of the arguments. 13397 const ObjCMethodDecl *MD = msg->getMethodDecl(); 13398 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 13399 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 13400 // noescape blocks should not be retained by the method. 13401 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 13402 continue; 13403 return diagnoseRetainCycle(*this, capturer, owner); 13404 } 13405 } 13406 } 13407 13408 /// Check a property assign to see if it's likely to cause a retain cycle. 
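///
/// For example (illustrative), an assignment along the lines of
///   self.completion = ^{ [self finish]; };
/// stores a block that captures its own strong owner, assuming the property
/// retains its value.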
13409 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 13410 RetainCycleOwner owner; 13411 if (!findRetainCycleOwner(*this, receiver, owner)) 13412 return; 13413 13414 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 13415 diagnoseRetainCycle(*this, capturer, owner); 13416 } 13417 13418 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 13419 RetainCycleOwner Owner; 13420 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 13421 return; 13422 13423 // Because we don't have an expression for the variable, we have to set the 13424 // location explicitly here. 13425 Owner.Loc = Var->getLocation(); 13426 Owner.Range = Var->getSourceRange(); 13427 13428 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 13429 diagnoseRetainCycle(*this, Capturer, Owner); 13430 } 13431 13432 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 13433 Expr *RHS, bool isProperty) { 13434 // Check if RHS is an Objective-C object literal, which also can get 13435 // immediately zapped in a weak reference. Note that we explicitly 13436 // allow ObjCStringLiterals, since those are designed to never really die. 13437 RHS = RHS->IgnoreParenImpCasts(); 13438 13439 // This enum needs to match with the 'select' in 13440 // warn_objc_arc_literal_assign (off-by-1). 13441 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 13442 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 13443 return false; 13444 13445 S.Diag(Loc, diag::warn_arc_literal_assign) 13446 << (unsigned) Kind 13447 << (isProperty ? 0 : 1) 13448 << RHS->getSourceRange(); 13449 13450 return true; 13451 } 13452 13453 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 13454 Qualifiers::ObjCLifetime LT, 13455 Expr *RHS, bool isProperty) { 13456 // Strip off any implicit cast added to get to the one ARC-specific. 13457 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 13458 if (cast->getCastKind() == CK_ARCConsumeObject) { 13459 S.Diag(Loc, diag::warn_arc_retained_assign) 13460 << (LT == Qualifiers::OCL_ExplicitNone) 13461 << (isProperty ? 0 : 1) 13462 << RHS->getSourceRange(); 13463 return true; 13464 } 13465 RHS = cast->getSubExpr(); 13466 } 13467 13468 if (LT == Qualifiers::OCL_Weak && 13469 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 13470 return true; 13471 13472 return false; 13473 } 13474 13475 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 13476 QualType LHS, Expr *RHS) { 13477 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 13478 13479 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 13480 return false; 13481 13482 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 13483 return true; 13484 13485 return false; 13486 } 13487 13488 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 13489 Expr *LHS, Expr *RHS) { 13490 QualType LHSType; 13491 // PropertyRef on LHS type need be directly obtained from 13492 // its declaration as it has a PseudoType. 
13493 ObjCPropertyRefExpr *PRE 13494 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 13495 if (PRE && !PRE->isImplicitProperty()) { 13496 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 13497 if (PD) 13498 LHSType = PD->getType(); 13499 } 13500 13501 if (LHSType.isNull()) 13502 LHSType = LHS->getType(); 13503 13504 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 13505 13506 if (LT == Qualifiers::OCL_Weak) { 13507 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 13508 getCurFunction()->markSafeWeakUse(LHS); 13509 } 13510 13511 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 13512 return; 13513 13514 // FIXME. Check for other life times. 13515 if (LT != Qualifiers::OCL_None) 13516 return; 13517 13518 if (PRE) { 13519 if (PRE->isImplicitProperty()) 13520 return; 13521 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 13522 if (!PD) 13523 return; 13524 13525 unsigned Attributes = PD->getPropertyAttributes(); 13526 if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) { 13527 // when 'assign' attribute was not explicitly specified 13528 // by user, ignore it and rely on property type itself 13529 // for lifetime info. 13530 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 13531 if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) && 13532 LHSType->isObjCRetainableType()) 13533 return; 13534 13535 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 13536 if (cast->getCastKind() == CK_ARCConsumeObject) { 13537 Diag(Loc, diag::warn_arc_retained_property_assign) 13538 << RHS->getSourceRange(); 13539 return; 13540 } 13541 RHS = cast->getSubExpr(); 13542 } 13543 } 13544 else if (Attributes & ObjCPropertyDecl::OBJC_PR_weak) { 13545 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 13546 return; 13547 } 13548 } 13549 } 13550 13551 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 13552 13553 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 13554 SourceLocation StmtLoc, 13555 const NullStmt *Body) { 13556 // Do not warn if the body is a macro that expands to nothing, e.g: 13557 // 13558 // #define CALL(x) 13559 // if (condition) 13560 // CALL(0); 13561 if (Body->hasLeadingEmptyMacro()) 13562 return false; 13563 13564 // Get line numbers of statement and body. 13565 bool StmtLineInvalid; 13566 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 13567 &StmtLineInvalid); 13568 if (StmtLineInvalid) 13569 return false; 13570 13571 bool BodyLineInvalid; 13572 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 13573 &BodyLineInvalid); 13574 if (BodyLineInvalid) 13575 return false; 13576 13577 // Warn if null statement and body are on the same line. 13578 if (StmtLine != BodyLine) 13579 return false; 13580 13581 return true; 13582 } 13583 13584 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 13585 const Stmt *Body, 13586 unsigned DiagID) { 13587 // Since this is a syntactic check, don't emit diagnostic for template 13588 // instantiations, this just adds noise. 13589 if (CurrentInstantiationScope) 13590 return; 13591 13592 // The body should be a null statement. 13593 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 13594 if (!NBody) 13595 return; 13596 13597 // Do the usual checks. 
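  // (A typical hit, sketched for illustration: `if (cond); do_something();`
  // written on one line, where the stray semicolon forms the null statement
  // body that gets diagnosed below.)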
13598 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 13599 return; 13600 13601 Diag(NBody->getSemiLoc(), DiagID); 13602 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 13603 } 13604 13605 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 13606 const Stmt *PossibleBody) { 13607 assert(!CurrentInstantiationScope); // Ensured by caller 13608 13609 SourceLocation StmtLoc; 13610 const Stmt *Body; 13611 unsigned DiagID; 13612 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 13613 StmtLoc = FS->getRParenLoc(); 13614 Body = FS->getBody(); 13615 DiagID = diag::warn_empty_for_body; 13616 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 13617 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 13618 Body = WS->getBody(); 13619 DiagID = diag::warn_empty_while_body; 13620 } else 13621 return; // Neither `for' nor `while'. 13622 13623 // The body should be a null statement. 13624 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 13625 if (!NBody) 13626 return; 13627 13628 // Skip expensive checks if diagnostic is disabled. 13629 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 13630 return; 13631 13632 // Do the usual checks. 13633 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 13634 return; 13635 13636 // `for(...);' and `while(...);' are popular idioms, so in order to keep 13637 // noise level low, emit diagnostics only if for/while is followed by a 13638 // CompoundStmt, e.g.: 13639 // for (int i = 0; i < n; i++); 13640 // { 13641 // a(i); 13642 // } 13643 // or if for/while is followed by a statement with more indentation 13644 // than for/while itself: 13645 // for (int i = 0; i < n; i++); 13646 // a(i); 13647 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 13648 if (!ProbableTypo) { 13649 bool BodyColInvalid; 13650 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 13651 PossibleBody->getBeginLoc(), &BodyColInvalid); 13652 if (BodyColInvalid) 13653 return; 13654 13655 bool StmtColInvalid; 13656 unsigned StmtCol = 13657 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 13658 if (StmtColInvalid) 13659 return; 13660 13661 if (BodyCol > StmtCol) 13662 ProbableTypo = true; 13663 } 13664 13665 if (ProbableTypo) { 13666 Diag(NBody->getSemiLoc(), DiagID); 13667 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 13668 } 13669 } 13670 13671 //===--- CHECK: Warn on self move with std::move. -------------------------===// 13672 13673 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 13674 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 13675 SourceLocation OpLoc) { 13676 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) 13677 return; 13678 13679 if (inTemplateInstantiation()) 13680 return; 13681 13682 // Strip parens and casts away. 13683 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 13684 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 13685 13686 // Check for a call expression 13687 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 13688 if (!CE || CE->getNumArgs() != 1) 13689 return; 13690 13691 // Check for a call to std::move 13692 if (!CE->isCallToStdMove()) 13693 return; 13694 13695 // Get argument from std::move 13696 RHSExpr = CE->getArg(0); 13697 13698 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 13699 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 13700 13701 // Two DeclRefExpr's, check that the decls are the same. 
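  // The simple case (illustrative): `x = std::move(x);`, where both sides
  // are plain DeclRefExprs naming the same variable.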
13702 if (LHSDeclRef && RHSDeclRef) { 13703 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 13704 return; 13705 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 13706 RHSDeclRef->getDecl()->getCanonicalDecl()) 13707 return; 13708 13709 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 13710 << LHSExpr->getSourceRange() 13711 << RHSExpr->getSourceRange(); 13712 return; 13713 } 13714 13715 // Member variables require a different approach to check for self moves. 13716 // MemberExpr's are the same if every nested MemberExpr refers to the same 13717 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 13718 // the base Expr's are CXXThisExpr's. 13719 const Expr *LHSBase = LHSExpr; 13720 const Expr *RHSBase = RHSExpr; 13721 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 13722 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 13723 if (!LHSME || !RHSME) 13724 return; 13725 13726 while (LHSME && RHSME) { 13727 if (LHSME->getMemberDecl()->getCanonicalDecl() != 13728 RHSME->getMemberDecl()->getCanonicalDecl()) 13729 return; 13730 13731 LHSBase = LHSME->getBase(); 13732 RHSBase = RHSME->getBase(); 13733 LHSME = dyn_cast<MemberExpr>(LHSBase); 13734 RHSME = dyn_cast<MemberExpr>(RHSBase); 13735 } 13736 13737 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 13738 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 13739 if (LHSDeclRef && RHSDeclRef) { 13740 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 13741 return; 13742 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 13743 RHSDeclRef->getDecl()->getCanonicalDecl()) 13744 return; 13745 13746 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 13747 << LHSExpr->getSourceRange() 13748 << RHSExpr->getSourceRange(); 13749 return; 13750 } 13751 13752 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 13753 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 13754 << LHSExpr->getSourceRange() 13755 << RHSExpr->getSourceRange(); 13756 } 13757 13758 //===--- Layout compatibility ----------------------------------------------// 13759 13760 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 13761 13762 /// Check if two enumeration types are layout-compatible. 13763 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 13764 // C++11 [dcl.enum] p8: 13765 // Two enumeration types are layout-compatible if they have the same 13766 // underlying type. 13767 return ED1->isComplete() && ED2->isComplete() && 13768 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 13769 } 13770 13771 /// Check if two fields are layout-compatible. 13772 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 13773 FieldDecl *Field2) { 13774 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 13775 return false; 13776 13777 if (Field1->isBitField() != Field2->isBitField()) 13778 return false; 13779 13780 if (Field1->isBitField()) { 13781 // Make sure that the bit-fields are the same length. 13782 unsigned Bits1 = Field1->getBitWidthValue(C); 13783 unsigned Bits2 = Field2->getBitWidthValue(C); 13784 13785 if (Bits1 != Bits2) 13786 return false; 13787 } 13788 13789 return true; 13790 } 13791 13792 /// Check if two standard-layout structs are layout-compatible. 13793 /// (C++11 [class.mem] p17) 13794 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 13795 RecordDecl *RD2) { 13796 // If both records are C++ classes, check that base classes match. 
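  // (Illustrative example: `struct A { int I; char C; };` and
  //  `struct B { int X; char Y; };` are layout-compatible; adding a base
  //  class to only one of them would make this check fail.)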
13797 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 13798 // If one of records is a CXXRecordDecl we are in C++ mode, 13799 // thus the other one is a CXXRecordDecl, too. 13800 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 13801 // Check number of base classes. 13802 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 13803 return false; 13804 13805 // Check the base classes. 13806 for (CXXRecordDecl::base_class_const_iterator 13807 Base1 = D1CXX->bases_begin(), 13808 BaseEnd1 = D1CXX->bases_end(), 13809 Base2 = D2CXX->bases_begin(); 13810 Base1 != BaseEnd1; 13811 ++Base1, ++Base2) { 13812 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 13813 return false; 13814 } 13815 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 13816 // If only RD2 is a C++ class, it should have zero base classes. 13817 if (D2CXX->getNumBases() > 0) 13818 return false; 13819 } 13820 13821 // Check the fields. 13822 RecordDecl::field_iterator Field2 = RD2->field_begin(), 13823 Field2End = RD2->field_end(), 13824 Field1 = RD1->field_begin(), 13825 Field1End = RD1->field_end(); 13826 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 13827 if (!isLayoutCompatible(C, *Field1, *Field2)) 13828 return false; 13829 } 13830 if (Field1 != Field1End || Field2 != Field2End) 13831 return false; 13832 13833 return true; 13834 } 13835 13836 /// Check if two standard-layout unions are layout-compatible. 13837 /// (C++11 [class.mem] p18) 13838 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 13839 RecordDecl *RD2) { 13840 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 13841 for (auto *Field2 : RD2->fields()) 13842 UnmatchedFields.insert(Field2); 13843 13844 for (auto *Field1 : RD1->fields()) { 13845 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 13846 I = UnmatchedFields.begin(), 13847 E = UnmatchedFields.end(); 13848 13849 for ( ; I != E; ++I) { 13850 if (isLayoutCompatible(C, Field1, *I)) { 13851 bool Result = UnmatchedFields.erase(*I); 13852 (void) Result; 13853 assert(Result); 13854 break; 13855 } 13856 } 13857 if (I == E) 13858 return false; 13859 } 13860 13861 return UnmatchedFields.empty(); 13862 } 13863 13864 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 13865 RecordDecl *RD2) { 13866 if (RD1->isUnion() != RD2->isUnion()) 13867 return false; 13868 13869 if (RD1->isUnion()) 13870 return isLayoutCompatibleUnion(C, RD1, RD2); 13871 else 13872 return isLayoutCompatibleStruct(C, RD1, RD2); 13873 } 13874 13875 /// Check if two types are layout-compatible in C++11 sense. 13876 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 13877 if (T1.isNull() || T2.isNull()) 13878 return false; 13879 13880 // C++11 [basic.types] p11: 13881 // If two types T1 and T2 are the same type, then T1 and T2 are 13882 // layout-compatible types. 
13883   if (C.hasSameType(T1, T2))
13884     return true;
13885 
13886   T1 = T1.getCanonicalType().getUnqualifiedType();
13887   T2 = T2.getCanonicalType().getUnqualifiedType();
13888 
13889   const Type::TypeClass TC1 = T1->getTypeClass();
13890   const Type::TypeClass TC2 = T2->getTypeClass();
13891 
13892   if (TC1 != TC2)
13893     return false;
13894 
13895   if (TC1 == Type::Enum) {
13896     return isLayoutCompatible(C,
13897                               cast<EnumType>(T1)->getDecl(),
13898                               cast<EnumType>(T2)->getDecl());
13899   } else if (TC1 == Type::Record) {
13900     if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
13901       return false;
13902 
13903     return isLayoutCompatible(C,
13904                               cast<RecordType>(T1)->getDecl(),
13905                               cast<RecordType>(T2)->getDecl());
13906   }
13907 
13908   return false;
13909 }
13910 
13911 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
13912 
13913 /// Given a type tag expression find the type tag itself.
13914 ///
13915 /// \param TypeExpr Type tag expression, as it appears in the user's code.
13916 ///
13917 /// \param VD Declaration of an identifier that appears in a type tag.
13918 ///
13919 /// \param MagicValue Type tag magic value.
13920 ///
13921 /// \param isConstantEvaluated whether the evaluation should be performed in
13923 /// constant context.
13924 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
13925                             const ValueDecl **VD, uint64_t *MagicValue,
13926                             bool isConstantEvaluated) {
13927   while (true) {
13928     if (!TypeExpr)
13929       return false;
13930 
13931     TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();
13932 
13933     switch (TypeExpr->getStmtClass()) {
13934     case Stmt::UnaryOperatorClass: {
13935       const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
13936       if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
13937         TypeExpr = UO->getSubExpr();
13938         continue;
13939       }
13940       return false;
13941     }
13942 
13943     case Stmt::DeclRefExprClass: {
13944       const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
13945       *VD = DRE->getDecl();
13946       return true;
13947     }
13948 
13949     case Stmt::IntegerLiteralClass: {
13950       const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
13951       llvm::APInt MagicValueAPInt = IL->getValue();
13952       if (MagicValueAPInt.getActiveBits() <= 64) {
13953         *MagicValue = MagicValueAPInt.getZExtValue();
13954         return true;
13955       } else
13956         return false;
13957     }
13958 
13959     case Stmt::BinaryConditionalOperatorClass:
13960     case Stmt::ConditionalOperatorClass: {
13961       const AbstractConditionalOperator *ACO =
13962           cast<AbstractConditionalOperator>(TypeExpr);
13963       bool Result;
13964       if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
13965                                                      isConstantEvaluated)) {
13966         if (Result)
13967           TypeExpr = ACO->getTrueExpr();
13968         else
13969           TypeExpr = ACO->getFalseExpr();
13970         continue;
13971       }
13972       return false;
13973     }
13974 
13975     case Stmt::BinaryOperatorClass: {
13976       const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
13977       if (BO->getOpcode() == BO_Comma) {
13978         TypeExpr = BO->getRHS();
13979         continue;
13980       }
13981       return false;
13982     }
13983 
13984     default:
13985       return false;
13986     }
13987   }
13988 }
13989 
13990 /// Retrieve the C type corresponding to type tag TypeExpr.
13991 ///
13992 /// \param TypeExpr Expression that specifies a type tag.
13993 ///
13994 /// \param MagicValues Registered magic values.
13995 ///
13996 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
13997 /// kind.
13998 ///
13999 /// \param TypeInfo Information about the corresponding C type.
14000 ///
14001 /// \param isConstantEvaluated whether the evaluation should be performed in
14002 /// constant context.
14003 ///
14004 /// \returns true if the corresponding C type was found.
14005 static bool GetMatchingCType(
14006     const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
14007     const ASTContext &Ctx,
14008     const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
14009         *MagicValues,
14010     bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
14011     bool isConstantEvaluated) {
14012   FoundWrongKind = false;
14013 
14014   // Variable declaration that has type_tag_for_datatype attribute.
14015   const ValueDecl *VD = nullptr;
14016 
14017   uint64_t MagicValue;
14018 
14019   if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
14020     return false;
14021 
14022   if (VD) {
14023     if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
14024       if (I->getArgumentKind() != ArgumentKind) {
14025         FoundWrongKind = true;
14026         return false;
14027       }
14028       TypeInfo.Type = I->getMatchingCType();
14029       TypeInfo.LayoutCompatible = I->getLayoutCompatible();
14030       TypeInfo.MustBeNull = I->getMustBeNull();
14031       return true;
14032     }
14033     return false;
14034   }
14035 
14036   if (!MagicValues)
14037     return false;
14038 
14039   llvm::DenseMap<Sema::TypeTagMagicValue,
14040                  Sema::TypeTagData>::const_iterator I =
14041       MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
14042   if (I == MagicValues->end())
14043     return false;
14044 
14045   TypeInfo = I->second;
14046   return true;
14047 }
14048 
14049 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
14050                                       uint64_t MagicValue, QualType Type,
14051                                       bool LayoutCompatible,
14052                                       bool MustBeNull) {
14053   if (!TypeTagForDatatypeMagicValues)
14054     TypeTagForDatatypeMagicValues.reset(
14055         new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);
14056 
14057   TypeTagMagicValue Magic(ArgumentKind, MagicValue);
14058   (*TypeTagForDatatypeMagicValues)[Magic] =
14059       TypeTagData(Type, LayoutCompatible, MustBeNull);
14060 }
14061 
14062 static bool IsSameCharType(QualType T1, QualType T2) {
14063   const BuiltinType *BT1 = T1->getAs<BuiltinType>();
14064   if (!BT1)
14065     return false;
14066 
14067   const BuiltinType *BT2 = T2->getAs<BuiltinType>();
14068   if (!BT2)
14069     return false;
14070 
14071   BuiltinType::Kind T1Kind = BT1->getKind();
14072   BuiltinType::Kind T2Kind = BT2->getKind();
14073 
14074   return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
14075          (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
14076          (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
14077          (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
14078 }
14079 
14080 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
14081                                     const ArrayRef<const Expr *> ExprArgs,
14082                                     SourceLocation CallSiteLoc) {
14083   const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
14084   bool IsPointerAttr = Attr->getIsPointer();
14085 
14086   // Retrieve the argument representing the 'type_tag'.
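  // (For orientation, an illustrative declaration in the style of the
  //  attribute documentation:
  //
  //    void f(int fd, int cmd, ...)
  //        __attribute__((argument_with_type_tag(fcntl, 3, 2)));
  //
  //  Here getTypeTagIdx() designates argument 2 and getArgumentIdx()
  //  argument 3.)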
14087 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 14088 if (TypeTagIdxAST >= ExprArgs.size()) { 14089 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 14090 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 14091 return; 14092 } 14093 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 14094 bool FoundWrongKind; 14095 TypeTagData TypeInfo; 14096 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 14097 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 14098 TypeInfo, isConstantEvaluated())) { 14099 if (FoundWrongKind) 14100 Diag(TypeTagExpr->getExprLoc(), 14101 diag::warn_type_tag_for_datatype_wrong_kind) 14102 << TypeTagExpr->getSourceRange(); 14103 return; 14104 } 14105 14106 // Retrieve the argument representing the 'arg_idx'. 14107 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 14108 if (ArgumentIdxAST >= ExprArgs.size()) { 14109 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 14110 << 1 << Attr->getArgumentIdx().getSourceIndex(); 14111 return; 14112 } 14113 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 14114 if (IsPointerAttr) { 14115 // Skip implicit cast of pointer to `void *' (as a function argument). 14116 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 14117 if (ICE->getType()->isVoidPointerType() && 14118 ICE->getCastKind() == CK_BitCast) 14119 ArgumentExpr = ICE->getSubExpr(); 14120 } 14121 QualType ArgumentType = ArgumentExpr->getType(); 14122 14123 // Passing a `void*' pointer shouldn't trigger a warning. 14124 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 14125 return; 14126 14127 if (TypeInfo.MustBeNull) { 14128 // Type tag with matching void type requires a null pointer. 14129 if (!ArgumentExpr->isNullPointerConstant(Context, 14130 Expr::NPC_ValueDependentIsNotNull)) { 14131 Diag(ArgumentExpr->getExprLoc(), 14132 diag::warn_type_safety_null_pointer_required) 14133 << ArgumentKind->getName() 14134 << ArgumentExpr->getSourceRange() 14135 << TypeTagExpr->getSourceRange(); 14136 } 14137 return; 14138 } 14139 14140 QualType RequiredType = TypeInfo.Type; 14141 if (IsPointerAttr) 14142 RequiredType = Context.getPointerType(RequiredType); 14143 14144 bool mismatch = false; 14145 if (!TypeInfo.LayoutCompatible) { 14146 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 14147 14148 // C++11 [basic.fundamental] p1: 14149 // Plain char, signed char, and unsigned char are three distinct types. 14150 // 14151 // But we treat plain `char' as equivalent to `signed char' or `unsigned 14152 // char' depending on the current char signedness mode. 
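  // (So, illustratively, passing a `char *' where the registered type is
  //  `signed char *' is not flagged on a target where plain char is signed.)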
14153 if (mismatch) 14154 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 14155 RequiredType->getPointeeType())) || 14156 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 14157 mismatch = false; 14158 } else 14159 if (IsPointerAttr) 14160 mismatch = !isLayoutCompatible(Context, 14161 ArgumentType->getPointeeType(), 14162 RequiredType->getPointeeType()); 14163 else 14164 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 14165 14166 if (mismatch) 14167 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 14168 << ArgumentType << ArgumentKind 14169 << TypeInfo.LayoutCompatible << RequiredType 14170 << ArgumentExpr->getSourceRange() 14171 << TypeTagExpr->getSourceRange(); 14172 } 14173 14174 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 14175 CharUnits Alignment) { 14176 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 14177 } 14178 14179 void Sema::DiagnoseMisalignedMembers() { 14180 for (MisalignedMember &m : MisalignedMembers) { 14181 const NamedDecl *ND = m.RD; 14182 if (ND->getName().empty()) { 14183 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 14184 ND = TD; 14185 } 14186 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 14187 << m.MD << ND << m.E->getSourceRange(); 14188 } 14189 MisalignedMembers.clear(); 14190 } 14191 14192 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 14193 E = E->IgnoreParens(); 14194 if (!T->isPointerType() && !T->isIntegerType()) 14195 return; 14196 if (isa<UnaryOperator>(E) && 14197 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 14198 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 14199 if (isa<MemberExpr>(Op)) { 14200 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 14201 if (MA != MisalignedMembers.end() && 14202 (T->isIntegerType() || 14203 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 14204 Context.getTypeAlignInChars( 14205 T->getPointeeType()) <= MA->Alignment)))) 14206 MisalignedMembers.erase(MA); 14207 } 14208 } 14209 } 14210 14211 void Sema::RefersToMemberWithReducedAlignment( 14212 Expr *E, 14213 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 14214 Action) { 14215 const auto *ME = dyn_cast<MemberExpr>(E); 14216 if (!ME) 14217 return; 14218 14219 // No need to check expressions with an __unaligned-qualified type. 14220 if (E->getType().getQualifiers().hasUnaligned()) 14221 return; 14222 14223 // For a chain of MemberExpr like "a.b.c.d" this list 14224 // will keep FieldDecl's like [d, c, b]. 14225 SmallVector<FieldDecl *, 4> ReverseMemberChain; 14226 const MemberExpr *TopME = nullptr; 14227 bool AnyIsPacked = false; 14228 do { 14229 QualType BaseType = ME->getBase()->getType(); 14230 if (ME->isArrow()) 14231 BaseType = BaseType->getPointeeType(); 14232 RecordDecl *RD = BaseType->getAs<RecordType>()->getDecl(); 14233 if (RD->isInvalidDecl()) 14234 return; 14235 14236 ValueDecl *MD = ME->getMemberDecl(); 14237 auto *FD = dyn_cast<FieldDecl>(MD); 14238 // We do not care about non-data members. 
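    // (E.g., illustratively, a reference to a static data member or to a
    //  member function has no FieldDecl to track, so we simply bail out.)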
14239     if (!FD || FD->isInvalidDecl())
14240       return;
14241 
14242     AnyIsPacked =
14243         AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
14244     ReverseMemberChain.push_back(FD);
14245 
14246     TopME = ME;
14247     ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
14248   } while (ME);
14249   assert(TopME && "We did not compute a topmost MemberExpr!");
14250 
14251   // Not the scope of this diagnostic.
14252   if (!AnyIsPacked)
14253     return;
14254 
14255   const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
14256   const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
14257   // TODO: The innermost base of the member expression may be too complicated.
14258   // For now, just disregard these cases. This is left for future
14259   // improvement.
14260   if (!DRE && !isa<CXXThisExpr>(TopBase))
14261     return;
14262 
14263   // Alignment expected by the whole expression.
14264   CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());
14265 
14266   // No need to do anything else with this case.
14267   if (ExpectedAlignment.isOne())
14268     return;
14269 
14270   // Synthesize offset of the whole access.
14271   CharUnits Offset;
14272   for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
14273        I++) {
14274     Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
14275   }
14276 
14277   // Compute the CompleteObjectAlignment as the alignment of the whole chain.
14278   CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
14279       ReverseMemberChain.back()->getParent()->getTypeForDecl());
14280 
14281   // The base expression of the innermost MemberExpr may give
14282   // stronger guarantees than the class containing the member.
14283   if (DRE && !TopME->isArrow()) {
14284     const ValueDecl *VD = DRE->getDecl();
14285     if (!VD->getType()->isReferenceType())
14286       CompleteObjectAlignment =
14287           std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
14288   }
14289 
14290   // Check if the synthesized offset fulfills the alignment.
14291   if (Offset % ExpectedAlignment != 0 ||
14292       // The offset may be properly aligned, but the effective alignment can
14293       // still be lower than the expected expression alignment.
14294       CompleteObjectAlignment < ExpectedAlignment) {
14295     // If this happens, we want to determine a sensible culprit.
14296     // Intuitively, walking the chain of member expressions from right to
14297     // left, we start with the alignment required by the field type, but
14298     // some packed attribute in that chain has reduced the alignment.
14299     // Another packed structure may increase it again, but if we got here
14300     // that increase was not enough. So pointing at the first FieldDecl
14301     // that is either packed itself or whose enclosing RecordDecl is packed
14302     // seems reasonable.
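    //
    // For instance (illustrative):
    //
    //   struct Inner { int X; };
    //   struct __attribute__((packed)) Outer { char C; Inner I; };
    //
    // For `&o.I.X' the chain is [X, I]; neither X nor Inner is packed, but
    // I's parent Outer is, so `I' is reported as the culprit (on a typical
    // target where int requires 4-byte alignment).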
14303 FieldDecl *FD = nullptr; 14304 CharUnits Alignment; 14305 for (FieldDecl *FDI : ReverseMemberChain) { 14306 if (FDI->hasAttr<PackedAttr>() || 14307 FDI->getParent()->hasAttr<PackedAttr>()) { 14308 FD = FDI; 14309 Alignment = std::min( 14310 Context.getTypeAlignInChars(FD->getType()), 14311 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 14312 break; 14313 } 14314 } 14315 assert(FD && "We did not find a packed FieldDecl!"); 14316 Action(E, FD->getParent(), FD, Alignment); 14317 } 14318 } 14319 14320 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 14321 using namespace std::placeholders; 14322 14323 RefersToMemberWithReducedAlignment( 14324 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 14325 _2, _3, _4)); 14326 } 14327