//===- MemoryBuiltins.cpp - Identify calls to memory builtins -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions identifies calls to builtin functions that allocate
// or free memory.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <numeric>
#include <type_traits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memory-builtins"

enum AllocType : uint8_t {
  OpNewLike          = 1<<0, // allocates; never returns null
  MallocLike         = 1<<1, // allocates; may return null
  AlignedAllocLike   = 1<<2, // allocates with alignment; may return null
  CallocLike         = 1<<3, // allocates + bzero
  ReallocLike        = 1<<4, // reallocates
  StrDupLike         = 1<<5,
  MallocOrOpNewLike  = MallocLike | OpNewLike,
  MallocOrCallocLike = MallocLike | OpNewLike | CallocLike | AlignedAllocLike,
  AllocLike          = MallocOrCallocLike | StrDupLike,
  AnyAlloc           = AllocLike | ReallocLike
};

enum class MallocFamily {
  Malloc,
  CPPNew,             // new(unsigned int)
  CPPNewAligned,      // new(unsigned int, align_val_t)
  CPPNewArray,        // new[](unsigned int)
  CPPNewArrayAligned, // new[](unsigned long, align_val_t)
  MSVCNew,            // new(unsigned int)
  MSVCArrayNew,       // new[](unsigned int)
  VecMalloc,
  KmpcAllocShared,
};

StringRef mangledNameForMallocFamily(const MallocFamily &Family) {
  switch (Family) {
  case MallocFamily::Malloc:
    return "malloc";
  case MallocFamily::CPPNew:
    return "_Znwm";
  case MallocFamily::CPPNewAligned:
    return "_ZnwmSt11align_val_t";
  case MallocFamily::CPPNewArray:
    return "_Znam";
  case MallocFamily::CPPNewArrayAligned:
    return "_ZnamSt11align_val_t";
  case MallocFamily::MSVCNew:
    return "??2@YAPAXI@Z";
  case MallocFamily::MSVCArrayNew:
    return "??_U@YAPAXI@Z";
  case MallocFamily::VecMalloc:
    return "vec_malloc";
  case MallocFamily::KmpcAllocShared:
    return "__kmpc_alloc_shared";
  }
  llvm_unreachable("missing an alloc family");
}

struct AllocFnsTy {
  AllocType AllocTy;
  unsigned NumParams;
  // First and Second size parameters (or -1 if unused)
  int FstParam, SndParam;
  // Alignment parameter for aligned_alloc and aligned new
  int AlignParam;
  // Name of default allocator function to group malloc/free calls by family
  MallocFamily Family;
};

// clang-format off
// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc.
static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
    {LibFunc_vec_malloc, {MallocLike, 1, 0, -1, -1, MallocFamily::VecMalloc}},
    {LibFunc_Znwj, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNew}},                                 // new(unsigned int)
    {LibFunc_ZnwjRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNew}},                  // new(unsigned int, nothrow)
    {LibFunc_ZnwjSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewAligned}},            // new(unsigned int, align_val_t)
    {LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewAligned}}, // new(unsigned int, align_val_t, nothrow)
    {LibFunc_Znwm, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNew}},                                 // new(unsigned long)
    {LibFunc_ZnwmRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNew}},                  // new(unsigned long, nothrow)
    {LibFunc_ZnwmSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewAligned}},            // new(unsigned long, align_val_t)
    {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewAligned}}, // new(unsigned long, align_val_t, nothrow)
    {LibFunc_Znaj, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNewArray}},                            // new[](unsigned int)
    {LibFunc_ZnajRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNewArray}},             // new[](unsigned int, nothrow)
    {LibFunc_ZnajSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewArrayAligned}},       // new[](unsigned int, align_val_t)
    {LibFunc_ZnajSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned int, align_val_t, nothrow)
    {LibFunc_Znam, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNewArray}},                            // new[](unsigned long)
    {LibFunc_ZnamRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNewArray}},             // new[](unsigned long, nothrow)
    {LibFunc_ZnamSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewArrayAligned}},       // new[](unsigned long, align_val_t)
    {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned long, align_val_t, nothrow)
    {LibFunc_msvc_new_int, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCNew}},                        // new(unsigned int)
    {LibFunc_msvc_new_int_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCNew}},               // new(unsigned int, nothrow)
    {LibFunc_msvc_new_longlong, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCNew}},                   // new(unsigned long long)
    {LibFunc_msvc_new_longlong_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCNew}},          // new(unsigned long long, nothrow)
    {LibFunc_msvc_new_array_int, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCArrayNew}},             // new[](unsigned int)
    {LibFunc_msvc_new_array_int_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCArrayNew}},    // new[](unsigned int, nothrow)
    {LibFunc_msvc_new_array_longlong, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCArrayNew}},        // new[](unsigned long long)
    {LibFunc_msvc_new_array_longlong_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCArrayNew}}, // new[](unsigned long long, nothrow)
    {LibFunc_memalign, {AlignedAllocLike, 2, 1, -1, 0, MallocFamily::Malloc}},
    {LibFunc_vec_calloc, {CallocLike, 2, 0, 1, -1, MallocFamily::VecMalloc}},
    {LibFunc_vec_realloc, {ReallocLike, 2, 1, -1, -1, MallocFamily::VecMalloc}},
    {LibFunc_strdup, {StrDupLike, 1, -1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_dunder_strdup, {StrDupLike, 1, -1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_strndup, {StrDupLike, 2, 1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_dunder_strndup, {StrDupLike, 2, 1, -1, -1, MallocFamily::Malloc}},
    {LibFunc___kmpc_alloc_shared, {MallocLike, 1, 0, -1, -1, MallocFamily::KmpcAllocShared}},
};
// clang-format on
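
// Illustrative reading of one row above (not normative; see AllocFnsTy for the
// field meanings): {LibFunc_vec_calloc, {CallocLike, 2, 0, 1, -1, ...}} says
// that vec_calloc takes two parameters, the allocation size is parameter 0
// multiplied by parameter 1, and there is no alignment parameter (-1).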

static const Function *getCalledFunction(const Value *V,
                                         bool &IsNoBuiltin) {
  // Don't care about intrinsics in this case.
  if (isa<IntrinsicInst>(V))
    return nullptr;

  const auto *CB = dyn_cast<CallBase>(V);
  if (!CB)
    return nullptr;

  IsNoBuiltin = CB->isNoBuiltin();

  if (const Function *Callee = CB->getCalledFunction())
    return Callee;
  return nullptr;
}

/// Returns the allocation data for the given value if it's a call to a known
/// allocation function.
static Optional<AllocFnsTy>
getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
                             const TargetLibraryInfo *TLI) {
  // Don't perform a slow TLI lookup if this function doesn't return a pointer
  // and thus can't be an allocation function.
  if (!Callee->getReturnType()->isPointerTy())
    return None;

  // Make sure that the function is available.
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return None;

  const auto *Iter = find_if(
      AllocationFnData, [TLIFn](const std::pair<LibFunc, AllocFnsTy> &P) {
        return P.first == TLIFn;
      });

  if (Iter == std::end(AllocationFnData))
    return None;

  const AllocFnsTy *FnData = &Iter->second;
  if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
    return None;

  // Check function prototype.
  int FstParam = FnData->FstParam;
  int SndParam = FnData->SndParam;
  FunctionType *FTy = Callee->getFunctionType();

  if (FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
      FTy->getNumParams() == FnData->NumParams &&
      (FstParam < 0 ||
       (FTy->getParamType(FstParam)->isIntegerTy(32) ||
        FTy->getParamType(FstParam)->isIntegerTy(64))) &&
      (SndParam < 0 ||
       FTy->getParamType(SndParam)->isIntegerTy(32) ||
       FTy->getParamType(SndParam)->isIntegerTy(64)))
    return *FnData;
  return None;
}

static Optional<AllocFnsTy> getAllocationData(const Value *V, AllocType AllocTy,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(Callee, AllocTy, TLI);
  return None;
}

static Optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
                  function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(
          Callee, AllocTy, &GetTLI(const_cast<Function &>(*Callee)));
  return None;
}

static Optional<AllocFnsTy> getAllocationSize(const Value *V,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee =
      getCalledFunction(V, IsNoBuiltinCall);
  if (!Callee)
    return None;

  // Prefer to use existing information over allocsize. This will give us an
  // accurate AllocTy.
  if (!IsNoBuiltinCall)
    if (Optional<AllocFnsTy> Data =
            getAllocationDataForFunction(Callee, AnyAlloc, TLI))
      return Data;

  Attribute Attr = Callee->getFnAttribute(Attribute::AllocSize);
  if (Attr == Attribute())
    return None;

  std::pair<unsigned, Optional<unsigned>> Args = Attr.getAllocSizeArgs();

  AllocFnsTy Result;
  // Because allocsize only tells us how many bytes are allocated, we're not
  // really allowed to assume anything, so we use MallocLike.
  Result.AllocTy = MallocLike;
  Result.NumParams = Callee->getNumOperands();
  Result.FstParam = Args.first;
  Result.SndParam = Args.second.value_or(-1);
  // Allocsize has no way to specify an alignment argument
  Result.AlignParam = -1;
  return Result;
}

static AllocFnKind getAllocFnKind(const Value *V) {
  if (const auto *CB = dyn_cast<CallBase>(V)) {
    Attribute Attr = CB->getFnAttr(Attribute::AllocKind);
    if (Attr.isValid())
      return AllocFnKind(Attr.getValueAsInt());
  }
  return AllocFnKind::Unknown;
}

static AllocFnKind getAllocFnKind(const Function *F) {
  Attribute Attr = F->getFnAttribute(Attribute::AllocKind);
  if (Attr.isValid())
    return AllocFnKind(Attr.getValueAsInt());
  return AllocFnKind::Unknown;
}

static bool checkFnAllocKind(const Value *V, AllocFnKind Wanted) {
  return (getAllocFnKind(V) & Wanted) != AllocFnKind::Unknown;
}

static bool checkFnAllocKind(const Function *F, AllocFnKind Wanted) {
  return (getAllocFnKind(F) & Wanted) != AllocFnKind::Unknown;
}
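
// Rough example of the attribute-based path (hypothetical IR, not taken from
// this file): a custom allocator declared as
//
//   declare ptr @my_alloc(i64) allockind("alloc,uninitialized") allocsize(0)
//                              "alloc-family"="my_alloc"
//
// is not listed in AllocationFnData, but checkFnAllocKind() still classifies
// it via the allockind attribute, and getAllocationSize() picks up its size
// from allocsize(0).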

/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AnyAlloc, TLI).has_value() ||
         checkFnAllocKind(V, AllocFnKind::Alloc | AllocFnKind::Realloc);
}
bool llvm::isAllocationFn(
    const Value *V,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  return getAllocationData(V, AnyAlloc, GetTLI).has_value() ||
         checkFnAllocKind(V, AllocFnKind::Alloc | AllocFnKind::Realloc);
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
static bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, MallocOrOpNewLike, TLI).has_value();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory with alignment (such as aligned_alloc).
static bool isAlignedAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AlignedAllocLike, TLI).has_value();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
static bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, CallocLike, TLI).has_value();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, MallocOrCallocLike, TLI).has_value();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AllocLike, TLI).has_value() ||
         checkFnAllocKind(V, AllocFnKind::Alloc);
}

/// Tests if a function is a library function that reallocates memory (e.g.,
/// realloc).
bool llvm::isReallocLikeFn(const Function *F, const TargetLibraryInfo *TLI) {
  return getAllocationDataForFunction(F, ReallocLike, TLI).has_value() ||
         checkFnAllocKind(F, AllocFnKind::Realloc);
}

Value *llvm::getReallocatedOperand(const CallBase *CB,
                                   const TargetLibraryInfo *TLI) {
  if (getAllocationData(CB, ReallocLike, TLI).has_value()) {
    // All currently supported realloc functions reallocate the first argument.
    return CB->getArgOperand(0);
  }
  if (checkFnAllocKind(CB, AllocFnKind::Realloc))
    return CB->getArgOperandWithAttribute(Attribute::AllocatedPointer);
  return nullptr;
}

bool llvm::isRemovableAlloc(const CallBase *CB, const TargetLibraryInfo *TLI) {
  // Note: Removability is highly dependent on the source language. For
  // example, recent C++ requires direct calls to the global allocation
  // [basic.stc.dynamic.allocation] to be observable unless part of a new
  // expression [expr.new paragraph 13].

  // Historically we've treated the C family allocation routines and operator
  // new as removable.
  return isAllocLikeFn(CB, TLI);
}
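
// Illustrative behaviour of getAllocAlignment() below (example call sites, not
// exercised in this file): for memalign(64, 256) the table above marks
// parameter 0 as the alignment argument, so the returned Value is the `64`;
// for a custom allocator whose declaration carries an `allocalign` parameter
// attribute, the operand with that attribute is returned instead.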

Value *llvm::getAllocAlignment(const CallBase *V,
                               const TargetLibraryInfo *TLI) {
  const Optional<AllocFnsTy> FnData = getAllocationData(V, AnyAlloc, TLI);
  if (FnData && FnData->AlignParam >= 0) {
    return V->getOperand(FnData->AlignParam);
  }
  return V->getArgOperandWithAttribute(Attribute::AllocAlign);
}

/// When we're compiling N-bit code, and the user uses parameters that are
/// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
/// trouble with APInt size issues. This function handles resizing + overflow
/// checks for us. Check and zext or trunc \p I depending on IntTyBits and
/// I's value.
static bool CheckedZextOrTrunc(APInt &I, unsigned IntTyBits) {
  // More bits than we can handle. Checking the bit width isn't necessary, but
  // it's faster than checking active bits, and should give `false` in the
  // vast majority of cases.
  if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
    return false;
  if (I.getBitWidth() != IntTyBits)
    I = I.zextOrTrunc(IntTyBits);
  return true;
}

Optional<APInt>
llvm::getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI,
                   function_ref<const Value *(const Value *)> Mapper) {
  // Note: This handles both explicitly listed allocation functions and
  // allocsize. The code structure could stand to be cleaned up a bit.
  Optional<AllocFnsTy> FnData = getAllocationSize(CB, TLI);
  if (!FnData)
    return None;

  // Get the index type for this address space; results and intermediate
  // computations are performed at that width.
  auto &DL = CB->getModule()->getDataLayout();
  const unsigned IntTyBits = DL.getIndexTypeSizeInBits(CB->getType());

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    APInt Size(IntTyBits, GetStringLength(Mapper(CB->getArgOperand(0))));
    if (!Size)
      return None;

    // Strndup limits strlen.
    if (FnData->FstParam > 0) {
      const ConstantInt *Arg =
          dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
      if (!Arg)
        return None;

      APInt MaxSize = Arg->getValue().zext(IntTyBits);
      if (Size.ugt(MaxSize))
        Size = MaxSize + 1;
    }
    return Size;
  }

  const ConstantInt *Arg =
      dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
  if (!Arg)
    return None;

  APInt Size = Arg->getValue();
  if (!CheckedZextOrTrunc(Size, IntTyBits))
    return None;

  // Size is determined by just 1 parameter.
  if (FnData->SndParam < 0)
    return Size;

  Arg = dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->SndParam)));
  if (!Arg)
    return None;

  APInt NumElems = Arg->getValue();
  if (!CheckedZextOrTrunc(NumElems, IntTyBits))
    return None;

  bool Overflow;
  Size = Size.umul_ov(NumElems, Overflow);
  if (Overflow)
    return None;
  return Size;
}
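
// Worked example for getAllocSize() above (illustrative values): for a
// calloc-like entry with FstParam = 0 and SndParam = 1, a call such as
// vec_calloc(100, 8) yields Size = 100 and NumElems = 8, and the result is
// the overflow-checked product 800. If either argument is not a ConstantInt,
// or the multiply overflows the index width, None is returned.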

Constant *llvm::getInitialValueOfAllocation(const Value *V,
                                            const TargetLibraryInfo *TLI,
                                            Type *Ty) {
  auto *Alloc = dyn_cast<CallBase>(V);
  if (!Alloc)
    return nullptr;

  // malloc and aligned_alloc are uninitialized (undef)
  if (isMallocLikeFn(Alloc, TLI) || isAlignedAllocLikeFn(Alloc, TLI))
    return UndefValue::get(Ty);

  // calloc zero initializes
  if (isCallocLikeFn(Alloc, TLI))
    return Constant::getNullValue(Ty);

  AllocFnKind AK = getAllocFnKind(Alloc);
  if ((AK & AllocFnKind::Uninitialized) != AllocFnKind::Unknown)
    return UndefValue::get(Ty);
  if ((AK & AllocFnKind::Zeroed) != AllocFnKind::Unknown)
    return Constant::getNullValue(Ty);

  return nullptr;
}

struct FreeFnsTy {
  unsigned NumParams;
  // Name of default allocator function to group malloc/free calls by family
  MallocFamily Family;
};

// clang-format off
static const std::pair<LibFunc, FreeFnsTy> FreeFnData[] = {
    {LibFunc_vec_free, {1, MallocFamily::VecMalloc}},
    {LibFunc_ZdlPv, {1, MallocFamily::CPPNew}},                                  // operator delete(void*)
    {LibFunc_ZdaPv, {1, MallocFamily::CPPNewArray}},                             // operator delete[](void*)
    {LibFunc_msvc_delete_ptr32, {1, MallocFamily::MSVCNew}},                     // operator delete(void*)
    {LibFunc_msvc_delete_ptr64, {1, MallocFamily::MSVCNew}},                     // operator delete(void*)
    {LibFunc_msvc_delete_array_ptr32, {1, MallocFamily::MSVCArrayNew}},          // operator delete[](void*)
    {LibFunc_msvc_delete_array_ptr64, {1, MallocFamily::MSVCArrayNew}},          // operator delete[](void*)
    {LibFunc_ZdlPvj, {2, MallocFamily::CPPNew}},                                 // delete(void*, uint)
    {LibFunc_ZdlPvm, {2, MallocFamily::CPPNew}},                                 // delete(void*, ulong)
    {LibFunc_ZdlPvRKSt9nothrow_t, {2, MallocFamily::CPPNew}},                    // delete(void*, nothrow)
    {LibFunc_ZdlPvSt11align_val_t, {2, MallocFamily::CPPNewAligned}},            // delete(void*, align_val_t)
    {LibFunc_ZdaPvj, {2, MallocFamily::CPPNewArray}},                            // delete[](void*, uint)
    {LibFunc_ZdaPvm, {2, MallocFamily::CPPNewArray}},                            // delete[](void*, ulong)
    {LibFunc_ZdaPvRKSt9nothrow_t, {2, MallocFamily::CPPNewArray}},               // delete[](void*, nothrow)
    {LibFunc_ZdaPvSt11align_val_t, {2, MallocFamily::CPPNewArrayAligned}},       // delete[](void*, align_val_t)
    {LibFunc_msvc_delete_ptr32_int, {2, MallocFamily::MSVCNew}},                 // delete(void*, uint)
    {LibFunc_msvc_delete_ptr64_longlong, {2, MallocFamily::MSVCNew}},            // delete(void*, ulonglong)
    {LibFunc_msvc_delete_ptr32_nothrow, {2, MallocFamily::MSVCNew}},             // delete(void*, nothrow)
    {LibFunc_msvc_delete_ptr64_nothrow, {2, MallocFamily::MSVCNew}},             // delete(void*, nothrow)
    {LibFunc_msvc_delete_array_ptr32_int, {2, MallocFamily::MSVCArrayNew}},      // delete[](void*, uint)
    {LibFunc_msvc_delete_array_ptr64_longlong, {2, MallocFamily::MSVCArrayNew}}, // delete[](void*, ulonglong)
    {LibFunc_msvc_delete_array_ptr32_nothrow, {2, MallocFamily::MSVCArrayNew}},  // delete[](void*, nothrow)
    {LibFunc_msvc_delete_array_ptr64_nothrow, {2, MallocFamily::MSVCArrayNew}},  // delete[](void*, nothrow)
    {LibFunc___kmpc_free_shared, {2, MallocFamily::KmpcAllocShared}},            // OpenMP Offloading RTL free
    {LibFunc_ZdlPvSt11align_val_tRKSt9nothrow_t, {3, MallocFamily::CPPNewAligned}},      // delete(void*, align_val_t, nothrow)
    {LibFunc_ZdaPvSt11align_val_tRKSt9nothrow_t, {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, align_val_t, nothrow)
    {LibFunc_ZdlPvjSt11align_val_t, {3, MallocFamily::CPPNewAligned}},           // delete(void*, unsigned int, align_val_t)
    {LibFunc_ZdlPvmSt11align_val_t, {3, MallocFamily::CPPNewAligned}},           // delete(void*, unsigned long, align_val_t)
    {LibFunc_ZdaPvjSt11align_val_t, {3, MallocFamily::CPPNewArrayAligned}},      // delete[](void*, unsigned int, align_val_t)
    {LibFunc_ZdaPvmSt11align_val_t, {3, MallocFamily::CPPNewArrayAligned}},      // delete[](void*, unsigned long, align_val_t)
};
// clang-format on

Optional<FreeFnsTy> getFreeFunctionDataForFunction(const Function *Callee,
                                                   const LibFunc TLIFn) {
  const auto *Iter =
      find_if(FreeFnData, [TLIFn](const std::pair<LibFunc, FreeFnsTy> &P) {
        return P.first == TLIFn;
      });
  if (Iter == std::end(FreeFnData))
    return None;
  return Iter->second;
}

Optional<StringRef> llvm::getAllocationFamily(const Value *I,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltin;
  const Function *Callee = getCalledFunction(I, IsNoBuiltin);
  if (Callee == nullptr || IsNoBuiltin)
    return None;
  LibFunc TLIFn;

  if (TLI && TLI->getLibFunc(*Callee, TLIFn) && TLI->has(TLIFn)) {
    // Callee is some known library function.
    const auto AllocData = getAllocationDataForFunction(Callee, AnyAlloc, TLI);
    if (AllocData)
      return mangledNameForMallocFamily(AllocData.value().Family);
    const auto FreeData = getFreeFunctionDataForFunction(Callee, TLIFn);
    if (FreeData)
      return mangledNameForMallocFamily(FreeData.value().Family);
  }
  // Callee isn't a known library function, still check attributes.
  if (checkFnAllocKind(I, AllocFnKind::Free | AllocFnKind::Alloc |
                              AllocFnKind::Realloc)) {
    Attribute Attr = cast<CallBase>(I)->getFnAttr("alloc-family");
    if (Attr.isValid())
      return Attr.getValueAsString();
  }
  return None;
}

/// isLibFreeFunction - Returns true if the function is a builtin free()
bool llvm::isLibFreeFunction(const Function *F, const LibFunc TLIFn) {
  Optional<FreeFnsTy> FnData = getFreeFunctionDataForFunction(F, TLIFn);
  if (!FnData)
    return checkFnAllocKind(F, AllocFnKind::Free);

  // Check free prototype.
  // FIXME: workaround for PR5130; this will become obsolete once a nobuiltin
  // attribute exists.
  FunctionType *FTy = F->getFunctionType();
  if (!FTy->getReturnType()->isVoidTy())
    return false;
  if (FTy->getNumParams() != FnData->NumParams)
    return false;
  if (FTy->getParamType(0) != Type::getInt8PtrTy(F->getContext()))
    return false;

  return true;
}

Value *llvm::getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee = getCalledFunction(CB, IsNoBuiltinCall);
  if (Callee == nullptr || IsNoBuiltinCall)
    return nullptr;

  LibFunc TLIFn;
  if (TLI && TLI->getLibFunc(*Callee, TLIFn) && TLI->has(TLIFn) &&
      isLibFreeFunction(Callee, TLIFn)) {
    // All currently supported free functions free the first argument.
    return CB->getArgOperand(0);
  }

  if (checkFnAllocKind(CB, AllocFnKind::Free))
    return CB->getArgOperandWithAttribute(Attribute::AllocatedPointer);

  return nullptr;
}
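
// Rough sketch of the attribute-driven deallocation path (hypothetical IR, not
// from this file):
//
//   declare void @my_free(ptr allocptr) allockind("free") "alloc-family"="my_alloc"
//
// Such a function is not in FreeFnData, but checkFnAllocKind(CB,
// AllocFnKind::Free) succeeds and getFreedOperand() returns the argument
// carrying the `allocptr` parameter attribute.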

//===----------------------------------------------------------------------===//
//  Utility functions to compute size of objects.
//
static APInt getSizeWithOverflow(const SizeOffsetType &Data) {
  if (Data.second.isNegative() || Data.first.ult(Data.second))
    return APInt(Data.first.getBitWidth(), 0);
  return Data.first - Data.second;
}

/// Compute the size of the object pointed to by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                         const TargetLibraryInfo *TLI, ObjectSizeOpts Opts) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), Opts);
  SizeOffsetType Data = Visitor.compute(const_cast<Value *>(Ptr));
  if (!Visitor.bothKnown(Data))
    return false;

  Size = getSizeWithOverflow(Data).getZExtValue();
  return true;
}

Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI,
                                 bool MustSucceed) {
  return lowerObjectSizeCall(ObjectSize, DL, TLI, /*AAResults=*/nullptr,
                             MustSucceed);
}

Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI, AAResults *AA,
                                 bool MustSucceed) {
  assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
         "ObjectSize must be a call to llvm.objectsize!");

  bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
  ObjectSizeOpts EvalOptions;
  EvalOptions.AA = AA;

  // Unless we have to fold this to something, try to be as accurate as
  // possible.
  if (MustSucceed)
    EvalOptions.EvalMode =
        MaxVal ? ObjectSizeOpts::Mode::Max : ObjectSizeOpts::Mode::Min;
  else
    EvalOptions.EvalMode = ObjectSizeOpts::Mode::Exact;

  EvalOptions.NullIsUnknownSize =
      cast<ConstantInt>(ObjectSize->getArgOperand(2))->isOne();

  auto *ResultType = cast<IntegerType>(ObjectSize->getType());
  bool StaticOnly = cast<ConstantInt>(ObjectSize->getArgOperand(3))->isZero();
  if (StaticOnly) {
    // FIXME: Does it make sense to just return a failure value if the size
    // won't fit in the output and `!MustSucceed`?
    uint64_t Size;
    if (getObjectSize(ObjectSize->getArgOperand(0), Size, DL, TLI,
                      EvalOptions) &&
        isUIntN(ResultType->getBitWidth(), Size))
      return ConstantInt::get(ResultType, Size);
  } else {
    LLVMContext &Ctx = ObjectSize->getFunction()->getContext();
    ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, EvalOptions);
    SizeOffsetEvalType SizeOffsetPair =
        Eval.compute(ObjectSize->getArgOperand(0));

    if (SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown()) {
      IRBuilder<TargetFolder> Builder(Ctx, TargetFolder(DL));
      Builder.SetInsertPoint(ObjectSize);

      // If we're outside the end of the object, then we can always access
      // exactly 0 bytes.
      Value *ResultSize =
          Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
      Value *UseZero =
          Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
      ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
      Value *Ret = Builder.CreateSelect(
          UseZero, ConstantInt::get(ResultType, 0), ResultSize);

      // The non-constant size expression cannot evaluate to -1.
      if (!isa<Constant>(SizeOffsetPair.first) ||
          !isa<Constant>(SizeOffsetPair.second))
        Builder.CreateAssumption(
            Builder.CreateICmpNE(Ret, ConstantInt::get(ResultType, -1)));

      return Ret;
    }
  }

  if (!MustSucceed)
    return nullptr;

  return ConstantInt::get(ResultType, MaxVal ? -1ULL : 0);
}
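
// Example of what lowerObjectSizeCall() folds (illustrative IR; the exact
// intrinsic mangling depends on the pointer type):
//
//   %sz = call i64 @llvm.objectsize.i64(ptr %p, i1 false, i1 true, i1 false)
//
// With %p pointing into a known 32-byte alloca at offset 8, this becomes
// i64 24. If nothing is known and MustSucceed is set, the i1 false "min"
// flag selects the "don't know" value -1.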

STATISTIC(ObjectVisitorArgument,
          "Number of arguments with unsolved size and offset");
STATISTIC(ObjectVisitorLoad,
          "Number of load instructions with unsolved size and offset");

APInt ObjectSizeOffsetVisitor::align(APInt Size, MaybeAlign Alignment) {
  if (Options.RoundToAlign && Alignment)
    return APInt(IntTyBits, alignTo(Size.getZExtValue(), *Alignment));
  return Size;
}

ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
                                                 const TargetLibraryInfo *TLI,
                                                 LLVMContext &Context,
                                                 ObjectSizeOpts Options)
    : DL(DL), TLI(TLI), Options(Options) {
  // Pointer size must be rechecked for each object visited since it could have
  // a different address space.
}

SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
  unsigned InitialIntTyBits = DL.getIndexTypeSizeInBits(V->getType());

  // Stripping pointer casts can strip address space casts which can change the
  // index type size. The invariant is that we use the value type to determine
  // the index type size and if we stripped address space casts we have to
  // readjust the APInt as we pass it upwards in order for the APInt to match
  // the type the caller passed in.
  APInt Offset(InitialIntTyBits, 0);
  V = V->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);

  // Later we use the index type size and zero but it will match the type of
  // the value that is passed to computeImpl.
  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
  Zero = APInt::getZero(IntTyBits);

  bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
  if (!IndexTypeSizeChanged && Offset.isZero())
    return computeImpl(V);

  // We stripped an address space cast that changed the index type size or we
  // accumulated some constant offset (or both). Readjust the bit width to
  // match the argument index type size and apply the offset, as required.
  SizeOffsetType SOT = computeImpl(V);
  if (IndexTypeSizeChanged) {
    if (knownSize(SOT) && !::CheckedZextOrTrunc(SOT.first, InitialIntTyBits))
      SOT.first = APInt();
    if (knownOffset(SOT) && !::CheckedZextOrTrunc(SOT.second, InitialIntTyBits))
      SOT.second = APInt();
  }
  // If the computed offset is "unknown" we cannot add the stripped offset.
  return {SOT.first,
          SOT.second.getBitWidth() > 1 ? SOT.second + Offset : SOT.second};
}
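
// Illustrative scenario for the width adjustment above (assumes a target where
// addrspace(1) uses 32-bit indices and the default address space uses 64-bit
// indices): computing the size of
//   addrspacecast (ptr addrspace(1) @g to ptr)
// starts with a 64-bit APInt, drops to 32 bits once the cast is stripped, and
// the result is zero-extended back to 64 bits before being returned.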

SizeOffsetType ObjectSizeOffsetVisitor::computeImpl(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If we have already seen this instruction, bail out. Cycles can happen in
    // unreachable code after constant propagation.
    if (!SeenInsts.insert(I).second)
      return unknown();

    return visit(*I);
  }
  if (Argument *A = dyn_cast<Argument>(V))
    return visitArgument(*A);
  if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
    return visitConstantPointerNull(*P);
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return visitGlobalAlias(*GA);
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return visitGlobalVariable(*GV);
  if (UndefValue *UV = dyn_cast<UndefValue>(V))
    return visitUndefValue(*UV);

  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
                    << *V << '\n');
  return unknown();
}

bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
  return ::CheckedZextOrTrunc(I, IntTyBits);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  TypeSize ElemSize = DL.getTypeAllocSize(I.getAllocatedType());
  if (ElemSize.isScalable() && Options.EvalMode != ObjectSizeOpts::Mode::Min)
    return unknown();
  APInt Size(IntTyBits, ElemSize.getKnownMinSize());
  if (!I.isArrayAllocation())
    return std::make_pair(align(Size, I.getAlign()), Zero);

  Value *ArraySize = I.getArraySize();
  if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
    APInt NumElems = C->getValue();
    if (!CheckedZextOrTrunc(NumElems))
      return unknown();

    bool Overflow;
    Size = Size.umul_ov(NumElems, Overflow);
    return Overflow ? unknown()
                    : std::make_pair(align(Size, I.getAlign()), Zero);
  }
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
  Type *MemoryTy = A.getPointeeInMemoryValueType();
  // No interprocedural analysis is done at the moment.
  if (!MemoryTy || !MemoryTy->isSized()) {
    ++ObjectVisitorArgument;
    return unknown();
  }

  APInt Size(IntTyBits, DL.getTypeAllocSize(MemoryTy));
  return std::make_pair(align(Size, A.getParamAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
  if (Optional<APInt> Size = getAllocSize(&CB, TLI))
    return std::make_pair(*Size, Zero);
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull &CPN) {
  // If null is unknown, there's nothing we can do. Additionally, non-zero
  // address spaces can make use of null, so we don't presume to know anything
  // about that.
  //
  // TODO: How should this work with address space casts? We currently just
  // drop them on the floor, but it's unclear what we should do when a NULL
  // from addrspace(1) gets casted to addrspace(0) (or vice-versa).
  if (Options.NullIsUnknownSize || CPN.getType()->getAddressSpace())
    return unknown();
  return std::make_pair(Zero, Zero);
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst &) {
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst &) {
  // Easy cases were already folded by previous passes.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
  if (GA.isInterposable())
    return unknown();
  return compute(GA.getAliasee());
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV) {
  if (!GV.hasDefinitiveInitializer())
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
  return std::make_pair(align(Size, GV.getAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst &) {
  // clueless
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::findLoadSizeOffset(
    LoadInst &Load, BasicBlock &BB, BasicBlock::iterator From,
    SmallDenseMap<BasicBlock *, SizeOffsetType, 8> &VisitedBlocks,
    unsigned &ScannedInstCount) {
  constexpr unsigned MaxInstsToScan = 128;

  auto Where = VisitedBlocks.find(&BB);
  if (Where != VisitedBlocks.end())
    return Where->second;

  auto Unknown = [this, &BB, &VisitedBlocks]() {
    return VisitedBlocks[&BB] = unknown();
  };
  auto Known = [&BB, &VisitedBlocks](SizeOffsetType SO) {
    return VisitedBlocks[&BB] = SO;
  };

  do {
    Instruction &I = *From;

    if (I.isDebugOrPseudoInst())
      continue;

    if (++ScannedInstCount > MaxInstsToScan)
      return Unknown();

    if (!I.mayWriteToMemory())
      continue;

    if (auto *SI = dyn_cast<StoreInst>(&I)) {
      AliasResult AR =
          Options.AA->alias(SI->getPointerOperand(), Load.getPointerOperand());
      switch ((AliasResult::Kind)AR) {
      case AliasResult::NoAlias:
        continue;
      case AliasResult::MustAlias:
        if (SI->getValueOperand()->getType()->isPointerTy())
          return Known(compute(SI->getValueOperand()));
        else
          return Unknown(); // No handling of non-pointer values by `compute`.
      default:
        return Unknown();
      }
    }

    if (auto *CB = dyn_cast<CallBase>(&I)) {
      Function *Callee = CB->getCalledFunction();
      // Bail out on indirect call.
      if (!Callee)
        return Unknown();

      LibFunc TLIFn;
      if (!TLI || !TLI->getLibFunc(*CB->getCalledFunction(), TLIFn) ||
          !TLI->has(TLIFn))
        return Unknown();

      // TODO: There are probably more interesting cases to support here.
      if (TLIFn != LibFunc_posix_memalign)
        return Unknown();

      AliasResult AR =
          Options.AA->alias(CB->getOperand(0), Load.getPointerOperand());
      switch ((AliasResult::Kind)AR) {
      case AliasResult::NoAlias:
        continue;
      case AliasResult::MustAlias:
        break;
      default:
        return Unknown();
      }

      // Is the error status of posix_memalign correctly checked? If not it
      // would be incorrect to assume it succeeds and load doesn't see the
      // previous value.
      Optional<bool> Checked = isImpliedByDomCondition(
          ICmpInst::ICMP_EQ, CB, ConstantInt::get(CB->getType(), 0), &Load, DL);
      if (!Checked || !*Checked)
        return Unknown();

      Value *Size = CB->getOperand(2);
      auto *C = dyn_cast<ConstantInt>(Size);
      if (!C)
        return Unknown();

      return Known({C->getValue(), APInt(C->getValue().getBitWidth(), 0)});
    }

    return Unknown();
  } while (From-- != BB.begin());

  SmallVector<SizeOffsetType> PredecessorSizeOffsets;
  for (auto *PredBB : predecessors(&BB)) {
    PredecessorSizeOffsets.push_back(findLoadSizeOffset(
        Load, *PredBB, BasicBlock::iterator(PredBB->getTerminator()),
        VisitedBlocks, ScannedInstCount));
    if (!bothKnown(PredecessorSizeOffsets.back()))
      return Unknown();
  }

  if (PredecessorSizeOffsets.empty())
    return Unknown();

  return Known(std::accumulate(PredecessorSizeOffsets.begin() + 1,
                               PredecessorSizeOffsets.end(),
                               PredecessorSizeOffsets.front(),
                               [this](SizeOffsetType LHS, SizeOffsetType RHS) {
                                 return combineSizeOffset(LHS, RHS);
                               }));
}

SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst &LI) {
  if (!Options.AA) {
    ++ObjectVisitorLoad;
    return unknown();
  }

  SmallDenseMap<BasicBlock *, SizeOffsetType, 8> VisitedBlocks;
  unsigned ScannedInstCount = 0;
  SizeOffsetType SO =
      findLoadSizeOffset(LI, *LI.getParent(), BasicBlock::iterator(LI),
                         VisitedBlocks, ScannedInstCount);
  if (!bothKnown(SO))
    ++ObjectVisitorLoad;
  return SO;
}

SizeOffsetType ObjectSizeOffsetVisitor::combineSizeOffset(SizeOffsetType LHS,
                                                          SizeOffsetType RHS) {
  if (!bothKnown(LHS) || !bothKnown(RHS))
    return unknown();

  switch (Options.EvalMode) {
  case ObjectSizeOpts::Mode::Min:
    return (getSizeWithOverflow(LHS).slt(getSizeWithOverflow(RHS))) ? LHS : RHS;
  case ObjectSizeOpts::Mode::Max:
    return (getSizeWithOverflow(LHS).sgt(getSizeWithOverflow(RHS))) ? LHS : RHS;
  case ObjectSizeOpts::Mode::Exact:
    return (getSizeWithOverflow(LHS).eq(getSizeWithOverflow(RHS))) ? LHS
                                                                   : unknown();
  }
  llvm_unreachable("missing an eval mode");
}
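
// Illustrative combine (hypothetical operands): merging an 8-byte object with
// a 16-byte object returns the 8-byte pair in Mode::Min, the 16-byte pair in
// Mode::Max, and unknown() in Mode::Exact, since the remaining sizes differ.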

SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode &PN) {
  auto IncomingValues = PN.incoming_values();
  return std::accumulate(IncomingValues.begin() + 1, IncomingValues.end(),
                         compute(*IncomingValues.begin()),
                         [this](SizeOffsetType LHS, Value *VRHS) {
                           return combineSizeOffset(LHS, compute(VRHS));
                         });
}

SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
  return combineSizeOffset(compute(I.getTrueValue()),
                           compute(I.getFalseValue()));
}

SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue &) {
  return std::make_pair(Zero, Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I
                    << '\n');
  return unknown();
}

ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
    const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context,
    ObjectSizeOpts EvalOpts)
    : DL(DL), TLI(TLI), Context(Context),
      Builder(Context, TargetFolder(DL),
              IRBuilderCallbackInserter(
                  [&](Instruction *I) { InsertedInstructions.insert(I); })),
      EvalOpts(EvalOpts) {
  // IntTy and Zero must be set for each compute() since the address space may
  // be different for later objects.
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
  // XXX - Are vectors of pointers possible here?
  IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
  Zero = ConstantInt::get(IntTy, 0);

  SizeOffsetEvalType Result = compute_(V);

  if (!bothKnown(Result)) {
    // Erase everything that was computed in this iteration from the cache, so
    // that no dangling references are left behind. We could be a bit smarter
    // if we kept a dependency graph. It's probably not worth the complexity.
    for (const Value *SeenVal : SeenVals) {
      CacheMapTy::iterator CacheIt = CacheMap.find(SeenVal);
      // non-computable results can be safely cached
      if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second))
        CacheMap.erase(CacheIt);
    }

    // Erase any instructions we inserted as part of the traversal.
    for (Instruction *I : InsertedInstructions) {
      I->replaceAllUsesWith(PoisonValue::get(I->getType()));
      I->eraseFromParent();
    }
  }

  SeenVals.clear();
  InsertedInstructions.clear();
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Context, EvalOpts);
  SizeOffsetType Const = Visitor.compute(V);
  if (Visitor.bothKnown(Const))
    return std::make_pair(ConstantInt::get(Context, Const.first),
                          ConstantInt::get(Context, Const.second));

  V = V->stripPointerCasts();

  // Check cache.
  CacheMapTy::iterator CacheIt = CacheMap.find(V);
  if (CacheIt != CacheMap.end())
    return CacheIt->second;

  // Always generate code immediately before the instruction being
  // processed, so that the generated code dominates the same BBs.
  BuilderTy::InsertPointGuard Guard(Builder);
  if (Instruction *I = dyn_cast<Instruction>(V))
    Builder.SetInsertPoint(I);

  // Now compute the size and offset.
  SizeOffsetEvalType Result;

  // Record the pointers that were handled in this run, so that they can be
  // cleaned later if something fails. We also use this set to break cycles
  // that can occur in dead code.
  if (!SeenVals.insert(V).second) {
    Result = unknown();
  } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    Result = visitGEPOperator(*GEP);
  } else if (Instruction *I = dyn_cast<Instruction>(V)) {
    Result = visit(*I);
  } else if (isa<Argument>(V) ||
             (isa<ConstantExpr>(V) &&
              cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) ||
             isa<GlobalAlias>(V) ||
             isa<GlobalVariable>(V)) {
    // Ignore values where we cannot do more than ObjectSizeVisitor.
    Result = unknown();
  } else {
    LLVM_DEBUG(
        dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V
               << '\n');
    Result = unknown();
  }

  // Don't reuse CacheIt since it may be invalid at this point.
  CacheMap[V] = Result;
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  // must be a VLA
  assert(I.isArrayAllocation());

  // If needed, adjust the alloca's operand size to match the pointer size.
  // Subsequent math operations expect the types to match.
  Value *ArraySize = Builder.CreateZExtOrTrunc(
      I.getArraySize(), DL.getIntPtrType(I.getContext()));
  assert(ArraySize->getType() == Zero->getType() &&
         "Expected zero constant to have pointer type");

  Value *Size = ConstantInt::get(ArraySize->getType(),
                                 DL.getTypeAllocSize(I.getAllocatedType()));
  Size = Builder.CreateMul(Size, ArraySize);
  return std::make_pair(Size, Zero);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
  Optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
  if (!FnData)
    return unknown();

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    // TODO: implement evaluation of strdup/strndup
    return unknown();
  }

  Value *FirstArg = CB.getArgOperand(FnData->FstParam);
  FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
  if (FnData->SndParam < 0)
    return std::make_pair(FirstArg, Zero);

  Value *SecondArg = CB.getArgOperand(FnData->SndParam);
  SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
  Value *Size = Builder.CreateMul(FirstArg, SecondArg);
  return std::make_pair(Size, Zero);
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst &) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst &) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
  SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand());
  if (!bothKnown(PtrData))
    return unknown();

  Value *Offset = EmitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true);
  Offset = Builder.CreateAdd(PtrData.second, Offset);
  return std::make_pair(PtrData.first, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst &) {
  // clueless
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst &LI) {
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
  // Create 2 PHIs: one for size and another for offset.
  PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
  PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());

  // Insert right away in the cache to handle recursive PHIs.
  CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI);

  // Compute offset/size for each PHI incoming pointer.
  for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
    Builder.SetInsertPoint(&*PHI.getIncomingBlock(i)->getFirstInsertionPt());
    SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));

    if (!bothKnown(EdgeData)) {
      OffsetPHI->replaceAllUsesWith(PoisonValue::get(IntTy));
      OffsetPHI->eraseFromParent();
      InsertedInstructions.erase(OffsetPHI);
      SizePHI->replaceAllUsesWith(PoisonValue::get(IntTy));
      SizePHI->eraseFromParent();
      InsertedInstructions.erase(SizePHI);
      return unknown();
    }
    SizePHI->addIncoming(EdgeData.first, PHI.getIncomingBlock(i));
    OffsetPHI->addIncoming(EdgeData.second, PHI.getIncomingBlock(i));
  }

  Value *Size = SizePHI, *Offset = OffsetPHI;
  if (Value *Tmp = SizePHI->hasConstantValue()) {
    Size = Tmp;
    SizePHI->replaceAllUsesWith(Size);
    SizePHI->eraseFromParent();
    InsertedInstructions.erase(SizePHI);
  }
  if (Value *Tmp = OffsetPHI->hasConstantValue()) {
    Offset = Tmp;
    OffsetPHI->replaceAllUsesWith(Offset);
    OffsetPHI->eraseFromParent();
    InsertedInstructions.erase(OffsetPHI);
  }
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
  SizeOffsetEvalType TrueSide = compute_(I.getTrueValue());
  SizeOffsetEvalType FalseSide = compute_(I.getFalseValue());

  if (!bothKnown(TrueSide) || !bothKnown(FalseSide))
    return unknown();
  if (TrueSide == FalseSide)
    return TrueSide;

  Value *Size = Builder.CreateSelect(I.getCondition(), TrueSide.first,
                                     FalseSide.first);
  Value *Offset = Builder.CreateSelect(I.getCondition(), TrueSide.second,
                                       FalseSide.second);
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I
                    << '\n');
  return unknown();
}
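
// End-to-end usage sketch (illustrative; passes such as InstCombine normally
// drive this analysis through lowerObjectSizeCall rather than using the
// classes directly):
//
//   ObjectSizeOpts Opts;
//   Opts.EvalMode = ObjectSizeOpts::Mode::Exact;
//   uint64_t Size;
//   if (getObjectSize(Ptr, Size, DL, &TLI, Opts))
//     ...; // Size holds the number of accessible bytes starting at Ptr.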