//===- MemoryBuiltins.cpp - Identify calls to memory builtins ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions identifies calls to builtin functions that allocate
// or free memory.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memory-builtins"

enum AllocType : uint8_t {
  OpNewLike          = 1<<0, // allocates; never returns null
  MallocLike         = 1<<1 | OpNewLike, // allocates; may return null
  AlignedAllocLike   = 1<<2, // allocates with alignment; may return null
  CallocLike         = 1<<3, // allocates + bzero
  ReallocLike        = 1<<4, // reallocates
  StrDupLike         = 1<<5,
  MallocOrCallocLike = MallocLike | CallocLike | AlignedAllocLike,
  AllocLike          = MallocOrCallocLike | StrDupLike,
  AnyAlloc           = AllocLike | ReallocLike
};

struct AllocFnsTy {
  AllocType AllocTy;
  unsigned NumParams;
  // First and Second size parameters (or -1 if unused)
  int FstParam, SndParam;
};

// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc.
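
// To make the table below easier to read: each entry pairs a LibFunc with an
// AllocFnsTy descriptor. For example, the calloc entry {CallocLike, 2, 0, 1}
// says calloc returns zero-initialized memory, takes two parameters, and its
// allocation size is the product of parameters 0 and 1. A value of -1 marks an
// unused size parameter (e.g. malloc's {MallocLike, 1, 0, -1} has a single
// size argument at index 0).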
static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
    {LibFunc_malloc,              {MallocLike,  1, 0,  -1}},
    {LibFunc_vec_malloc,          {MallocLike,  1, 0,  -1}},
    {LibFunc_valloc,              {MallocLike,  1, 0,  -1}},
    {LibFunc_Znwj,                {OpNewLike,   1, 0,  -1}}, // new(unsigned int)
    {LibFunc_ZnwjRKSt9nothrow_t,  {MallocLike,  2, 0,  -1}}, // new(unsigned int, nothrow)
    {LibFunc_ZnwjSt11align_val_t, {OpNewLike,   2, 0,  -1}}, // new(unsigned int, align_val_t)
    {LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t, // new(unsigned int, align_val_t, nothrow)
     {MallocLike,  3, 0,  -1}},
    {LibFunc_Znwm,                {OpNewLike,   1, 0,  -1}}, // new(unsigned long)
    {LibFunc_ZnwmRKSt9nothrow_t,  {MallocLike,  2, 0,  -1}}, // new(unsigned long, nothrow)
    {LibFunc_ZnwmSt11align_val_t, {OpNewLike,   2, 0,  -1}}, // new(unsigned long, align_val_t)
    {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t, // new(unsigned long, align_val_t, nothrow)
     {MallocLike,  3, 0,  -1}},
    {LibFunc_Znaj,                {OpNewLike,   1, 0,  -1}}, // new[](unsigned int)
    {LibFunc_ZnajRKSt9nothrow_t,  {MallocLike,  2, 0,  -1}}, // new[](unsigned int, nothrow)
    {LibFunc_ZnajSt11align_val_t, {OpNewLike,   2, 0,  -1}}, // new[](unsigned int, align_val_t)
    {LibFunc_ZnajSt11align_val_tRKSt9nothrow_t, // new[](unsigned int, align_val_t, nothrow)
     {MallocLike,  3, 0,  -1}},
    {LibFunc_Znam,                {OpNewLike,   1, 0,  -1}}, // new[](unsigned long)
    {LibFunc_ZnamRKSt9nothrow_t,  {MallocLike,  2, 0,  -1}}, // new[](unsigned long, nothrow)
    {LibFunc_ZnamSt11align_val_t, {OpNewLike,   2, 0,  -1}}, // new[](unsigned long, align_val_t)
    {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t, // new[](unsigned long, align_val_t, nothrow)
     {MallocLike,  3, 0,  -1}},
    {LibFunc_msvc_new_int,                    {OpNewLike,  1, 0, -1}}, // new(unsigned int)
    {LibFunc_msvc_new_int_nothrow,            {MallocLike, 2, 0, -1}}, // new(unsigned int, nothrow)
    {LibFunc_msvc_new_longlong,               {OpNewLike,  1, 0, -1}}, // new(unsigned long long)
    {LibFunc_msvc_new_longlong_nothrow,       {MallocLike, 2, 0, -1}}, // new(unsigned long long, nothrow)
    {LibFunc_msvc_new_array_int,              {OpNewLike,  1, 0, -1}}, // new[](unsigned int)
    {LibFunc_msvc_new_array_int_nothrow,      {MallocLike, 2, 0, -1}}, // new[](unsigned int, nothrow)
    {LibFunc_msvc_new_array_longlong,         {OpNewLike,  1, 0, -1}}, // new[](unsigned long long)
    {LibFunc_msvc_new_array_longlong_nothrow, {MallocLike, 2, 0, -1}}, // new[](unsigned long long, nothrow)
    {LibFunc_aligned_alloc, {AlignedAllocLike, 2, 1,  -1}},
    {LibFunc_calloc,        {CallocLike,       2, 0,   1}},
    {LibFunc_vec_calloc,    {CallocLike,       2, 0,   1}},
    {LibFunc_realloc,       {ReallocLike,      2, 1,  -1}},
    {LibFunc_vec_realloc,   {ReallocLike,      2, 1,  -1}},
    {LibFunc_reallocf,      {ReallocLike,      2, 1,  -1}},
    {LibFunc_strdup,        {StrDupLike,       1, -1, -1}},
    {LibFunc_strndup,       {StrDupLike,       2, 1,  -1}}
    // TODO: Handle "int posix_memalign(void **, size_t, size_t)"
};

static const Function *getCalledFunction(const Value *V,
                                         bool LookThroughBitCast,
                                         bool &IsNoBuiltin) {
  // Don't care about intrinsics in this case.
  if (isa<IntrinsicInst>(V))
    return nullptr;

  if (LookThroughBitCast)
    V = V->stripPointerCasts();

  const auto *CB = dyn_cast<CallBase>(V);
  if (!CB)
    return nullptr;

  IsNoBuiltin = CB->isNoBuiltin();

  if (const Function *Callee = CB->getCalledFunction())
    return Callee;
  return nullptr;
}

/// Returns the allocation data for the given value if it's either a call to a
/// known allocation function, or a call to a function with the allocsize
/// attribute.
static Optional<AllocFnsTy>
getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
                             const TargetLibraryInfo *TLI) {
  // Make sure that the function is available.
  StringRef FnName = Callee->getName();
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
    return None;

  const auto *Iter = find_if(
      AllocationFnData, [TLIFn](const std::pair<LibFunc, AllocFnsTy> &P) {
        return P.first == TLIFn;
      });

  if (Iter == std::end(AllocationFnData))
    return None;

  const AllocFnsTy *FnData = &Iter->second;
  if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
    return None;

  // Check function prototype.
  int FstParam = FnData->FstParam;
  int SndParam = FnData->SndParam;
  FunctionType *FTy = Callee->getFunctionType();

  if (FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
      FTy->getNumParams() == FnData->NumParams &&
      (FstParam < 0 ||
       (FTy->getParamType(FstParam)->isIntegerTy(32) ||
        FTy->getParamType(FstParam)->isIntegerTy(64))) &&
      (SndParam < 0 ||
       FTy->getParamType(SndParam)->isIntegerTy(32) ||
       FTy->getParamType(SndParam)->isIntegerTy(64)))
    return *FnData;
  return None;
}

static Optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
                  const TargetLibraryInfo *TLI,
                  bool LookThroughBitCast = false) {
  bool IsNoBuiltinCall;
  if (const Function *Callee =
          getCalledFunction(V, LookThroughBitCast, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(Callee, AllocTy, TLI);
  return None;
}

static Optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
                  function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
                  bool LookThroughBitCast = false) {
  bool IsNoBuiltinCall;
  if (const Function *Callee =
          getCalledFunction(V, LookThroughBitCast, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(
          Callee, AllocTy, &GetTLI(const_cast<Function &>(*Callee)));
  return None;
}

static Optional<AllocFnsTy> getAllocationSize(const Value *V,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee =
      getCalledFunction(V, /*LookThroughBitCast=*/false, IsNoBuiltinCall);
  if (!Callee)
    return None;

  // Prefer to use existing information over allocsize. This will give us an
  // accurate AllocTy.
  if (!IsNoBuiltinCall)
    if (Optional<AllocFnsTy> Data =
            getAllocationDataForFunction(Callee, AnyAlloc, TLI))
      return Data;

  Attribute Attr = Callee->getFnAttribute(Attribute::AllocSize);
  if (Attr == Attribute())
    return None;

  std::pair<unsigned, Optional<unsigned>> Args = Attr.getAllocSizeArgs();

  AllocFnsTy Result;
  // Because allocsize only tells us how many bytes are allocated, we're not
  // really allowed to assume anything, so we use MallocLike.
  Result.AllocTy = MallocLike;
  Result.NumParams = Callee->getNumOperands();
  Result.FstParam = Args.first;
  Result.SndParam = Args.second.getValueOr(-1);
  return Result;
}

static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) {
  const auto *CB =
      dyn_cast<CallBase>(LookThroughBitCast ? V->stripPointerCasts() : V);
  return CB && CB->hasRetAttr(Attribute::NoAlias);
}

/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or
/// strdup-like).
bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
                          bool LookThroughBitCast) {
  return getAllocationData(V, AnyAlloc, TLI, LookThroughBitCast).hasValue();
}
bool llvm::isAllocationFn(
    const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
    bool LookThroughBitCast) {
  return getAllocationData(V, AnyAlloc, GetTLI, LookThroughBitCast).hasValue();
}

/// Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
bool llvm::isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
                       bool LookThroughBitCast) {
  // It's safe to consider realloc as noalias since accessing the original
  // pointer is undefined behavior.
  return isAllocationFn(V, TLI, LookThroughBitCast) ||
         hasNoAliasAttr(V, LookThroughBitCast);
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
bool llvm::isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                          bool LookThroughBitCast) {
  return getAllocationData(V, MallocLike, TLI, LookThroughBitCast).hasValue();
}
bool llvm::isMallocLikeFn(
    const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
    bool LookThroughBitCast) {
  return getAllocationData(V, MallocLike, GetTLI, LookThroughBitCast)
      .hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory with alignment (such as aligned_alloc).
bool llvm::isAlignedAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                                bool LookThroughBitCast) {
  return getAllocationData(V, AlignedAllocLike, TLI, LookThroughBitCast)
      .hasValue();
}
bool llvm::isAlignedAllocLikeFn(
    const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
    bool LookThroughBitCast) {
  return getAllocationData(V, AlignedAllocLike, GetTLI, LookThroughBitCast)
      .hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                          bool LookThroughBitCast) {
  return getAllocationData(V, CallocLike, TLI, LookThroughBitCast).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                                  bool LookThroughBitCast) {
  return getAllocationData(V, MallocOrCallocLike, TLI, LookThroughBitCast)
      .hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup-like).
bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                         bool LookThroughBitCast) {
  return getAllocationData(V, AllocLike, TLI, LookThroughBitCast).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                           bool LookThroughBitCast) {
  return getAllocationData(V, ReallocLike, TLI, LookThroughBitCast).hasValue();
}

/// Tests if a function is a library function that reallocates memory (e.g.,
/// realloc).
bool llvm::isReallocLikeFn(const Function *F, const TargetLibraryInfo *TLI) {
  return getAllocationDataForFunction(F, ReallocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory and throws if an allocation failed (e.g., new).
bool llvm::isOpNewLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                         bool LookThroughBitCast) {
  return getAllocationData(V, OpNewLike, TLI, LookThroughBitCast).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory (strdup, strndup).
bool llvm::isStrdupLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                          bool LookThroughBitCast) {
  return getAllocationData(V, StrDupLike, TLI, LookThroughBitCast).hasValue();
}

/// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here.
const CallInst *llvm::extractMallocCall(
    const Value *I,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  return isMallocLikeFn(I, GetTLI) ? dyn_cast<CallInst>(I) : nullptr;
}

static Value *computeArraySize(const CallInst *CI, const DataLayout &DL,
                               const TargetLibraryInfo *TLI,
                               bool LookThroughSExt = false) {
  if (!CI)
    return nullptr;

  // The size of the malloc's result type must be known to determine the array
  // size.
  Type *T = getMallocAllocatedType(CI, TLI);
  if (!T || !T->isSized())
    return nullptr;

  unsigned ElementSize = DL.getTypeAllocSize(T);
  if (StructType *ST = dyn_cast<StructType>(T))
    ElementSize = DL.getStructLayout(ST)->getSizeInBytes();

  // If the malloc call's argument can be determined to be a multiple of
  // ElementSize, return the multiple. Otherwise, return nullptr.
  Value *MallocArg = CI->getArgOperand(0);
  Value *Multiple = nullptr;
  if (ComputeMultiple(MallocArg, ElementSize, Multiple, LookThroughSExt))
    return Multiple;

  return nullptr;
}

/// getMallocType - Returns the PointerType resulting from the malloc call.
/// The PointerType depends on the number of bitcast uses of the malloc call:
///   0: PointerType is the call's return type.
///   1: PointerType is the bitcast's result type.
///  >1: Unique PointerType cannot be determined, return NULL.
PointerType *llvm::getMallocType(const CallInst *CI,
                                 const TargetLibraryInfo *TLI) {
  assert(isMallocLikeFn(CI, TLI) && "getMallocType and not malloc call");

  PointerType *MallocType = nullptr;
  unsigned NumOfBitCastUses = 0;

  // Determine if CallInst has a bitcast use.
  for (const User *U : CI->users())
    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      MallocType = cast<PointerType>(BCI->getDestTy());
      NumOfBitCastUses++;
    }

  // Malloc call has 1 bitcast use, so type is the bitcast's destination type.
  if (NumOfBitCastUses == 1)
    return MallocType;

  // Malloc call was not bitcast, so type is the malloc function's return type.
  if (NumOfBitCastUses == 0)
    return cast<PointerType>(CI->getType());

  // Type could not be determined.
  return nullptr;
}

/// getMallocAllocatedType - Returns the Type allocated by a malloc call.
/// The Type depends on the number of bitcast uses of the malloc call:
///   0: PointerType is the malloc call's return type.
///   1: PointerType is the bitcast's result type.
///  >1: Unique PointerType cannot be determined, return NULL.
Type *llvm::getMallocAllocatedType(const CallInst *CI,
                                   const TargetLibraryInfo *TLI) {
  PointerType *PT = getMallocType(CI, TLI);
  return PT ? PT->getElementType() : nullptr;
}

/// getMallocArraySize - Returns the array size of a malloc call. If the
/// argument passed to malloc is a multiple of the size of the malloced type,
/// then return that multiple. For non-array mallocs, the multiple is
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined.
Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout &DL,
                                const TargetLibraryInfo *TLI,
                                bool LookThroughSExt) {
  assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call");
  return computeArraySize(CI, DL, TLI, LookThroughSExt);
}

/// extractCallocCall - Returns the corresponding CallInst if the instruction
/// is a calloc call.
const CallInst *llvm::extractCallocCall(const Value *I,
                                        const TargetLibraryInfo *TLI) {
  return isCallocLikeFn(I, TLI) ? cast<CallInst>(I) : nullptr;
}

/// isLibFreeFunction - Returns true if the function is a builtin free().
bool llvm::isLibFreeFunction(const Function *F, const LibFunc TLIFn) {
  unsigned ExpectedNumParams;
  if (TLIFn == LibFunc_free ||
      TLIFn == LibFunc_ZdlPv ||                   // operator delete(void*)
      TLIFn == LibFunc_ZdaPv ||                   // operator delete[](void*)
      TLIFn == LibFunc_msvc_delete_ptr32 ||       // operator delete(void*)
      TLIFn == LibFunc_msvc_delete_ptr64 ||       // operator delete(void*)
      TLIFn == LibFunc_msvc_delete_array_ptr32 || // operator delete[](void*)
      TLIFn == LibFunc_msvc_delete_array_ptr64)   // operator delete[](void*)
    ExpectedNumParams = 1;
  else if (TLIFn == LibFunc_ZdlPvj ||               // delete(void*, uint)
           TLIFn == LibFunc_ZdlPvm ||               // delete(void*, ulong)
           TLIFn == LibFunc_ZdlPvRKSt9nothrow_t ||  // delete(void*, nothrow)
           TLIFn == LibFunc_ZdlPvSt11align_val_t || // delete(void*, align_val_t)
           TLIFn == LibFunc_ZdaPvj ||               // delete[](void*, uint)
           TLIFn == LibFunc_ZdaPvm ||               // delete[](void*, ulong)
           TLIFn == LibFunc_ZdaPvRKSt9nothrow_t ||  // delete[](void*, nothrow)
           TLIFn == LibFunc_ZdaPvSt11align_val_t || // delete[](void*, align_val_t)
           TLIFn == LibFunc_msvc_delete_ptr32_int ||      // delete(void*, uint)
           TLIFn == LibFunc_msvc_delete_ptr64_longlong || // delete(void*, ulonglong)
           TLIFn == LibFunc_msvc_delete_ptr32_nothrow ||  // delete(void*, nothrow)
           TLIFn == LibFunc_msvc_delete_ptr64_nothrow ||  // delete(void*, nothrow)
           TLIFn == LibFunc_msvc_delete_array_ptr32_int ||      // delete[](void*, uint)
           TLIFn == LibFunc_msvc_delete_array_ptr64_longlong || // delete[](void*, ulonglong)
           TLIFn == LibFunc_msvc_delete_array_ptr32_nothrow ||  // delete[](void*, nothrow)
           TLIFn == LibFunc_msvc_delete_array_ptr64_nothrow)    // delete[](void*, nothrow)
    ExpectedNumParams = 2;
  else if (TLIFn == LibFunc_ZdaPvSt11align_val_tRKSt9nothrow_t || // delete[](void*, align_val_t, nothrow)
           TLIFn == LibFunc_ZdlPvSt11align_val_tRKSt9nothrow_t || // delete(void*, align_val_t, nothrow)
           TLIFn == LibFunc_ZdlPvjSt11align_val_t || // delete(void*, unsigned int, align_val_t)
           TLIFn == LibFunc_ZdlPvmSt11align_val_t || // delete(void*, unsigned long, align_val_t)
           TLIFn == LibFunc_ZdaPvjSt11align_val_t || // delete[](void*, unsigned int, align_val_t)
           TLIFn == LibFunc_ZdaPvmSt11align_val_t)   // delete[](void*, unsigned long, align_val_t)
    ExpectedNumParams = 3;
  else
    return false;

  // Check free prototype.
  // FIXME: workaround for PR5130; this will be obsolete once a nobuiltin
  // attribute exists.
  FunctionType *FTy = F->getFunctionType();
  if (!FTy->getReturnType()->isVoidTy())
    return false;
  if (FTy->getNumParams() != ExpectedNumParams)
    return false;
  if (FTy->getParamType(0) != Type::getInt8PtrTy(F->getContext()))
    return false;

  return true;
}

/// isFreeCall - Returns non-null if the value is a call to the builtin free().
const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee =
      getCalledFunction(I, /*LookThroughBitCast=*/false, IsNoBuiltinCall);
  if (Callee == nullptr || IsNoBuiltinCall)
    return nullptr;

  StringRef FnName = Callee->getName();
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
    return nullptr;

  return isLibFreeFunction(Callee, TLIFn) ? dyn_cast<CallInst>(I) : nullptr;
}

//===----------------------------------------------------------------------===//
// Utility functions to compute size of objects.
//
static APInt getSizeWithOverflow(const SizeOffsetType &Data) {
  if (Data.second.isNegative() || Data.first.ult(Data.second))
    return APInt(Data.first.getBitWidth(), 0);
  return Data.first - Data.second;
}

/// Compute the size of the object pointed to by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                         const TargetLibraryInfo *TLI, ObjectSizeOpts Opts) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), Opts);
  SizeOffsetType Data = Visitor.compute(const_cast<Value *>(Ptr));
  if (!Visitor.bothKnown(Data))
    return false;

  Size = getSizeWithOverflow(Data).getZExtValue();
  return true;
}
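
// For reference when reading lowerObjectSizeCall below, an illustrative call
// of the intrinsic (not taken from this file) looks like:
//   %size = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 false, i1 true,
//                                              i1 false)
// where the i1 arguments select min-vs-max (false selects the maximum),
// whether null has unknown size, and whether a dynamic (non-constant) result
// is allowed.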
Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI,
                                 bool MustSucceed) {
  assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
         "ObjectSize must be a call to llvm.objectsize!");

  bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
  ObjectSizeOpts EvalOptions;
  // Unless we have to fold this to something, try to be as accurate as
  // possible.
  if (MustSucceed)
    EvalOptions.EvalMode =
        MaxVal ? ObjectSizeOpts::Mode::Max : ObjectSizeOpts::Mode::Min;
  else
    EvalOptions.EvalMode = ObjectSizeOpts::Mode::Exact;

  EvalOptions.NullIsUnknownSize =
      cast<ConstantInt>(ObjectSize->getArgOperand(2))->isOne();

  auto *ResultType = cast<IntegerType>(ObjectSize->getType());
  bool StaticOnly = cast<ConstantInt>(ObjectSize->getArgOperand(3))->isZero();
  if (StaticOnly) {
    // FIXME: Does it make sense to just return a failure value if the size
    // won't fit in the output and `!MustSucceed`?
    uint64_t Size;
    if (getObjectSize(ObjectSize->getArgOperand(0), Size, DL, TLI,
                      EvalOptions) &&
        isUIntN(ResultType->getBitWidth(), Size))
      return ConstantInt::get(ResultType, Size);
  } else {
    LLVMContext &Ctx = ObjectSize->getFunction()->getContext();
    ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, EvalOptions);
    SizeOffsetEvalType SizeOffsetPair =
        Eval.compute(ObjectSize->getArgOperand(0));

    if (SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown()) {
      IRBuilder<TargetFolder> Builder(Ctx, TargetFolder(DL));
      Builder.SetInsertPoint(ObjectSize);

      // If we're beyond the end of the object, we can always access exactly
      // 0 bytes.
      Value *ResultSize =
          Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
      Value *UseZero =
          Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
      ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
      Value *Ret = Builder.CreateSelect(
          UseZero, ConstantInt::get(ResultType, 0), ResultSize);

      // The non-constant size expression cannot evaluate to -1.
      if (!isa<Constant>(SizeOffsetPair.first) ||
          !isa<Constant>(SizeOffsetPair.second))
        Builder.CreateAssumption(
            Builder.CreateICmpNE(Ret, ConstantInt::get(ResultType, -1)));

      return Ret;
    }
  }

  if (!MustSucceed)
    return nullptr;

  return ConstantInt::get(ResultType, MaxVal ? -1ULL : 0);
}

STATISTIC(ObjectVisitorArgument,
          "Number of arguments with unsolved size and offset");
STATISTIC(ObjectVisitorLoad,
          "Number of load instructions with unsolved size and offset");

APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Alignment) {
  if (Options.RoundToAlign && Alignment)
    return APInt(IntTyBits, alignTo(Size.getZExtValue(), Align(Alignment)));
  return Size;
}

ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
                                                 const TargetLibraryInfo *TLI,
                                                 LLVMContext &Context,
                                                 ObjectSizeOpts Options)
    : DL(DL), TLI(TLI), Options(Options) {
  // Pointer size must be rechecked for each object visited since it could have
  // a different address space.
}

SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
  Zero = APInt::getNullValue(IntTyBits);

  V = V->stripPointerCasts();
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If we have already seen this instruction, bail out. Cycles can happen in
    // unreachable code after constant propagation.
    if (!SeenInsts.insert(I).second)
      return unknown();

    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      return visitGEPOperator(*GEP);
    return visit(*I);
  }
  if (Argument *A = dyn_cast<Argument>(V))
    return visitArgument(*A);
  if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
    return visitConstantPointerNull(*P);
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return visitGlobalAlias(*GA);
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return visitGlobalVariable(*GV);
  if (UndefValue *UV = dyn_cast<UndefValue>(V))
    return visitUndefValue(*UV);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::IntToPtr)
      return unknown(); // clueless
    if (CE->getOpcode() == Instruction::GetElementPtr)
      return visitGEPOperator(cast<GEPOperator>(*CE));
  }

  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
                    << *V << '\n');
  return unknown();
}

/// When we're compiling N-bit code, and the user uses parameters that are
/// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
/// trouble with APInt size issues. This function handles resizing + overflow
/// checks for us. Check and zext or trunc \p I depending on IntTyBits and
/// I's value.
bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
  // More bits than we can handle. Checking the bit width isn't necessary, but
  // it's faster than checking active bits, and should give `false` in the
  // vast majority of cases.
  if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
    return false;
  if (I.getBitWidth() != IntTyBits)
    I = I.zextOrTrunc(IntTyBits);
  return true;
}

SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  if (isa<ScalableVectorType>(I.getAllocatedType()))
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));
  if (!I.isArrayAllocation())
    return std::make_pair(align(Size, I.getAlignment()), Zero);

  Value *ArraySize = I.getArraySize();
  if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
    APInt NumElems = C->getValue();
    if (!CheckedZextOrTrunc(NumElems))
      return unknown();

    bool Overflow;
    Size = Size.umul_ov(NumElems, Overflow);
    return Overflow ? unknown()
                    : std::make_pair(align(Size, I.getAlignment()), Zero);
  }
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
  Type *MemoryTy = A.getPointeeInMemoryValueType();
  // No interprocedural analysis is done at the moment.
  if (!MemoryTy || !MemoryTy->isSized()) {
    ++ObjectVisitorArgument;
    return unknown();
  }

  APInt Size(IntTyBits, DL.getTypeAllocSize(MemoryTy));
  return std::make_pair(align(Size, A.getParamAlignment()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
  Optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
  if (!FnData)
    return unknown();

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    APInt Size(IntTyBits, GetStringLength(CB.getArgOperand(0)));
    if (!Size)
      return unknown();

    // Strndup limits strlen.
    if (FnData->FstParam > 0) {
      ConstantInt *Arg =
          dyn_cast<ConstantInt>(CB.getArgOperand(FnData->FstParam));
      if (!Arg)
        return unknown();

      APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
      if (Size.ugt(MaxSize))
        Size = MaxSize + 1;
    }
    return std::make_pair(Size, Zero);
  }

  ConstantInt *Arg = dyn_cast<ConstantInt>(CB.getArgOperand(FnData->FstParam));
  if (!Arg)
    return unknown();

  APInt Size = Arg->getValue();
  if (!CheckedZextOrTrunc(Size))
    return unknown();

  // Size is determined by just 1 parameter.
  if (FnData->SndParam < 0)
    return std::make_pair(Size, Zero);

  Arg = dyn_cast<ConstantInt>(CB.getArgOperand(FnData->SndParam));
  if (!Arg)
    return unknown();

  APInt NumElems = Arg->getValue();
  if (!CheckedZextOrTrunc(NumElems))
    return unknown();

  bool Overflow;
  Size = Size.umul_ov(NumElems, Overflow);
  return Overflow ? unknown() : std::make_pair(Size, Zero);

  // TODO: handle more standard functions (+ wchar cousins):
  // - strdup / strndup
  // - strcpy / strncpy
  // - strcat / strncat
  // - memcpy / memmove
  // - memset
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull &CPN) {
  // If null is unknown, there's nothing we can do. Additionally, non-zero
  // address spaces can make use of null, so we don't presume to know anything
  // about that.
  //
  // TODO: How should this work with address space casts? We currently just
  // drop them on the floor, but it's unclear what we should do when a NULL
  // from addrspace(1) gets cast to addrspace(0) (or vice-versa).
  if (Options.NullIsUnknownSize || CPN.getType()->getAddressSpace())
    return unknown();
  return std::make_pair(Zero, Zero);
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst &) {
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst &) {
  // Easy cases were already folded by previous passes.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
  SizeOffsetType PtrData = compute(GEP.getPointerOperand());
  APInt Offset(DL.getIndexTypeSizeInBits(GEP.getPointerOperand()->getType()),
               0);
  if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(DL, Offset))
    return unknown();

  return std::make_pair(PtrData.first, PtrData.second + Offset);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
  if (GA.isInterposable())
    return unknown();
  return compute(GA.getAliasee());
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV) {
  if (!GV.hasDefinitiveInitializer())
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
  return std::make_pair(align(Size, GV.getAlignment()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst &) {
  // clueless
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst &) {
  ++ObjectVisitorLoad;
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode &) {
  // Too complex to analyze statically.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
  SizeOffsetType TrueSide = compute(I.getTrueValue());
  SizeOffsetType FalseSide = compute(I.getFalseValue());
  if (bothKnown(TrueSide) && bothKnown(FalseSide)) {
    if (TrueSide == FalseSide) {
      return TrueSide;
    }

    APInt TrueResult = getSizeWithOverflow(TrueSide);
    APInt FalseResult = getSizeWithOverflow(FalseSide);

    if (TrueResult == FalseResult) {
      return TrueSide;
    }
    if (Options.EvalMode == ObjectSizeOpts::Mode::Min) {
      if (TrueResult.slt(FalseResult))
        return TrueSide;
      return FalseSide;
    }
    if (Options.EvalMode == ObjectSizeOpts::Mode::Max) {
      if (TrueResult.sgt(FalseResult))
        return TrueSide;
      return FalseSide;
    }
  }
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue &) {
  return std::make_pair(Zero, Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I
                    << '\n');
  return unknown();
}

ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
    const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context,
    ObjectSizeOpts EvalOpts)
    : DL(DL), TLI(TLI), Context(Context),
      Builder(Context, TargetFolder(DL),
              IRBuilderCallbackInserter(
                  [&](Instruction *I) { InsertedInstructions.insert(I); })),
      EvalOpts(EvalOpts) {
  // IntTy and Zero must be set for each compute() since the address space may
  // be different for later objects.
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
  // XXX - Are vectors of pointers possible here?
  IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
  Zero = ConstantInt::get(IntTy, 0);

  SizeOffsetEvalType Result = compute_(V);

  if (!bothKnown(Result)) {
    // Erase everything that was computed in this iteration from the cache, so
    // that no dangling references are left behind. We could be a bit smarter
    // if we kept a dependency graph. It's probably not worth the complexity.
    for (const Value *SeenVal : SeenVals) {
      CacheMapTy::iterator CacheIt = CacheMap.find(SeenVal);
      // Non-computable results can be safely cached.
      if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second))
        CacheMap.erase(CacheIt);
    }

    // Erase any instructions we inserted as part of the traversal.
    for (Instruction *I : InsertedInstructions) {
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      I->eraseFromParent();
    }
  }

  SeenVals.clear();
  InsertedInstructions.clear();
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Context, EvalOpts);
  SizeOffsetType Const = Visitor.compute(V);
  if (Visitor.bothKnown(Const))
    return std::make_pair(ConstantInt::get(Context, Const.first),
                          ConstantInt::get(Context, Const.second));

  V = V->stripPointerCasts();

  // Check cache.
  CacheMapTy::iterator CacheIt = CacheMap.find(V);
  if (CacheIt != CacheMap.end())
    return CacheIt->second;

  // Always generate code immediately before the instruction being
  // processed, so that the generated code dominates the same BBs.
  BuilderTy::InsertPointGuard Guard(Builder);
  if (Instruction *I = dyn_cast<Instruction>(V))
    Builder.SetInsertPoint(I);

  // Now compute the size and offset.
  SizeOffsetEvalType Result;

  // Record the pointers that were handled in this run, so that they can be
  // cleaned later if something fails. We also use this set to break cycles
  // that can occur in dead code.
  if (!SeenVals.insert(V).second) {
    Result = unknown();
  } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    Result = visitGEPOperator(*GEP);
  } else if (Instruction *I = dyn_cast<Instruction>(V)) {
    Result = visit(*I);
  } else if (isa<Argument>(V) ||
             (isa<ConstantExpr>(V) &&
              cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) ||
             isa<GlobalAlias>(V) ||
             isa<GlobalVariable>(V)) {
    // Ignore values where we cannot do more than ObjectSizeVisitor.
    Result = unknown();
  } else {
    LLVM_DEBUG(
        dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V
               << '\n');
    Result = unknown();
  }

  // Don't reuse CacheIt since it may be invalid at this point.
  CacheMap[V] = Result;
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  // Must be a VLA.
  assert(I.isArrayAllocation());
  Value *ArraySize = I.getArraySize();
  Value *Size = ConstantInt::get(ArraySize->getType(),
                                 DL.getTypeAllocSize(I.getAllocatedType()));
  Size = Builder.CreateMul(Size, ArraySize);
  return std::make_pair(Size, Zero);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
  Optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
  if (!FnData)
    return unknown();

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    // TODO
    return unknown();
  }

  Value *FirstArg = CB.getArgOperand(FnData->FstParam);
  FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
  if (FnData->SndParam < 0)
    return std::make_pair(FirstArg, Zero);

  Value *SecondArg = CB.getArgOperand(FnData->SndParam);
  SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
  Value *Size = Builder.CreateMul(FirstArg, SecondArg);
  return std::make_pair(Size, Zero);

  // TODO: handle more standard functions (+ wchar cousins):
  // - strdup / strndup
  // - strcpy / strncpy
  // - strcat / strncat
  // - memcpy / memmove
  // - memset
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst &) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst &) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
  SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand());
  if (!bothKnown(PtrData))
    return unknown();

  Value *Offset = EmitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true);
  Offset = Builder.CreateAdd(PtrData.second, Offset);
  return std::make_pair(PtrData.first, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst &) {
  // clueless
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst &) {
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
  // Create 2 PHIs: one for size and another for offset.
  PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
  PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());

  // Insert right away in the cache to handle recursive PHIs.
  CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI);

  // Compute offset/size for each PHI incoming pointer.
  for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
    Builder.SetInsertPoint(&*PHI.getIncomingBlock(i)->getFirstInsertionPt());
    SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));

    if (!bothKnown(EdgeData)) {
      OffsetPHI->replaceAllUsesWith(UndefValue::get(IntTy));
      OffsetPHI->eraseFromParent();
      InsertedInstructions.erase(OffsetPHI);
      SizePHI->replaceAllUsesWith(UndefValue::get(IntTy));
      SizePHI->eraseFromParent();
      InsertedInstructions.erase(SizePHI);
      return unknown();
    }
    SizePHI->addIncoming(EdgeData.first, PHI.getIncomingBlock(i));
    OffsetPHI->addIncoming(EdgeData.second, PHI.getIncomingBlock(i));
  }

  Value *Size = SizePHI, *Offset = OffsetPHI;
  if (Value *Tmp = SizePHI->hasConstantValue()) {
    Size = Tmp;
    SizePHI->replaceAllUsesWith(Size);
    SizePHI->eraseFromParent();
    InsertedInstructions.erase(SizePHI);
  }
  if (Value *Tmp = OffsetPHI->hasConstantValue()) {
    Offset = Tmp;
    OffsetPHI->replaceAllUsesWith(Offset);
    OffsetPHI->eraseFromParent();
    InsertedInstructions.erase(OffsetPHI);
  }
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
  SizeOffsetEvalType TrueSide = compute_(I.getTrueValue());
  SizeOffsetEvalType FalseSide = compute_(I.getFalseValue());

  if (!bothKnown(TrueSide) || !bothKnown(FalseSide))
    return unknown();
  if (TrueSide == FalseSide)
    return TrueSide;

  Value *Size = Builder.CreateSelect(I.getCondition(), TrueSide.first,
                                     FalseSide.first);
  Value *Offset = Builder.CreateSelect(I.getCondition(), TrueSide.second,
                                       FalseSide.second);
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I
                    << '\n');
  return unknown();
}