1 //===- Evaluator.cpp - LLVM IR evaluator ----------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Function evaluator for LLVM IR. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "llvm/Transforms/Utils/Evaluator.h" 14 #include "llvm/ADT/DenseMap.h" 15 #include "llvm/ADT/STLExtras.h" 16 #include "llvm/ADT/SmallPtrSet.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/Analysis/ConstantFolding.h" 19 #include "llvm/IR/BasicBlock.h" 20 #include "llvm/IR/Constant.h" 21 #include "llvm/IR/Constants.h" 22 #include "llvm/IR/DataLayout.h" 23 #include "llvm/IR/DerivedTypes.h" 24 #include "llvm/IR/Function.h" 25 #include "llvm/IR/GlobalAlias.h" 26 #include "llvm/IR/GlobalValue.h" 27 #include "llvm/IR/GlobalVariable.h" 28 #include "llvm/IR/InstrTypes.h" 29 #include "llvm/IR/Instruction.h" 30 #include "llvm/IR/Instructions.h" 31 #include "llvm/IR/IntrinsicInst.h" 32 #include "llvm/IR/Intrinsics.h" 33 #include "llvm/IR/Operator.h" 34 #include "llvm/IR/Type.h" 35 #include "llvm/IR/User.h" 36 #include "llvm/IR/Value.h" 37 #include "llvm/Support/Casting.h" 38 #include "llvm/Support/Debug.h" 39 #include "llvm/Support/raw_ostream.h" 40 #include <iterator> 41 42 #define DEBUG_TYPE "evaluator" 43 44 using namespace llvm; 45 46 static inline bool 47 isSimpleEnoughValueToCommit(Constant *C, 48 SmallPtrSetImpl<Constant *> &SimpleConstants, 49 const DataLayout &DL); 50 51 /// Return true if the specified constant can be handled by the code generator. 52 /// We don't want to generate something like: 53 /// void *X = &X/42; 54 /// because the code generator doesn't have a relocation that can handle that. 
///
/// This function should be called if C was not found (but just got inserted)
/// in SimpleConstants to avoid having to rescan the same constants all the
/// time.
static bool
isSimpleEnoughValueToCommitHelper(Constant *C,
                                  SmallPtrSetImpl<Constant *> &SimpleConstants,
                                  const DataLayout &DL) {
  // Simple global addresses are supported, do not allow dllimport or
  // thread-local globals.
  if (auto *GV = dyn_cast<GlobalValue>(C))
    return !GV->hasDLLImportStorageClass() && !GV->isThreadLocal();

  // Simple integer, undef, constant aggregate zero, etc are all supported.
  if (C->getNumOperands() == 0 || isa<BlockAddress>(C))
    return true;

  // Aggregate values are safe if all their elements are.
  if (isa<ConstantAggregate>(C)) {
    for (Value *Op : C->operands())
      if (!isSimpleEnoughValueToCommit(cast<Constant>(Op), SimpleConstants, DL))
        return false;
    return true;
  }

  // We don't know exactly what relocations are allowed in constant expressions,
  // so we allow &global+constantoffset, which is safe and uniformly supported
  // across targets.
  ConstantExpr *CE = cast<ConstantExpr>(C);
  switch (CE->getOpcode()) {
  case Instruction::BitCast:
    // Bitcast is fine if the casted value is fine.
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
    // int <=> ptr is fine if the int type is the same size as the
    // pointer type.
    if (DL.getTypeSizeInBits(CE->getType()) !=
        DL.getTypeSizeInBits(CE->getOperand(0)->getType()))
      return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  // GEP is fine if it is simple + constant offset.
  case Instruction::GetElementPtr:
    for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
      if (!isa<ConstantInt>(CE->getOperand(i)))
        return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);

  case Instruction::Add:
    // We allow simple+cst.
    if (!isa<ConstantInt>(CE->getOperand(1)))
      return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
  }
  // Any other constant-expression opcode is considered too complex to commit.
  return false;
}

/// Memoizing wrapper around isSimpleEnoughValueToCommitHelper: a constant is
/// fully checked only the first time it is inserted into SimpleConstants;
/// constants already in the set are assumed simple.
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
                            SmallPtrSetImpl<Constant *> &SimpleConstants,
                            const DataLayout &DL) {
  // If we already checked this constant, we win.
  if (!SimpleConstants.insert(C).second)
    return true;
  // Check the constant.
  return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
}

/// Free the owned MutableAggregate (if this value holds one) and reset this
/// value to a null Constant pointer.
void Evaluator::MutableValue::clear() {
  if (auto *Agg = Val.dyn_cast<MutableAggregate *>())
    delete Agg;
  Val = nullptr;
}

/// Read a value of type \p Ty at byte offset \p Offset from this value,
/// descending through nested mutable aggregates. Returns nullptr if the
/// offset cannot be mapped to an element or the access does not fit.
Constant *Evaluator::MutableValue::read(Type *Ty, APInt Offset,
                                        const DataLayout &DL) const {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  const MutableValue *V = this;
  // Walk down until we reach a plain Constant leaf; getGEPIndexForOffset
  // selects the element containing Offset and updates Offset to the
  // remainder within that element.
  while (const auto *Agg = V->Val.dyn_cast<MutableAggregate *>()) {
    Type *AggTy = Agg->Ty;
    Optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
    if (!Index || Index->uge(Agg->Elements.size()) ||
        !TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
      return nullptr;

    V = &Agg->Elements[Index->getZExtValue()];
  }

  return ConstantFoldLoadFromConst(V->Val.get<Constant *>(), Ty, Offset, DL);
}

/// Replace the Constant held by this value with a MutableAggregate whose
/// elements are the constant's aggregate elements, so individual elements can
/// be overwritten in place. Returns false if the constant's type is not a
/// fixed vector, array, or struct.
bool Evaluator::MutableValue::makeMutable() {
  Constant *C = Val.get<Constant *>();
  Type *Ty = C->getType();
  unsigned NumElements;
  if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
    NumElements = VT->getNumElements();
  } else if (auto *AT = dyn_cast<ArrayType>(Ty))
    NumElements = AT->getNumElements();
  else if (auto *ST = dyn_cast<StructType>(Ty))
    NumElements = ST->getNumElements();
  else
    return false;

  MutableAggregate *MA = new MutableAggregate(Ty);
  MA->Elements.reserve(NumElements);
  for (unsigned I = 0; I < NumElements; ++I)
    MA->Elements.push_back(C->getAggregateElement(I));
  Val = MA; // Ownership transferred; released via clear().
  return true;
}

/// Store constant \p V at byte offset \p Offset within this value, converting
/// constants into mutable aggregates on demand so the store can be performed
/// in place. Returns false if the destination cannot be located or mutated.
bool Evaluator::MutableValue::write(Constant *V, APInt Offset,
                                    const DataLayout &DL) {
  Type *Ty = V->getType();
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  MutableValue *MV = this;
  // Descend until the store lines up (offset zero and cast-compatible type)
  // with an element we can overwrite directly.
  while (Offset != 0 ||
         !CastInst::isBitOrNoopPointerCastable(Ty, MV->getType(), DL)) {
    if (MV->Val.is<Constant *>() && !MV->makeMutable())
      return false;

    MutableAggregate *Agg = MV->Val.get<MutableAggregate *>();
    Type *AggTy = Agg->Ty;
    Optional<APInt> Index = DL.getGEPIndexForOffset(AggTy, Offset);
    if (!Index || Index->uge(Agg->Elements.size()) ||
        !TypeSize::isKnownLE(TySize, DL.getTypeStoreSize(AggTy)))
      return false;

    MV = &Agg->Elements[Index->getZExtValue()];
  }

  // Coerce the stored value to the element's type where necessary.
  Type *MVType = MV->getType();
  MV->clear();
  if (Ty->isIntegerTy() && MVType->isPointerTy())
    MV->Val = ConstantExpr::getIntToPtr(V, MVType);
  else if (Ty->isPointerTy() && MVType->isIntegerTy())
    MV->Val = ConstantExpr::getPtrToInt(V, MVType);
  else if (Ty != MVType)
    MV->Val = ConstantExpr::getBitCast(V, MVType);
  else
    MV->Val = V;
  return true;
}

/// Reassemble this mutable aggregate (recursively) into an immutable Constant
/// of the same type.
Constant *Evaluator::MutableAggregate::toConstant() const {
  SmallVector<Constant *, 32> Consts;
  for (const MutableValue &MV : Elements)
    Consts.push_back(MV.toConstant());

  if (auto *ST = dyn_cast<StructType>(Ty))
    return ConstantStruct::get(ST, Consts);
  if (auto *AT = dyn_cast<ArrayType>(Ty))
    return ConstantArray::get(AT, Consts);
  assert(isa<FixedVectorType>(Ty) && "Must be vector");
  return ConstantVector::get(Consts);
}
/// Return the value that would be computed by a load from P after the stores
/// reflected by 'memory' have been performed. If we can't decide, return null.
Constant *Evaluator::ComputeLoadResult(Constant *P, Type *Ty) {
  APInt Offset(DL.getIndexTypeSizeInBits(P->getType()), 0);
  P = cast<Constant>(P->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));
  Offset = Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(P->getType()));
  // Only loads from global variables can be resolved.
  auto *GV = dyn_cast<GlobalVariable>(P);
  if (!GV)
    return nullptr;

  // Prefer the contents produced by previously evaluated stores.
  auto It = MutatedMemory.find(GV);
  if (It != MutatedMemory.end())
    return It->second.read(Ty, Offset, DL);

  // Otherwise fall back to the global's initializer, if it is known exactly.
  if (!GV->hasDefinitiveInitializer())
    return nullptr;
  return ConstantFoldLoadFromConst(GV->getInitializer(), Ty, Offset, DL);
}

/// Return \p C as a Function, looking through a GlobalAlias whose aliasee is
/// a Function; otherwise return nullptr.
static Function *getFunction(Constant *C) {
  if (auto *Fn = dyn_cast<Function>(C))
    return Fn;

  if (auto *Alias = dyn_cast<GlobalAlias>(C))
    if (auto *Fn = dyn_cast<Function>(Alias->getAliasee()))
      return Fn;
  return nullptr;
}

/// Resolve the callee of \p CB to a Function and populate \p Formals with the
/// call's actual arguments converted to the callee's formal parameter types.
/// Returns nullptr if the callee or the arguments cannot be resolved.
Function *
Evaluator::getCalleeWithFormalArgs(CallBase &CB,
                                   SmallVectorImpl<Constant *> &Formals) {
  auto *V = CB.getCalledOperand()->stripPointerCasts();
  if (auto *Fn = getFunction(getVal(V)))
    return getFormalParams(CB, Fn, Formals) ? Fn : nullptr;
  return nullptr;
}

/// Convert the actual arguments of \p CB to the formal parameter types of
/// \p F, appending them to \p Formals. Returns false if \p F is null, the
/// call provides too few arguments, or an argument cannot be converted.
bool Evaluator::getFormalParams(CallBase &CB, Function *F,
                                SmallVectorImpl<Constant *> &Formals) {
  if (!F)
    return false;

  auto *FTy = F->getFunctionType();
  if (FTy->getNumParams() > CB.arg_size()) {
    LLVM_DEBUG(dbgs() << "Too few arguments for function.\n");
    return false;
  }

  auto ArgI = CB.arg_begin();
  for (Type *PTy : FTy->params()) {
    auto *ArgC = ConstantFoldLoadThroughBitcast(getVal(*ArgI), PTy, DL);
    if (!ArgC) {
      LLVM_DEBUG(dbgs() << "Can not convert function argument.\n");
      return false;
    }
    Formals.push_back(ArgC);
    ++ArgI;
  }
  return true;
}

/// If call expression contains bitcast then we may need to cast
/// evaluated return value to a type of the call expression.
/// Returns nullptr (after a debug message) if the fold fails.
Constant *Evaluator::castCallResultIfNeeded(Type *ReturnType, Constant *RV) {
  if (!RV || RV->getType() == ReturnType)
    return RV;

  RV = ConstantFoldLoadThroughBitcast(RV, ReturnType, DL);
  if (!RV)
    LLVM_DEBUG(dbgs() << "Failed to fold bitcast call expr\n");
  return RV;
}

/// Evaluate all instructions in block BB, returning true if successful, false
/// if we can't evaluate it. NewBB returns the next BB that control flows into,
/// or null upon return. StrippedPointerCastsForAliasAnalysis is set to true if
/// we looked through pointer casts to evaluate something.
bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB,
                              bool &StrippedPointerCastsForAliasAnalysis) {
  // This is the main evaluation loop.
  while (true) {
    Constant *InstResult = nullptr;

    LLVM_DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n");

    if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
      if (!SI->isSimple()) {
        LLVM_DEBUG(dbgs() << "Store is not simple! Can not evaluate.\n");
        return false; // no volatile/atomic accesses.
      }
      Constant *Ptr = getVal(SI->getOperand(1));
      Constant *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI);
      if (Ptr != FoldedPtr) {
        LLVM_DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
        Ptr = FoldedPtr;
        LLVM_DEBUG(dbgs() << "; To: " << *Ptr << "\n");
      }

      // Normalize the destination to (global, constant byte offset) so the
      // store can be recorded in MutatedMemory.
      APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
      Ptr = cast<Constant>(Ptr->stripAndAccumulateConstantOffsets(
          DL, Offset, /* AllowNonInbounds */ true));
      Offset = Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(Ptr->getType()));
      auto *GV = dyn_cast<GlobalVariable>(Ptr);
      if (!GV || !GV->hasUniqueInitializer()) {
        LLVM_DEBUG(dbgs() << "Store is not to global with unique initializer: "
                          << *Ptr << "\n");
        return false;
      }

      // If this might be too difficult for the backend to handle (e.g. the addr
      // of one global variable divided by another) then we can't commit it.
      Constant *Val = getVal(SI->getOperand(0));
      if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
        LLVM_DEBUG(dbgs() << "Store value is too complex to evaluate store. "
                          << *Val << "\n");
        return false;
      }

      auto Res = MutatedMemory.try_emplace(GV, GV->getInitializer());
      if (!Res.first->second.write(Val, Offset, DL))
        return false;
    } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
      InstResult = ConstantExpr::get(BO->getOpcode(),
                                     getVal(BO->getOperand(0)),
                                     getVal(BO->getOperand(1)));
      LLVM_DEBUG(dbgs() << "Found a BinaryOperator! Simplifying: "
                        << *InstResult << "\n");
    } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
      InstResult = ConstantExpr::getCompare(CI->getPredicate(),
                                            getVal(CI->getOperand(0)),
                                            getVal(CI->getOperand(1)));
      LLVM_DEBUG(dbgs() << "Found a CmpInst! Simplifying: " << *InstResult
                        << "\n");
    } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
      InstResult = ConstantExpr::getCast(CI->getOpcode(),
                                         getVal(CI->getOperand(0)),
                                         CI->getType());
      LLVM_DEBUG(dbgs() << "Found a Cast! Simplifying: " << *InstResult
                        << "\n");
    } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
      InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)),
                                           getVal(SI->getOperand(1)),
                                           getVal(SI->getOperand(2)));
      LLVM_DEBUG(dbgs() << "Found a Select! Simplifying: " << *InstResult
                        << "\n");
    } else if (auto *EVI = dyn_cast<ExtractValueInst>(CurInst)) {
      InstResult = ConstantExpr::getExtractValue(
          getVal(EVI->getAggregateOperand()), EVI->getIndices());
      LLVM_DEBUG(dbgs() << "Found an ExtractValueInst! Simplifying: "
                        << *InstResult << "\n");
    } else if (auto *IVI = dyn_cast<InsertValueInst>(CurInst)) {
      InstResult = ConstantExpr::getInsertValue(
          getVal(IVI->getAggregateOperand()),
          getVal(IVI->getInsertedValueOperand()), IVI->getIndices());
      LLVM_DEBUG(dbgs() << "Found an InsertValueInst! Simplifying: "
                        << *InstResult << "\n");
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
      Constant *P = getVal(GEP->getOperand(0));
      SmallVector<Constant*, 8> GEPOps;
      for (Use &Op : llvm::drop_begin(GEP->operands()))
        GEPOps.push_back(getVal(Op));
      InstResult =
          ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), P, GEPOps,
                                         cast<GEPOperator>(GEP)->isInBounds());
      LLVM_DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult << "\n");
    } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
      if (!LI->isSimple()) {
        LLVM_DEBUG(
            dbgs() << "Found a Load! Not a simple load, can not evaluate.\n");
        return false; // no volatile/atomic accesses.
      }

      Constant *Ptr = getVal(LI->getOperand(0));
      Constant *FoldedPtr = ConstantFoldConstant(Ptr, DL, TLI);
      if (Ptr != FoldedPtr) {
        Ptr = FoldedPtr;
        LLVM_DEBUG(dbgs() << "Found a constant pointer expression, constant "
                             "folding: "
                          << *Ptr << "\n");
      }
      InstResult = ComputeLoadResult(Ptr, LI->getType());
      if (!InstResult) {
        LLVM_DEBUG(
            dbgs() << "Failed to compute load result. Can not evaluate load."
                      "\n");
        return false; // Could not evaluate load.
      }

      LLVM_DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n");
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
      if (AI->isArrayAllocation()) {
        LLVM_DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n");
        return false; // Cannot handle array allocs.
      }
      // Model the alloca as a temporary global; AllocaTmps owns its lifetime.
      Type *Ty = AI->getAllocatedType();
      AllocaTmps.push_back(std::make_unique<GlobalVariable>(
          Ty, false, GlobalValue::InternalLinkage, UndefValue::get(Ty),
          AI->getName(), /*TLMode=*/GlobalValue::NotThreadLocal,
          AI->getType()->getPointerAddressSpace()));
      InstResult = AllocaTmps.back().get();
      LLVM_DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n");
    } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
      CallBase &CB = *cast<CallBase>(&*CurInst);

      // Debug info can safely be ignored here.
      if (isa<DbgInfoIntrinsic>(CB)) {
        LLVM_DEBUG(dbgs() << "Ignoring debug info.\n");
        ++CurInst;
        continue;
      }

      // Cannot handle inline asm.
      if (CB.isInlineAsm()) {
        LLVM_DEBUG(dbgs() << "Found inline asm, can not evaluate.\n");
        return false;
      }

      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CB)) {
        if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
          if (MSI->isVolatile()) {
            LLVM_DEBUG(dbgs() << "Can not optimize a volatile memset "
                              << "intrinsic.\n");
            return false;
          }
          Constant *Ptr = getVal(MSI->getDest());
          Constant *Val = getVal(MSI->getValue());
          Constant *DestVal =
              ComputeLoadResult(getVal(Ptr), MSI->getValue()->getType());
          if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
            // This memset is a no-op.
            LLVM_DEBUG(dbgs() << "Ignoring no-op memset.\n");
            ++CurInst;
            continue;
          }
          // NOTE: a non-trivial memset falls through and is rejected below as
          // an unknown intrinsic.
        }

        if (II->isLifetimeStartOrEnd()) {
          LLVM_DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n");
          ++CurInst;
          continue;
        }

        if (II->getIntrinsicID() == Intrinsic::invariant_start) {
          // We don't insert an entry into Values, as it doesn't have a
          // meaningful return value.
          if (!II->use_empty()) {
            LLVM_DEBUG(dbgs()
                       << "Found unused invariant_start. Can't evaluate.\n");
            return false;
          }
          ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
          Value *PtrArg = getVal(II->getArgOperand(1));
          Value *Ptr = PtrArg->stripPointerCasts();
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
            // Record the global as invariant only if the invariant_start
            // covers the entire stored size of the global.
            Type *ElemTy = GV->getValueType();
            if (!Size->isMinusOne() &&
                Size->getValue().getLimitedValue() >=
                    DL.getTypeStoreSize(ElemTy)) {
              Invariants.insert(GV);
              LLVM_DEBUG(dbgs() << "Found a global var that is an invariant: "
                                << *GV << "\n");
            } else {
              LLVM_DEBUG(dbgs()
                         << "Found a global var, but can not treat it as an "
                            "invariant.\n");
            }
          }
          // Continue even if we do nothing.
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::assume) {
          LLVM_DEBUG(dbgs() << "Skipping assume intrinsic.\n");
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::sideeffect) {
          LLVM_DEBUG(dbgs() << "Skipping sideeffect intrinsic.\n");
          ++CurInst;
          continue;
        } else if (II->getIntrinsicID() == Intrinsic::pseudoprobe) {
          LLVM_DEBUG(dbgs() << "Skipping pseudoprobe intrinsic.\n");
          ++CurInst;
          continue;
        } else {
          Value *Stripped = CurInst->stripPointerCastsForAliasAnalysis();
          // Only attempt to getVal() if we've actually managed to strip
          // anything away, or else we'll call getVal() on the current
          // instruction.
          if (Stripped != &*CurInst) {
            InstResult = getVal(Stripped);
          }
          if (InstResult) {
            LLVM_DEBUG(dbgs()
                       << "Stripped pointer casts for alias analysis for "
                          "intrinsic call.\n");
            StrippedPointerCastsForAliasAnalysis = true;
            InstResult = ConstantExpr::getBitCast(InstResult, II->getType());
          } else {
            LLVM_DEBUG(dbgs() << "Unknown intrinsic. Cannot evaluate.\n");
            return false;
          }
        }
      }

      if (!InstResult) {
        // Not an intrinsic we evaluated above: treat as an ordinary call.
        // Resolve function pointers.
        SmallVector<Constant *, 8> Formals;
        Function *Callee = getCalleeWithFormalArgs(CB, Formals);
        if (!Callee || Callee->isInterposable()) {
          LLVM_DEBUG(dbgs() << "Can not resolve function pointer.\n");
          return false; // Cannot resolve.
        }

        if (Callee->isDeclaration()) {
          // If this is a function we can constant fold, do it.
          if (Constant *C = ConstantFoldCall(&CB, Callee, Formals, TLI)) {
            InstResult = castCallResultIfNeeded(CB.getType(), C);
            if (!InstResult)
              return false;
            LLVM_DEBUG(dbgs() << "Constant folded function call. Result: "
                              << *InstResult << "\n");
          } else {
            LLVM_DEBUG(dbgs() << "Can not constant fold function call.\n");
            return false;
          }
        } else {
          if (Callee->getFunctionType()->isVarArg()) {
            LLVM_DEBUG(dbgs()
                       << "Can not constant fold vararg function call.\n");
            return false;
          }

          Constant *RetVal = nullptr;
          // Execute the call, if successful, use the return value.
          ValueStack.emplace_back();
          if (!EvaluateFunction(Callee, RetVal, Formals)) {
            LLVM_DEBUG(dbgs() << "Failed to evaluate function.\n");
            return false;
          }
          ValueStack.pop_back();
          InstResult = castCallResultIfNeeded(CB.getType(), RetVal);
          if (RetVal && !InstResult)
            return false;

          if (InstResult) {
            LLVM_DEBUG(dbgs() << "Successfully evaluated function. Result: "
                              << *InstResult << "\n\n");
          } else {
            LLVM_DEBUG(dbgs()
                       << "Successfully evaluated function. Result: 0\n\n");
          }
        }
      }
    } else if (CurInst->isTerminator()) {
      LLVM_DEBUG(dbgs() << "Found a terminator instruction.\n");

      if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
        if (BI->isUnconditional()) {
          NextBB = BI->getSuccessor(0);
        } else {
          ConstantInt *Cond =
            dyn_cast<ConstantInt>(getVal(BI->getCondition()));
          if (!Cond) return false;  // Cannot determine.

          NextBB = BI->getSuccessor(!Cond->getZExtValue());
        }
      } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
        ConstantInt *Val =
          dyn_cast<ConstantInt>(getVal(SI->getCondition()));
        if (!Val) return false;  // Cannot determine.
        NextBB = SI->findCaseValue(Val)->getCaseSuccessor();
      } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
        Value *Val = getVal(IBI->getAddress())->stripPointerCasts();
        if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
          NextBB = BA->getBasicBlock();
        else
          return false;  // Cannot determine.
      } else if (isa<ReturnInst>(CurInst)) {
        NextBB = nullptr;
      } else {
        // invoke, unwind, resume, unreachable.
        LLVM_DEBUG(dbgs() << "Can not handle terminator.");
        return false; // Cannot handle this terminator.
      }

      // We succeeded at evaluating this block!
      LLVM_DEBUG(dbgs() << "Successfully evaluated block.\n");
      return true;
    } else {
      // Did not know how to evaluate this!
      LLVM_DEBUG(
          dbgs() << "Failed to evaluate block due to unhandled instruction."
                    "\n");
      return false;
    }

    // Record the instruction's evaluated value if anything uses it.
    if (!CurInst->use_empty()) {
      InstResult = ConstantFoldConstant(InstResult, DL, TLI);
      setVal(&*CurInst, InstResult);
    }

    // If we just processed an invoke, we finished evaluating the block.
    if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
      NextBB = II->getNormalDest();
      LLVM_DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n");
      return true;
    }

    // Advance program counter.
    ++CurInst;
  }
}

/// Evaluate a call to function F, returning true if successful, false if we
/// can't evaluate it. ActualArgs contains the formal arguments for the
/// function.
bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
                                 const SmallVectorImpl<Constant*> &ActualArgs) {
  // Check to see if this function is already executing (recursion).  If so,
  // bail out.  TODO: we might want to accept limited recursion.
  if (is_contained(CallStack, F))
    return false;

  CallStack.push_back(F);

  // Initialize arguments to the incoming values specified.
  unsigned ArgNo = 0;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
       ++AI, ++ArgNo)
    setVal(&*AI, ActualArgs[ArgNo]);

  // ExecutedBlocks - We only handle non-looping, non-recursive code.  As such,
  // we can only evaluate any one basic block at most once.  This set keeps
  // track of what we have executed so we can detect recursive cases etc.
  SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;

  // CurBB - The current basic block we're evaluating.
  BasicBlock *CurBB = &F->front();

  BasicBlock::iterator CurInst = CurBB->begin();

  while (true) {
    BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings.
    LLVM_DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");

    bool StrippedPointerCastsForAliasAnalysis = false;

    if (!EvaluateBlock(CurInst, NextBB, StrippedPointerCastsForAliasAnalysis))
      return false;

    if (!NextBB) {
      // Successfully running until there's no next block means that we found
      // the return.  Fill it the return value and pop the call stack.
      ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
      if (RI->getNumOperands()) {
        // The Evaluator can look through pointer casts as long as alias
        // analysis holds because it's just a simple interpreter and doesn't
        // skip memory accesses due to invariant group metadata, but we can't
        // let users of Evaluator use a value that's been gleaned looking
        // through stripping pointer casts.
        if (StrippedPointerCastsForAliasAnalysis &&
            !RI->getReturnValue()->getType()->isVoidTy()) {
          return false;
        }
        RetVal = getVal(RI->getOperand(0));
      }
      CallStack.pop_back();
      return true;
    }

    // Okay, we succeeded in evaluating this control flow.  See if we have
    // executed the new block before.  If so, we have a looping function,
    // which we cannot evaluate in reasonable time.
    if (!ExecutedBlocks.insert(NextBB).second)
      return false;  // looped!

    // Okay, we have never been in this block before.  Check to see if there
    // are any PHI nodes.  If so, evaluate them with information about where
    // we came from.
    PHINode *PN = nullptr;
    for (CurInst = NextBB->begin();
         (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
      setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));

    // Advance to the next block.
    CurBB = NextBB;
  }
}