//===- InferAddressSpaces.cpp - -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __global__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared`, which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero (so-called) specific address spaces to represent
// memory spaces (e.g. addrspace(3) means shared memory). The Clang frontend
// places only type-qualified variables in specific address spaces, and then
// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
// (the so-called generic address space) for other instructions to use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// to
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
// redirected to %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address spaces
// from type-qualified variable declarations to their users. For example, it
// optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX codegen
// is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it uses a data-flow
// analysis to prove that as many generic pointers as possible point to only
// one specific address space. In the above example, it can prove that %1 only
// points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
//   ICCS 2012
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two
// complications.
//
// First, the data flow analysis in Step 1 needs to handle cycles. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %y2 ]
//     %y2 = getelementptr %y, 1
//     %v = load %y2
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y. To address this complication,
// the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic.
// All address expressions (our implementation considers phi, select, bitcast,
// addrspacecast, getelementptr, and a few other patterns; see
// isAddressExpression) start with the uninitialized address space. The
// monotone transfer function moves the address space of a pointer down a
// lattice path from uninitialized to specific and then to generic. A join
// operation of two different specific address spaces pushes the expression
// down to the generic address space. The analysis completes once it reaches a
// fixed point.
//
// Second, IR rewriting in Step 2 also needs to handle these cycles. For
// example, converting %y to addrspace(3) requires the compiler to know the
// converted %y2, but converting %y2 needs the converted %y. To address this
// complication, we break these cycles using "undef" placeholders. When
// converting an instruction `I` to a new address space, if its operand `Op` is
// not converted yet, we let `I` temporarily use `undef` and fix all the uses
// of undef later. For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, undef ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the undef in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %y2' ]
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/InferAddressSpaces.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

#define DEBUG_TYPE "infer-address-spaces"

using namespace llvm;

static cl::opt<bool> AssumeDefaultIsFlatAddressSpace(
    "assume-default-is-flat-addrspace", cl::init(false), cl::ReallyHidden,
    cl::desc("The default address space is assumed as the flat address space.
" 145 "This is mainly for test purpose.")); 146 147 static const unsigned UninitializedAddressSpace = 148 std::numeric_limits<unsigned>::max(); 149 150 namespace { 151 152 using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>; 153 // Different from ValueToAddrSpaceMapTy, where a new addrspace is inferred on 154 // the *def* of a value, PredicatedAddrSpaceMapTy is map where a new 155 // addrspace is inferred on the *use* of a pointer. This map is introduced to 156 // infer addrspace from the addrspace predicate assumption built from assume 157 // intrinsic. In that scenario, only specific uses (under valid assumption 158 // context) could be inferred with a new addrspace. 159 using PredicatedAddrSpaceMapTy = 160 DenseMap<std::pair<const Value *, const Value *>, unsigned>; 161 using PostorderStackTy = llvm::SmallVector<PointerIntPair<Value *, 1, bool>, 4>; 162 163 class InferAddressSpaces : public FunctionPass { 164 unsigned FlatAddrSpace = 0; 165 166 public: 167 static char ID; 168 169 InferAddressSpaces() : 170 FunctionPass(ID), FlatAddrSpace(UninitializedAddressSpace) {} 171 InferAddressSpaces(unsigned AS) : FunctionPass(ID), FlatAddrSpace(AS) {} 172 173 void getAnalysisUsage(AnalysisUsage &AU) const override { 174 AU.setPreservesCFG(); 175 AU.addPreserved<DominatorTreeWrapperPass>(); 176 AU.addRequired<AssumptionCacheTracker>(); 177 AU.addRequired<TargetTransformInfoWrapperPass>(); 178 } 179 180 bool runOnFunction(Function &F) override; 181 }; 182 183 class InferAddressSpacesImpl { 184 AssumptionCache &AC; 185 DominatorTree *DT = nullptr; 186 const TargetTransformInfo *TTI = nullptr; 187 const DataLayout *DL = nullptr; 188 189 /// Target specific address space which uses of should be replaced if 190 /// possible. 191 unsigned FlatAddrSpace = 0; 192 193 // Try to update the address space of V. If V is updated, returns true and 194 // false otherwise. 195 bool updateAddressSpace(const Value &V, 196 ValueToAddrSpaceMapTy &InferredAddrSpace, 197 PredicatedAddrSpaceMapTy &PredicatedAS) const; 198 199 // Tries to infer the specific address space of each address expression in 200 // Postorder. 201 void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder, 202 ValueToAddrSpaceMapTy &InferredAddrSpace, 203 PredicatedAddrSpaceMapTy &PredicatedAS) const; 204 205 bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const; 206 207 Value *cloneInstructionWithNewAddressSpace( 208 Instruction *I, unsigned NewAddrSpace, 209 const ValueToValueMapTy &ValueWithNewAddrSpace, 210 const PredicatedAddrSpaceMapTy &PredicatedAS, 211 SmallVectorImpl<const Use *> *UndefUsesToFix) const; 212 213 // Changes the flat address expressions in function F to point to specific 214 // address spaces if InferredAddrSpace says so. Postorder is the postorder of 215 // all flat expressions in the use-def graph of function F. 
216 bool rewriteWithNewAddressSpaces( 217 const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder, 218 const ValueToAddrSpaceMapTy &InferredAddrSpace, 219 const PredicatedAddrSpaceMapTy &PredicatedAS, Function *F) const; 220 221 void appendsFlatAddressExpressionToPostorderStack( 222 Value *V, PostorderStackTy &PostorderStack, 223 DenseSet<Value *> &Visited) const; 224 225 bool rewriteIntrinsicOperands(IntrinsicInst *II, 226 Value *OldV, Value *NewV) const; 227 void collectRewritableIntrinsicOperands(IntrinsicInst *II, 228 PostorderStackTy &PostorderStack, 229 DenseSet<Value *> &Visited) const; 230 231 std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const; 232 233 Value *cloneValueWithNewAddressSpace( 234 Value *V, unsigned NewAddrSpace, 235 const ValueToValueMapTy &ValueWithNewAddrSpace, 236 const PredicatedAddrSpaceMapTy &PredicatedAS, 237 SmallVectorImpl<const Use *> *UndefUsesToFix) const; 238 unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const; 239 240 unsigned getPredicatedAddrSpace(const Value &V, Value *Opnd) const; 241 242 public: 243 InferAddressSpacesImpl(AssumptionCache &AC, DominatorTree *DT, 244 const TargetTransformInfo *TTI, unsigned FlatAddrSpace) 245 : AC(AC), DT(DT), TTI(TTI), FlatAddrSpace(FlatAddrSpace) {} 246 bool run(Function &F); 247 }; 248 249 } // end anonymous namespace 250 251 char InferAddressSpaces::ID = 0; 252 253 namespace llvm { 254 255 void initializeInferAddressSpacesPass(PassRegistry &); 256 257 } // end namespace llvm 258 259 INITIALIZE_PASS_BEGIN(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces", 260 false, false) 261 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 262 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 263 INITIALIZE_PASS_END(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces", 264 false, false) 265 266 // Check whether that's no-op pointer bicast using a pair of 267 // `ptrtoint`/`inttoptr` due to the missing no-op pointer bitcast over 268 // different address spaces. 269 static bool isNoopPtrIntCastPair(const Operator *I2P, const DataLayout &DL, 270 const TargetTransformInfo *TTI) { 271 assert(I2P->getOpcode() == Instruction::IntToPtr); 272 auto *P2I = dyn_cast<Operator>(I2P->getOperand(0)); 273 if (!P2I || P2I->getOpcode() != Instruction::PtrToInt) 274 return false; 275 // Check it's really safe to treat that pair of `ptrtoint`/`inttoptr` as a 276 // no-op cast. Besides checking both of them are no-op casts, as the 277 // reinterpreted pointer may be used in other pointer arithmetic, we also 278 // need to double-check that through the target-specific hook. That ensures 279 // the underlying target also agrees that's a no-op address space cast and 280 // pointer bits are preserved. 281 // The current IR spec doesn't have clear rules on address space casts, 282 // especially a clear definition for pointer bits in non-default address 283 // spaces. It would be undefined if that pointer is dereferenced after an 284 // invalid reinterpret cast. Also, due to the unclearness for the meaning of 285 // bits in non-default address spaces in the current spec, the pointer 286 // arithmetic may also be undefined after invalid pointer reinterpret cast. 287 // However, as we confirm through the target hooks that it's a no-op 288 // addrspacecast, it doesn't matter since the bits should be the same. 
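  // Illustrative IR (not taken from any particular test, and assuming the
  // target reports the address-space cast below as a no-op):
  //   %i = ptrtoint i8 addrspace(1)* %p to i64
  //   %q = inttoptr i64 %i to i8*
  // is then treated like `%q = addrspacecast i8 addrspace(1)* %p to i8*`.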
289 return CastInst::isNoopCast(Instruction::CastOps(I2P->getOpcode()), 290 I2P->getOperand(0)->getType(), I2P->getType(), 291 DL) && 292 CastInst::isNoopCast(Instruction::CastOps(P2I->getOpcode()), 293 P2I->getOperand(0)->getType(), P2I->getType(), 294 DL) && 295 TTI->isNoopAddrSpaceCast( 296 P2I->getOperand(0)->getType()->getPointerAddressSpace(), 297 I2P->getType()->getPointerAddressSpace()); 298 } 299 300 // Returns true if V is an address expression. 301 // TODO: Currently, we consider only phi, bitcast, addrspacecast, and 302 // getelementptr operators. 303 static bool isAddressExpression(const Value &V, const DataLayout &DL, 304 const TargetTransformInfo *TTI) { 305 const Operator *Op = dyn_cast<Operator>(&V); 306 if (!Op) 307 return false; 308 309 switch (Op->getOpcode()) { 310 case Instruction::PHI: 311 assert(Op->getType()->isPointerTy()); 312 return true; 313 case Instruction::BitCast: 314 case Instruction::AddrSpaceCast: 315 case Instruction::GetElementPtr: 316 return true; 317 case Instruction::Select: 318 return Op->getType()->isPointerTy(); 319 case Instruction::Call: { 320 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(&V); 321 return II && II->getIntrinsicID() == Intrinsic::ptrmask; 322 } 323 case Instruction::IntToPtr: 324 return isNoopPtrIntCastPair(Op, DL, TTI); 325 default: 326 // That value is an address expression if it has an assumed address space. 327 return TTI->getAssumedAddrSpace(&V) != UninitializedAddressSpace; 328 } 329 } 330 331 // Returns the pointer operands of V. 332 // 333 // Precondition: V is an address expression. 334 static SmallVector<Value *, 2> 335 getPointerOperands(const Value &V, const DataLayout &DL, 336 const TargetTransformInfo *TTI) { 337 const Operator &Op = cast<Operator>(V); 338 switch (Op.getOpcode()) { 339 case Instruction::PHI: { 340 auto IncomingValues = cast<PHINode>(Op).incoming_values(); 341 return SmallVector<Value *, 2>(IncomingValues.begin(), 342 IncomingValues.end()); 343 } 344 case Instruction::BitCast: 345 case Instruction::AddrSpaceCast: 346 case Instruction::GetElementPtr: 347 return {Op.getOperand(0)}; 348 case Instruction::Select: 349 return {Op.getOperand(1), Op.getOperand(2)}; 350 case Instruction::Call: { 351 const IntrinsicInst &II = cast<IntrinsicInst>(Op); 352 assert(II.getIntrinsicID() == Intrinsic::ptrmask && 353 "unexpected intrinsic call"); 354 return {II.getArgOperand(0)}; 355 } 356 case Instruction::IntToPtr: { 357 assert(isNoopPtrIntCastPair(&Op, DL, TTI)); 358 auto *P2I = cast<Operator>(Op.getOperand(0)); 359 return {P2I->getOperand(0)}; 360 } 361 default: 362 llvm_unreachable("Unexpected instruction type."); 363 } 364 } 365 366 bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II, 367 Value *OldV, 368 Value *NewV) const { 369 Module *M = II->getParent()->getParent()->getParent(); 370 371 switch (II->getIntrinsicID()) { 372 case Intrinsic::objectsize: { 373 Type *DestTy = II->getType(); 374 Type *SrcTy = NewV->getType(); 375 Function *NewDecl = 376 Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy}); 377 II->setArgOperand(0, NewV); 378 II->setCalledFunction(NewDecl); 379 return true; 380 } 381 case Intrinsic::ptrmask: 382 // This is handled as an address expression, not as a use memory operation. 
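    // Its pointer operand is instead rewritten when the ptrmask call itself
    // is cloned in cloneInstructionWithNewAddressSpace.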
383 return false; 384 default: { 385 Value *Rewrite = TTI->rewriteIntrinsicWithAddressSpace(II, OldV, NewV); 386 if (!Rewrite) 387 return false; 388 if (Rewrite != II) 389 II->replaceAllUsesWith(Rewrite); 390 return true; 391 } 392 } 393 } 394 395 void InferAddressSpacesImpl::collectRewritableIntrinsicOperands( 396 IntrinsicInst *II, PostorderStackTy &PostorderStack, 397 DenseSet<Value *> &Visited) const { 398 auto IID = II->getIntrinsicID(); 399 switch (IID) { 400 case Intrinsic::ptrmask: 401 case Intrinsic::objectsize: 402 appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0), 403 PostorderStack, Visited); 404 break; 405 default: 406 SmallVector<int, 2> OpIndexes; 407 if (TTI->collectFlatAddressOperands(OpIndexes, IID)) { 408 for (int Idx : OpIndexes) { 409 appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(Idx), 410 PostorderStack, Visited); 411 } 412 } 413 break; 414 } 415 } 416 417 // Returns all flat address expressions in function F. The elements are 418 // If V is an unvisited flat address expression, appends V to PostorderStack 419 // and marks it as visited. 420 void InferAddressSpacesImpl::appendsFlatAddressExpressionToPostorderStack( 421 Value *V, PostorderStackTy &PostorderStack, 422 DenseSet<Value *> &Visited) const { 423 assert(V->getType()->isPointerTy()); 424 425 // Generic addressing expressions may be hidden in nested constant 426 // expressions. 427 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) { 428 // TODO: Look in non-address parts, like icmp operands. 429 if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second) 430 PostorderStack.emplace_back(CE, false); 431 432 return; 433 } 434 435 if (V->getType()->getPointerAddressSpace() == FlatAddrSpace && 436 isAddressExpression(*V, *DL, TTI)) { 437 if (Visited.insert(V).second) { 438 PostorderStack.emplace_back(V, false); 439 440 Operator *Op = cast<Operator>(V); 441 for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) { 442 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op->getOperand(I))) { 443 if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second) 444 PostorderStack.emplace_back(CE, false); 445 } 446 } 447 } 448 } 449 } 450 451 // Returns all flat address expressions in function F. The elements are ordered 452 // ordered in postorder. 453 std::vector<WeakTrackingVH> 454 InferAddressSpacesImpl::collectFlatAddressExpressions(Function &F) const { 455 // This function implements a non-recursive postorder traversal of a partial 456 // use-def graph of function F. 457 PostorderStackTy PostorderStack; 458 // The set of visited expressions. 459 DenseSet<Value *> Visited; 460 461 auto PushPtrOperand = [&](Value *Ptr) { 462 appendsFlatAddressExpressionToPostorderStack(Ptr, PostorderStack, 463 Visited); 464 }; 465 466 // Look at operations that may be interesting accelerate by moving to a known 467 // address space. We aim at generating after loads and stores, but pure 468 // addressing calculations may also be faster. 
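  // For instance (illustrative IR), given
  //   %p = getelementptr inbounds float, float* %flat, i64 %i
  //   %v = load float, float* %p
  // the loop below pushes %flat (the GEP's pointer operand) and %p (the
  // load's pointer operand) as traversal roots.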
469 for (Instruction &I : instructions(F)) { 470 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 471 if (!GEP->getType()->isVectorTy()) 472 PushPtrOperand(GEP->getPointerOperand()); 473 } else if (auto *LI = dyn_cast<LoadInst>(&I)) 474 PushPtrOperand(LI->getPointerOperand()); 475 else if (auto *SI = dyn_cast<StoreInst>(&I)) 476 PushPtrOperand(SI->getPointerOperand()); 477 else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I)) 478 PushPtrOperand(RMW->getPointerOperand()); 479 else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I)) 480 PushPtrOperand(CmpX->getPointerOperand()); 481 else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) { 482 // For memset/memcpy/memmove, any pointer operand can be replaced. 483 PushPtrOperand(MI->getRawDest()); 484 485 // Handle 2nd operand for memcpy/memmove. 486 if (auto *MTI = dyn_cast<MemTransferInst>(MI)) 487 PushPtrOperand(MTI->getRawSource()); 488 } else if (auto *II = dyn_cast<IntrinsicInst>(&I)) 489 collectRewritableIntrinsicOperands(II, PostorderStack, Visited); 490 else if (ICmpInst *Cmp = dyn_cast<ICmpInst>(&I)) { 491 // FIXME: Handle vectors of pointers 492 if (Cmp->getOperand(0)->getType()->isPointerTy()) { 493 PushPtrOperand(Cmp->getOperand(0)); 494 PushPtrOperand(Cmp->getOperand(1)); 495 } 496 } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(&I)) { 497 if (!ASC->getType()->isVectorTy()) 498 PushPtrOperand(ASC->getPointerOperand()); 499 } else if (auto *I2P = dyn_cast<IntToPtrInst>(&I)) { 500 if (isNoopPtrIntCastPair(cast<Operator>(I2P), *DL, TTI)) 501 PushPtrOperand( 502 cast<Operator>(I2P->getOperand(0))->getOperand(0)); 503 } 504 } 505 506 std::vector<WeakTrackingVH> Postorder; // The resultant postorder. 507 while (!PostorderStack.empty()) { 508 Value *TopVal = PostorderStack.back().getPointer(); 509 // If the operands of the expression on the top are already explored, 510 // adds that expression to the resultant postorder. 511 if (PostorderStack.back().getInt()) { 512 if (TopVal->getType()->getPointerAddressSpace() == FlatAddrSpace) 513 Postorder.push_back(TopVal); 514 PostorderStack.pop_back(); 515 continue; 516 } 517 // Otherwise, adds its operands to the stack and explores them. 518 PostorderStack.back().setInt(true); 519 // Skip values with an assumed address space. 520 if (TTI->getAssumedAddrSpace(TopVal) == UninitializedAddressSpace) { 521 for (Value *PtrOperand : getPointerOperands(*TopVal, *DL, TTI)) { 522 appendsFlatAddressExpressionToPostorderStack(PtrOperand, PostorderStack, 523 Visited); 524 } 525 } 526 } 527 return Postorder; 528 } 529 530 // A helper function for cloneInstructionWithNewAddressSpace. Returns the clone 531 // of OperandUse.get() in the new address space. If the clone is not ready yet, 532 // returns an undef in the new address space as a placeholder. 
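// The undef placeholder is recorded in UndefUsesToFix; the caller
// (rewriteWithNewAddressSpaces) patches such uses once the operand's clone has
// been created.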
533 static Value *operandWithNewAddressSpaceOrCreateUndef( 534 const Use &OperandUse, unsigned NewAddrSpace, 535 const ValueToValueMapTy &ValueWithNewAddrSpace, 536 const PredicatedAddrSpaceMapTy &PredicatedAS, 537 SmallVectorImpl<const Use *> *UndefUsesToFix) { 538 Value *Operand = OperandUse.get(); 539 540 Type *NewPtrTy = PointerType::getWithSamePointeeType( 541 cast<PointerType>(Operand->getType()), NewAddrSpace); 542 543 if (Constant *C = dyn_cast<Constant>(Operand)) 544 return ConstantExpr::getAddrSpaceCast(C, NewPtrTy); 545 546 if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) 547 return NewOperand; 548 549 Instruction *Inst = cast<Instruction>(OperandUse.getUser()); 550 auto I = PredicatedAS.find(std::make_pair(Inst, Operand)); 551 if (I != PredicatedAS.end()) { 552 // Insert an addrspacecast on that operand before the user. 553 unsigned NewAS = I->second; 554 Type *NewPtrTy = PointerType::getWithSamePointeeType( 555 cast<PointerType>(Operand->getType()), NewAS); 556 auto *NewI = new AddrSpaceCastInst(Operand, NewPtrTy); 557 NewI->insertBefore(Inst); 558 return NewI; 559 } 560 561 UndefUsesToFix->push_back(&OperandUse); 562 return UndefValue::get(NewPtrTy); 563 } 564 565 // Returns a clone of `I` with its operands converted to those specified in 566 // ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an 567 // operand whose address space needs to be modified might not exist in 568 // ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and 569 // adds that operand use to UndefUsesToFix so that caller can fix them later. 570 // 571 // Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast 572 // from a pointer whose type already matches. Therefore, this function returns a 573 // Value* instead of an Instruction*. 574 // 575 // This may also return nullptr in the case the instruction could not be 576 // rewritten. 577 Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace( 578 Instruction *I, unsigned NewAddrSpace, 579 const ValueToValueMapTy &ValueWithNewAddrSpace, 580 const PredicatedAddrSpaceMapTy &PredicatedAS, 581 SmallVectorImpl<const Use *> *UndefUsesToFix) const { 582 Type *NewPtrType = PointerType::getWithSamePointeeType( 583 cast<PointerType>(I->getType()), NewAddrSpace); 584 585 if (I->getOpcode() == Instruction::AddrSpaceCast) { 586 Value *Src = I->getOperand(0); 587 // Because `I` is flat, the source address space must be specific. 588 // Therefore, the inferred address space must be the source space, according 589 // to our algorithm. 590 assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace); 591 if (Src->getType() != NewPtrType) 592 return new BitCastInst(Src, NewPtrType); 593 return Src; 594 } 595 596 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 597 // Technically the intrinsic ID is a pointer typed argument, so specially 598 // handle calls early. 
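    // Only llvm.ptrmask is treated as an address expression (see
    // isAddressExpression), so it is the only intrinsic expected here.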
599 assert(II->getIntrinsicID() == Intrinsic::ptrmask); 600 Value *NewPtr = operandWithNewAddressSpaceOrCreateUndef( 601 II->getArgOperandUse(0), NewAddrSpace, ValueWithNewAddrSpace, 602 PredicatedAS, UndefUsesToFix); 603 Value *Rewrite = 604 TTI->rewriteIntrinsicWithAddressSpace(II, II->getArgOperand(0), NewPtr); 605 if (Rewrite) { 606 assert(Rewrite != II && "cannot modify this pointer operation in place"); 607 return Rewrite; 608 } 609 610 return nullptr; 611 } 612 613 unsigned AS = TTI->getAssumedAddrSpace(I); 614 if (AS != UninitializedAddressSpace) { 615 // For the assumed address space, insert an `addrspacecast` to make that 616 // explicit. 617 Type *NewPtrTy = PointerType::getWithSamePointeeType( 618 cast<PointerType>(I->getType()), AS); 619 auto *NewI = new AddrSpaceCastInst(I, NewPtrTy); 620 NewI->insertAfter(I); 621 return NewI; 622 } 623 624 // Computes the converted pointer operands. 625 SmallVector<Value *, 4> NewPointerOperands; 626 for (const Use &OperandUse : I->operands()) { 627 if (!OperandUse.get()->getType()->isPointerTy()) 628 NewPointerOperands.push_back(nullptr); 629 else 630 NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef( 631 OperandUse, NewAddrSpace, ValueWithNewAddrSpace, PredicatedAS, 632 UndefUsesToFix)); 633 } 634 635 switch (I->getOpcode()) { 636 case Instruction::BitCast: 637 return new BitCastInst(NewPointerOperands[0], NewPtrType); 638 case Instruction::PHI: { 639 assert(I->getType()->isPointerTy()); 640 PHINode *PHI = cast<PHINode>(I); 641 PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues()); 642 for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) { 643 unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index); 644 NewPHI->addIncoming(NewPointerOperands[OperandNo], 645 PHI->getIncomingBlock(Index)); 646 } 647 return NewPHI; 648 } 649 case Instruction::GetElementPtr: { 650 GetElementPtrInst *GEP = cast<GetElementPtrInst>(I); 651 GetElementPtrInst *NewGEP = GetElementPtrInst::Create( 652 GEP->getSourceElementType(), NewPointerOperands[0], 653 SmallVector<Value *, 4>(GEP->indices())); 654 NewGEP->setIsInBounds(GEP->isInBounds()); 655 return NewGEP; 656 } 657 case Instruction::Select: 658 assert(I->getType()->isPointerTy()); 659 return SelectInst::Create(I->getOperand(0), NewPointerOperands[1], 660 NewPointerOperands[2], "", nullptr, I); 661 case Instruction::IntToPtr: { 662 assert(isNoopPtrIntCastPair(cast<Operator>(I), *DL, TTI)); 663 Value *Src = cast<Operator>(I->getOperand(0))->getOperand(0); 664 assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace); 665 if (Src->getType() != NewPtrType) 666 return new BitCastInst(Src, NewPtrType); 667 return Src; 668 } 669 default: 670 llvm_unreachable("Unexpected opcode"); 671 } 672 } 673 674 // Similar to cloneInstructionWithNewAddressSpace, returns a clone of the 675 // constant expression `CE` with its operands replaced as specified in 676 // ValueWithNewAddrSpace. 677 static Value *cloneConstantExprWithNewAddressSpace( 678 ConstantExpr *CE, unsigned NewAddrSpace, 679 const ValueToValueMapTy &ValueWithNewAddrSpace, const DataLayout *DL, 680 const TargetTransformInfo *TTI) { 681 Type *TargetType = CE->getType()->isPointerTy() 682 ? PointerType::getWithSamePointeeType( 683 cast<PointerType>(CE->getType()), NewAddrSpace) 684 : CE->getType(); 685 686 if (CE->getOpcode() == Instruction::AddrSpaceCast) { 687 // Because CE is flat, the source address space must be specific. 
688 // Therefore, the inferred address space must be the source space according 689 // to our algorithm. 690 assert(CE->getOperand(0)->getType()->getPointerAddressSpace() == 691 NewAddrSpace); 692 return ConstantExpr::getBitCast(CE->getOperand(0), TargetType); 693 } 694 695 if (CE->getOpcode() == Instruction::BitCast) { 696 if (Value *NewOperand = ValueWithNewAddrSpace.lookup(CE->getOperand(0))) 697 return ConstantExpr::getBitCast(cast<Constant>(NewOperand), TargetType); 698 return ConstantExpr::getAddrSpaceCast(CE, TargetType); 699 } 700 701 if (CE->getOpcode() == Instruction::Select) { 702 Constant *Src0 = CE->getOperand(1); 703 Constant *Src1 = CE->getOperand(2); 704 if (Src0->getType()->getPointerAddressSpace() == 705 Src1->getType()->getPointerAddressSpace()) { 706 707 return ConstantExpr::getSelect( 708 CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType), 709 ConstantExpr::getAddrSpaceCast(Src1, TargetType)); 710 } 711 } 712 713 if (CE->getOpcode() == Instruction::IntToPtr) { 714 assert(isNoopPtrIntCastPair(cast<Operator>(CE), *DL, TTI)); 715 Constant *Src = cast<ConstantExpr>(CE->getOperand(0))->getOperand(0); 716 assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace); 717 return ConstantExpr::getBitCast(Src, TargetType); 718 } 719 720 // Computes the operands of the new constant expression. 721 bool IsNew = false; 722 SmallVector<Constant *, 4> NewOperands; 723 for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) { 724 Constant *Operand = CE->getOperand(Index); 725 // If the address space of `Operand` needs to be modified, the new operand 726 // with the new address space should already be in ValueWithNewAddrSpace 727 // because (1) the constant expressions we consider (i.e. addrspacecast, 728 // bitcast, and getelementptr) do not incur cycles in the data flow graph 729 // and (2) this function is called on constant expressions in postorder. 730 if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) { 731 IsNew = true; 732 NewOperands.push_back(cast<Constant>(NewOperand)); 733 continue; 734 } 735 if (auto CExpr = dyn_cast<ConstantExpr>(Operand)) 736 if (Value *NewOperand = cloneConstantExprWithNewAddressSpace( 737 CExpr, NewAddrSpace, ValueWithNewAddrSpace, DL, TTI)) { 738 IsNew = true; 739 NewOperands.push_back(cast<Constant>(NewOperand)); 740 continue; 741 } 742 // Otherwise, reuses the old operand. 743 NewOperands.push_back(Operand); 744 } 745 746 // If !IsNew, we will replace the Value with itself. However, replaced values 747 // are assumed to wrapped in a addrspace cast later so drop it now. 748 if (!IsNew) 749 return nullptr; 750 751 if (CE->getOpcode() == Instruction::GetElementPtr) { 752 // Needs to specify the source type while constructing a getelementptr 753 // constant expression. 754 return CE->getWithOperands(NewOperands, TargetType, /*OnlyIfReduced=*/false, 755 cast<GEPOperator>(CE)->getSourceElementType()); 756 } 757 758 return CE->getWithOperands(NewOperands, TargetType); 759 } 760 761 // Returns a clone of the value `V`, with its operands replaced as specified in 762 // ValueWithNewAddrSpace. This function is called on every flat address 763 // expression whose address space needs to be modified, in postorder. 764 // 765 // See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix. 
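// Like cloneInstructionWithNewAddressSpace, this may return nullptr when the
// value cannot be rewritten; the caller then simply keeps using the original
// flat value.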
766 Value *InferAddressSpacesImpl::cloneValueWithNewAddressSpace( 767 Value *V, unsigned NewAddrSpace, 768 const ValueToValueMapTy &ValueWithNewAddrSpace, 769 const PredicatedAddrSpaceMapTy &PredicatedAS, 770 SmallVectorImpl<const Use *> *UndefUsesToFix) const { 771 // All values in Postorder are flat address expressions. 772 assert(V->getType()->getPointerAddressSpace() == FlatAddrSpace && 773 isAddressExpression(*V, *DL, TTI)); 774 775 if (Instruction *I = dyn_cast<Instruction>(V)) { 776 Value *NewV = cloneInstructionWithNewAddressSpace( 777 I, NewAddrSpace, ValueWithNewAddrSpace, PredicatedAS, UndefUsesToFix); 778 if (Instruction *NewI = dyn_cast_or_null<Instruction>(NewV)) { 779 if (NewI->getParent() == nullptr) { 780 NewI->insertBefore(I); 781 NewI->takeName(I); 782 } 783 } 784 return NewV; 785 } 786 787 return cloneConstantExprWithNewAddressSpace( 788 cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace, DL, TTI); 789 } 790 791 // Defines the join operation on the address space lattice (see the file header 792 // comments). 793 unsigned InferAddressSpacesImpl::joinAddressSpaces(unsigned AS1, 794 unsigned AS2) const { 795 if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace) 796 return FlatAddrSpace; 797 798 if (AS1 == UninitializedAddressSpace) 799 return AS2; 800 if (AS2 == UninitializedAddressSpace) 801 return AS1; 802 803 // The join of two different specific address spaces is flat. 804 return (AS1 == AS2) ? AS1 : FlatAddrSpace; 805 } 806 807 bool InferAddressSpacesImpl::run(Function &F) { 808 DL = &F.getParent()->getDataLayout(); 809 810 if (AssumeDefaultIsFlatAddressSpace) 811 FlatAddrSpace = 0; 812 813 if (FlatAddrSpace == UninitializedAddressSpace) { 814 FlatAddrSpace = TTI->getFlatAddressSpace(); 815 if (FlatAddrSpace == UninitializedAddressSpace) 816 return false; 817 } 818 819 // Collects all flat address expressions in postorder. 820 std::vector<WeakTrackingVH> Postorder = collectFlatAddressExpressions(F); 821 822 // Runs a data-flow analysis to refine the address spaces of every expression 823 // in Postorder. 824 ValueToAddrSpaceMapTy InferredAddrSpace; 825 PredicatedAddrSpaceMapTy PredicatedAS; 826 inferAddressSpaces(Postorder, InferredAddrSpace, PredicatedAS); 827 828 // Changes the address spaces of the flat address expressions who are inferred 829 // to point to a specific address space. 830 return rewriteWithNewAddressSpaces(*TTI, Postorder, InferredAddrSpace, 831 PredicatedAS, &F); 832 } 833 834 // Constants need to be tracked through RAUW to handle cases with nested 835 // constant expressions, so wrap values in WeakTrackingVH. 836 void InferAddressSpacesImpl::inferAddressSpaces( 837 ArrayRef<WeakTrackingVH> Postorder, 838 ValueToAddrSpaceMapTy &InferredAddrSpace, 839 PredicatedAddrSpaceMapTy &PredicatedAS) const { 840 SetVector<Value *> Worklist(Postorder.begin(), Postorder.end()); 841 // Initially, all expressions are in the uninitialized address space. 842 for (Value *V : Postorder) 843 InferredAddrSpace[V] = UninitializedAddressSpace; 844 845 while (!Worklist.empty()) { 846 Value *V = Worklist.pop_back_val(); 847 848 // Try to update the address space of the stack top according to the 849 // address spaces of its operands. 850 if (!updateAddressSpace(*V, InferredAddrSpace, PredicatedAS)) 851 continue; 852 853 for (Value *User : V->users()) { 854 // Skip if User is already in the worklist. 
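      // (SetVector::insert below would ignore a duplicate anyway; this early
      // check just skips the remaining per-user work.)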
855 if (Worklist.count(User)) 856 continue; 857 858 auto Pos = InferredAddrSpace.find(User); 859 // Our algorithm only updates the address spaces of flat address 860 // expressions, which are those in InferredAddrSpace. 861 if (Pos == InferredAddrSpace.end()) 862 continue; 863 864 // Function updateAddressSpace moves the address space down a lattice 865 // path. Therefore, nothing to do if User is already inferred as flat (the 866 // bottom element in the lattice). 867 if (Pos->second == FlatAddrSpace) 868 continue; 869 870 Worklist.insert(User); 871 } 872 } 873 } 874 875 unsigned InferAddressSpacesImpl::getPredicatedAddrSpace(const Value &V, 876 Value *Opnd) const { 877 const Instruction *I = dyn_cast<Instruction>(&V); 878 if (!I) 879 return UninitializedAddressSpace; 880 881 Opnd = Opnd->stripInBoundsOffsets(); 882 for (auto &AssumeVH : AC.assumptionsFor(Opnd)) { 883 if (!AssumeVH) 884 continue; 885 CallInst *CI = cast<CallInst>(AssumeVH); 886 if (!isValidAssumeForContext(CI, I, DT)) 887 continue; 888 889 const Value *Ptr; 890 unsigned AS; 891 std::tie(Ptr, AS) = TTI->getPredicatedAddrSpace(CI->getArgOperand(0)); 892 if (Ptr) 893 return AS; 894 } 895 896 return UninitializedAddressSpace; 897 } 898 899 bool InferAddressSpacesImpl::updateAddressSpace( 900 const Value &V, ValueToAddrSpaceMapTy &InferredAddrSpace, 901 PredicatedAddrSpaceMapTy &PredicatedAS) const { 902 assert(InferredAddrSpace.count(&V)); 903 904 LLVM_DEBUG(dbgs() << "Updating the address space of\n " << V << '\n'); 905 906 // The new inferred address space equals the join of the address spaces 907 // of all its pointer operands. 908 unsigned NewAS = UninitializedAddressSpace; 909 910 const Operator &Op = cast<Operator>(V); 911 if (Op.getOpcode() == Instruction::Select) { 912 Value *Src0 = Op.getOperand(1); 913 Value *Src1 = Op.getOperand(2); 914 915 auto I = InferredAddrSpace.find(Src0); 916 unsigned Src0AS = (I != InferredAddrSpace.end()) ? 917 I->second : Src0->getType()->getPointerAddressSpace(); 918 919 auto J = InferredAddrSpace.find(Src1); 920 unsigned Src1AS = (J != InferredAddrSpace.end()) ? 921 J->second : Src1->getType()->getPointerAddressSpace(); 922 923 auto *C0 = dyn_cast<Constant>(Src0); 924 auto *C1 = dyn_cast<Constant>(Src1); 925 926 // If one of the inputs is a constant, we may be able to do a constant 927 // addrspacecast of it. Defer inferring the address space until the input 928 // address space is known. 929 if ((C1 && Src0AS == UninitializedAddressSpace) || 930 (C0 && Src1AS == UninitializedAddressSpace)) 931 return false; 932 933 if (C0 && isSafeToCastConstAddrSpace(C0, Src1AS)) 934 NewAS = Src1AS; 935 else if (C1 && isSafeToCastConstAddrSpace(C1, Src0AS)) 936 NewAS = Src0AS; 937 else 938 NewAS = joinAddressSpaces(Src0AS, Src1AS); 939 } else { 940 unsigned AS = TTI->getAssumedAddrSpace(&V); 941 if (AS != UninitializedAddressSpace) { 942 // Use the assumed address space directly. 943 NewAS = AS; 944 } else { 945 // Otherwise, infer the address space from its pointer operands. 946 for (Value *PtrOperand : getPointerOperands(V, *DL, TTI)) { 947 auto I = InferredAddrSpace.find(PtrOperand); 948 unsigned OperandAS; 949 if (I == InferredAddrSpace.end()) { 950 OperandAS = PtrOperand->getType()->getPointerAddressSpace(); 951 if (OperandAS == FlatAddrSpace) { 952 // Check AC for assumption dominating V. 
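          // Illustrative, target-specific example (AMDGPU; assumes
          // TTI::getPredicatedAddrSpace recognizes the intrinsic):
          //   %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %p)
          //   call void @llvm.assume(i1 %is.shared)
          // allows this particular use of %p to be refined to the shared
          // (LDS) address space.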
953 unsigned AS = getPredicatedAddrSpace(V, PtrOperand); 954 if (AS != UninitializedAddressSpace) { 955 LLVM_DEBUG(dbgs() 956 << " deduce operand AS from the predicate addrspace " 957 << AS << '\n'); 958 OperandAS = AS; 959 // Record this use with the predicated AS. 960 PredicatedAS[std::make_pair(&V, PtrOperand)] = OperandAS; 961 } 962 } 963 } else 964 OperandAS = I->second; 965 966 // join(flat, *) = flat. So we can break if NewAS is already flat. 967 NewAS = joinAddressSpaces(NewAS, OperandAS); 968 if (NewAS == FlatAddrSpace) 969 break; 970 } 971 } 972 } 973 974 unsigned OldAS = InferredAddrSpace.lookup(&V); 975 assert(OldAS != FlatAddrSpace); 976 if (OldAS == NewAS) 977 return false; 978 979 // If any updates are made, grabs its users to the worklist because 980 // their address spaces can also be possibly updated. 981 LLVM_DEBUG(dbgs() << " to " << NewAS << '\n'); 982 InferredAddrSpace[&V] = NewAS; 983 return true; 984 } 985 986 /// \p returns true if \p U is the pointer operand of a memory instruction with 987 /// a single pointer operand that can have its address space changed by simply 988 /// mutating the use to a new value. If the memory instruction is volatile, 989 /// return true only if the target allows the memory instruction to be volatile 990 /// in the new address space. 991 static bool isSimplePointerUseValidToReplace(const TargetTransformInfo &TTI, 992 Use &U, unsigned AddrSpace) { 993 User *Inst = U.getUser(); 994 unsigned OpNo = U.getOperandNo(); 995 bool VolatileIsAllowed = false; 996 if (auto *I = dyn_cast<Instruction>(Inst)) 997 VolatileIsAllowed = TTI.hasVolatileVariant(I, AddrSpace); 998 999 if (auto *LI = dyn_cast<LoadInst>(Inst)) 1000 return OpNo == LoadInst::getPointerOperandIndex() && 1001 (VolatileIsAllowed || !LI->isVolatile()); 1002 1003 if (auto *SI = dyn_cast<StoreInst>(Inst)) 1004 return OpNo == StoreInst::getPointerOperandIndex() && 1005 (VolatileIsAllowed || !SI->isVolatile()); 1006 1007 if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst)) 1008 return OpNo == AtomicRMWInst::getPointerOperandIndex() && 1009 (VolatileIsAllowed || !RMW->isVolatile()); 1010 1011 if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) 1012 return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() && 1013 (VolatileIsAllowed || !CmpX->isVolatile()); 1014 1015 return false; 1016 } 1017 1018 /// Update memory intrinsic uses that require more complex processing than 1019 /// simple memory instructions. Thse require re-mangling and may have multiple 1020 /// pointer operands. 1021 static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV, 1022 Value *NewV) { 1023 IRBuilder<> B(MI); 1024 MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa); 1025 MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope); 1026 MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias); 1027 1028 if (auto *MSI = dyn_cast<MemSetInst>(MI)) { 1029 B.CreateMemSet(NewV, MSI->getValue(), MSI->getLength(), 1030 MaybeAlign(MSI->getDestAlignment()), 1031 false, // isVolatile 1032 TBAA, ScopeMD, NoAliasMD); 1033 } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) { 1034 Value *Src = MTI->getRawSource(); 1035 Value *Dest = MTI->getRawDest(); 1036 1037 // Be careful in case this is a self-to-self copy. 
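    // If the old flat pointer was used as both the source and the
    // destination, redirect both operands to NewV rather than only the one
    // named by OldV.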
1038 if (Src == OldV) 1039 Src = NewV; 1040 1041 if (Dest == OldV) 1042 Dest = NewV; 1043 1044 if (isa<MemCpyInlineInst>(MTI)) { 1045 MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct); 1046 B.CreateMemCpyInline(Dest, MTI->getDestAlign(), Src, 1047 MTI->getSourceAlign(), MTI->getLength(), 1048 false, // isVolatile 1049 TBAA, TBAAStruct, ScopeMD, NoAliasMD); 1050 } else if (isa<MemCpyInst>(MTI)) { 1051 MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct); 1052 B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(), 1053 MTI->getLength(), 1054 false, // isVolatile 1055 TBAA, TBAAStruct, ScopeMD, NoAliasMD); 1056 } else { 1057 assert(isa<MemMoveInst>(MTI)); 1058 B.CreateMemMove(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(), 1059 MTI->getLength(), 1060 false, // isVolatile 1061 TBAA, ScopeMD, NoAliasMD); 1062 } 1063 } else 1064 llvm_unreachable("unhandled MemIntrinsic"); 1065 1066 MI->eraseFromParent(); 1067 return true; 1068 } 1069 1070 // \p returns true if it is OK to change the address space of constant \p C with 1071 // a ConstantExpr addrspacecast. 1072 bool InferAddressSpacesImpl::isSafeToCastConstAddrSpace(Constant *C, 1073 unsigned NewAS) const { 1074 assert(NewAS != UninitializedAddressSpace); 1075 1076 unsigned SrcAS = C->getType()->getPointerAddressSpace(); 1077 if (SrcAS == NewAS || isa<UndefValue>(C)) 1078 return true; 1079 1080 // Prevent illegal casts between different non-flat address spaces. 1081 if (SrcAS != FlatAddrSpace && NewAS != FlatAddrSpace) 1082 return false; 1083 1084 if (isa<ConstantPointerNull>(C)) 1085 return true; 1086 1087 if (auto *Op = dyn_cast<Operator>(C)) { 1088 // If we already have a constant addrspacecast, it should be safe to cast it 1089 // off. 1090 if (Op->getOpcode() == Instruction::AddrSpaceCast) 1091 return isSafeToCastConstAddrSpace(cast<Constant>(Op->getOperand(0)), NewAS); 1092 1093 if (Op->getOpcode() == Instruction::IntToPtr && 1094 Op->getType()->getPointerAddressSpace() == FlatAddrSpace) 1095 return true; 1096 } 1097 1098 return false; 1099 } 1100 1101 static Value::use_iterator skipToNextUser(Value::use_iterator I, 1102 Value::use_iterator End) { 1103 User *CurUser = I->getUser(); 1104 ++I; 1105 1106 while (I != End && I->getUser() == CurUser) 1107 ++I; 1108 1109 return I; 1110 } 1111 1112 bool InferAddressSpacesImpl::rewriteWithNewAddressSpaces( 1113 const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder, 1114 const ValueToAddrSpaceMapTy &InferredAddrSpace, 1115 const PredicatedAddrSpaceMapTy &PredicatedAS, Function *F) const { 1116 // For each address expression to be modified, creates a clone of it with its 1117 // pointer operands converted to the new address space. Since the pointer 1118 // operands are converted, the clone is naturally in the new address space by 1119 // construction. 1120 ValueToValueMapTy ValueWithNewAddrSpace; 1121 SmallVector<const Use *, 32> UndefUsesToFix; 1122 for (Value* V : Postorder) { 1123 unsigned NewAddrSpace = InferredAddrSpace.lookup(V); 1124 1125 // In some degenerate cases (e.g. invalid IR in unreachable code), we may 1126 // not even infer the value to have its original address space. 
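    // Such values are left untouched, as are values whose inferred space
    // already matches their type (checked just below).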
1127 if (NewAddrSpace == UninitializedAddressSpace) 1128 continue; 1129 1130 if (V->getType()->getPointerAddressSpace() != NewAddrSpace) { 1131 Value *New = 1132 cloneValueWithNewAddressSpace(V, NewAddrSpace, ValueWithNewAddrSpace, 1133 PredicatedAS, &UndefUsesToFix); 1134 if (New) 1135 ValueWithNewAddrSpace[V] = New; 1136 } 1137 } 1138 1139 if (ValueWithNewAddrSpace.empty()) 1140 return false; 1141 1142 // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace. 1143 for (const Use *UndefUse : UndefUsesToFix) { 1144 User *V = UndefUse->getUser(); 1145 User *NewV = cast_or_null<User>(ValueWithNewAddrSpace.lookup(V)); 1146 if (!NewV) 1147 continue; 1148 1149 unsigned OperandNo = UndefUse->getOperandNo(); 1150 assert(isa<UndefValue>(NewV->getOperand(OperandNo))); 1151 NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get())); 1152 } 1153 1154 SmallVector<Instruction *, 16> DeadInstructions; 1155 1156 // Replaces the uses of the old address expressions with the new ones. 1157 for (const WeakTrackingVH &WVH : Postorder) { 1158 assert(WVH && "value was unexpectedly deleted"); 1159 Value *V = WVH; 1160 Value *NewV = ValueWithNewAddrSpace.lookup(V); 1161 if (NewV == nullptr) 1162 continue; 1163 1164 LLVM_DEBUG(dbgs() << "Replacing the uses of " << *V << "\n with\n " 1165 << *NewV << '\n'); 1166 1167 if (Constant *C = dyn_cast<Constant>(V)) { 1168 Constant *Replace = ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV), 1169 C->getType()); 1170 if (C != Replace) { 1171 LLVM_DEBUG(dbgs() << "Inserting replacement const cast: " << Replace 1172 << ": " << *Replace << '\n'); 1173 C->replaceAllUsesWith(Replace); 1174 V = Replace; 1175 } 1176 } 1177 1178 Value::use_iterator I, E, Next; 1179 for (I = V->use_begin(), E = V->use_end(); I != E; ) { 1180 Use &U = *I; 1181 1182 // Some users may see the same pointer operand in multiple operands. Skip 1183 // to the next instruction. 1184 I = skipToNextUser(I, E); 1185 1186 if (isSimplePointerUseValidToReplace( 1187 TTI, U, V->getType()->getPointerAddressSpace())) { 1188 // If V is used as the pointer operand of a compatible memory operation, 1189 // sets the pointer operand to NewV. This replacement does not change 1190 // the element type, so the resultant load/store is still valid. 1191 U.set(NewV); 1192 continue; 1193 } 1194 1195 User *CurUser = U.getUser(); 1196 // Skip if the current user is the new value itself. 1197 if (CurUser == NewV) 1198 continue; 1199 // Handle more complex cases like intrinsic that need to be remangled. 1200 if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) { 1201 if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV)) 1202 continue; 1203 } 1204 1205 if (auto *II = dyn_cast<IntrinsicInst>(CurUser)) { 1206 if (rewriteIntrinsicOperands(II, V, NewV)) 1207 continue; 1208 } 1209 1210 if (isa<Instruction>(CurUser)) { 1211 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(CurUser)) { 1212 // If we can infer that both pointers are in the same addrspace, 1213 // transform e.g. 1214 // %cmp = icmp eq float* %p, %q 1215 // into 1216 // %cmp = icmp eq float addrspace(3)* %new_p, %new_q 1217 1218 unsigned NewAS = NewV->getType()->getPointerAddressSpace(); 1219 int SrcIdx = U.getOperandNo(); 1220 int OtherIdx = (SrcIdx == 0) ? 
1 : 0; 1221 Value *OtherSrc = Cmp->getOperand(OtherIdx); 1222 1223 if (Value *OtherNewV = ValueWithNewAddrSpace.lookup(OtherSrc)) { 1224 if (OtherNewV->getType()->getPointerAddressSpace() == NewAS) { 1225 Cmp->setOperand(OtherIdx, OtherNewV); 1226 Cmp->setOperand(SrcIdx, NewV); 1227 continue; 1228 } 1229 } 1230 1231 // Even if the type mismatches, we can cast the constant. 1232 if (auto *KOtherSrc = dyn_cast<Constant>(OtherSrc)) { 1233 if (isSafeToCastConstAddrSpace(KOtherSrc, NewAS)) { 1234 Cmp->setOperand(SrcIdx, NewV); 1235 Cmp->setOperand(OtherIdx, 1236 ConstantExpr::getAddrSpaceCast(KOtherSrc, NewV->getType())); 1237 continue; 1238 } 1239 } 1240 } 1241 1242 if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(CurUser)) { 1243 unsigned NewAS = NewV->getType()->getPointerAddressSpace(); 1244 if (ASC->getDestAddressSpace() == NewAS) { 1245 if (!cast<PointerType>(ASC->getType()) 1246 ->hasSameElementTypeAs( 1247 cast<PointerType>(NewV->getType()))) { 1248 NewV = CastInst::Create(Instruction::BitCast, NewV, 1249 ASC->getType(), "", ASC); 1250 } 1251 ASC->replaceAllUsesWith(NewV); 1252 DeadInstructions.push_back(ASC); 1253 continue; 1254 } 1255 } 1256 1257 // Otherwise, replaces the use with flat(NewV). 1258 if (Instruction *Inst = dyn_cast<Instruction>(V)) { 1259 // Don't create a copy of the original addrspacecast. 1260 if (U == V && isa<AddrSpaceCastInst>(V)) 1261 continue; 1262 1263 BasicBlock::iterator InsertPos = std::next(Inst->getIterator()); 1264 while (isa<PHINode>(InsertPos)) 1265 ++InsertPos; 1266 U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos)); 1267 } else { 1268 U.set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV), 1269 V->getType())); 1270 } 1271 } 1272 } 1273 1274 if (V->use_empty()) { 1275 if (Instruction *I = dyn_cast<Instruction>(V)) 1276 DeadInstructions.push_back(I); 1277 } 1278 } 1279 1280 for (Instruction *I : DeadInstructions) 1281 RecursivelyDeleteTriviallyDeadInstructions(I); 1282 1283 return true; 1284 } 1285 1286 bool InferAddressSpaces::runOnFunction(Function &F) { 1287 if (skipFunction(F)) 1288 return false; 1289 1290 auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>(); 1291 DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr; 1292 return InferAddressSpacesImpl( 1293 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), DT, 1294 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F), 1295 FlatAddrSpace) 1296 .run(F); 1297 } 1298 1299 FunctionPass *llvm::createInferAddressSpacesPass(unsigned AddressSpace) { 1300 return new InferAddressSpaces(AddressSpace); 1301 } 1302 1303 InferAddressSpacesPass::InferAddressSpacesPass() 1304 : FlatAddrSpace(UninitializedAddressSpace) {} 1305 InferAddressSpacesPass::InferAddressSpacesPass(unsigned AddressSpace) 1306 : FlatAddrSpace(AddressSpace) {} 1307 1308 PreservedAnalyses InferAddressSpacesPass::run(Function &F, 1309 FunctionAnalysisManager &AM) { 1310 bool Changed = 1311 InferAddressSpacesImpl(AM.getResult<AssumptionAnalysis>(F), 1312 AM.getCachedResult<DominatorTreeAnalysis>(F), 1313 &AM.getResult<TargetIRAnalysis>(F), FlatAddrSpace) 1314 .run(F); 1315 if (Changed) { 1316 PreservedAnalyses PA; 1317 PA.preserveSet<CFGAnalyses>(); 1318 PA.preserve<DominatorTreeAnalysis>(); 1319 return PA; 1320 } 1321 return PreservedAnalyses::all(); 1322 } 1323