//===- InferAddressSpace.cpp - --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __global__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared` which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero (so-called) specific address spaces to represent
// memory spaces (e.g. addrspace(3) means shared memory). The Clang frontend
// places only type-qualified variables in specific address spaces, and then
// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
// (the so-called generic address space) for other instructions to use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// to
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
// redirected to %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address spaces
// from type-qualified variable declarations to their users. For example, it
// optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX codegen
// is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it uses a data-flow
// analysis to infer as many generic pointers as possible to point to only one
// specific address space. In the above example, it can prove that %1 only
// points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
//   ICCS 2012
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two
// complications.
//
// First, the data flow analysis in Step 1 needs to be circular. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %y2 ]
//     %y2 = getelementptr %y, 1
//     %v = load %y2
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y. To address this complication,
// the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic
// All address expressions (our implementation only considers phi, bitcast,
// addrspacecast, and getelementptr) start with the uninitialized address
// space. The monotone transfer function moves the address space of a pointer
// down a lattice path from uninitialized to specific and then to generic. A
// join operation of two different specific address spaces pushes the
// expression down to the generic address space. The analysis completes once
// it reaches a fixed point.
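//
// As a concrete (purely illustrative) reading of the join on this lattice:
//   join(uninitialized, addrspace(3)) = addrspace(3)
//   join(addrspace(3),  addrspace(3)) = addrspace(3)
//   join(addrspace(3),  addrspace(1)) = generic
// so a phi whose incoming pointers all come from shared memory stays in
// addrspace(3), while one that mixes shared and global pointers falls back to
// the generic address space (see joinAddressSpaces below).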
//
// Second, IR rewriting in Step 2 also needs to be circular. For example,
// converting %y to addrspace(3) requires the compiler to know the converted
// %y2, but converting %y2 needs the converted %y. To address this
// complication, we break these cycles using "undef" placeholders. When
// converting an instruction `I` to a new address space, if its operand `Op`
// is not converted yet, we let `I` temporarily use `undef` and fix all the
// uses of undef later. For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, undef ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the undef in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %y2' ]
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

#define DEBUG_TYPE "infer-address-spaces"

using namespace llvm;

static cl::opt<bool> AssumeDefaultIsFlatAddressSpace(
    "assume-default-is-flat-addrspace", cl::init(false), cl::ReallyHidden,
    cl::desc("The default address space is assumed to be the flat address "
" 139 "This is mainly for test purpose.")); 140 141 static const unsigned UninitializedAddressSpace = 142 std::numeric_limits<unsigned>::max(); 143 144 namespace { 145 146 using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>; 147 using PostorderStackTy = llvm::SmallVector<PointerIntPair<Value *, 1, bool>, 4>; 148 149 /// InferAddressSpaces 150 class InferAddressSpaces : public FunctionPass { 151 const TargetTransformInfo *TTI = nullptr; 152 const DataLayout *DL = nullptr; 153 154 /// Target specific address space which uses of should be replaced if 155 /// possible. 156 unsigned FlatAddrSpace = 0; 157 158 public: 159 static char ID; 160 161 InferAddressSpaces() : 162 FunctionPass(ID), FlatAddrSpace(UninitializedAddressSpace) {} 163 InferAddressSpaces(unsigned AS) : FunctionPass(ID), FlatAddrSpace(AS) {} 164 165 void getAnalysisUsage(AnalysisUsage &AU) const override { 166 AU.setPreservesCFG(); 167 AU.addRequired<TargetTransformInfoWrapperPass>(); 168 } 169 170 bool runOnFunction(Function &F) override; 171 172 private: 173 // Returns the new address space of V if updated; otherwise, returns None. 174 Optional<unsigned> 175 updateAddressSpace(const Value &V, 176 const ValueToAddrSpaceMapTy &InferredAddrSpace) const; 177 178 // Tries to infer the specific address space of each address expression in 179 // Postorder. 180 void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder, 181 ValueToAddrSpaceMapTy *InferredAddrSpace) const; 182 183 bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const; 184 185 Value *cloneInstructionWithNewAddressSpace( 186 Instruction *I, unsigned NewAddrSpace, 187 const ValueToValueMapTy &ValueWithNewAddrSpace, 188 SmallVectorImpl<const Use *> *UndefUsesToFix) const; 189 190 // Changes the flat address expressions in function F to point to specific 191 // address spaces if InferredAddrSpace says so. Postorder is the postorder of 192 // all flat expressions in the use-def graph of function F. 193 bool rewriteWithNewAddressSpaces( 194 const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder, 195 const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const; 196 197 void appendsFlatAddressExpressionToPostorderStack( 198 Value *V, PostorderStackTy &PostorderStack, 199 DenseSet<Value *> &Visited) const; 200 201 bool rewriteIntrinsicOperands(IntrinsicInst *II, 202 Value *OldV, Value *NewV) const; 203 void collectRewritableIntrinsicOperands(IntrinsicInst *II, 204 PostorderStackTy &PostorderStack, 205 DenseSet<Value *> &Visited) const; 206 207 std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const; 208 209 Value *cloneValueWithNewAddressSpace( 210 Value *V, unsigned NewAddrSpace, 211 const ValueToValueMapTy &ValueWithNewAddrSpace, 212 SmallVectorImpl<const Use *> *UndefUsesToFix) const; 213 unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const; 214 }; 215 216 } // end anonymous namespace 217 218 char InferAddressSpaces::ID = 0; 219 220 namespace llvm { 221 222 void initializeInferAddressSpacesPass(PassRegistry &); 223 224 } // end namespace llvm 225 226 INITIALIZE_PASS(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces", 227 false, false) 228 229 // Check whether that's no-op pointer bicast using a pair of 230 // `ptrtoint`/`inttoptr` due to the missing no-op pointer bitcast over 231 // different address spaces. 
static bool isNoopPtrIntCastPair(const Operator *I2P, const DataLayout &DL,
                                 const TargetTransformInfo *TTI) {
  assert(I2P->getOpcode() == Instruction::IntToPtr);
  auto *P2I = dyn_cast<Operator>(I2P->getOperand(0));
  if (!P2I || P2I->getOpcode() != Instruction::PtrToInt)
    return false;
  // Check that it's really safe to treat that pair of `ptrtoint`/`inttoptr`
  // as a no-op cast. Besides checking that both of them are no-op casts, as
  // the reinterpreted pointer may be used in other pointer arithmetic, we
  // also need to double-check that through the target-specific hook. That
  // ensures the underlying target also agrees that's a no-op address space
  // cast and pointer bits are preserved.
  // The current IR spec doesn't have clear rules on address space casts,
  // especially a clear definition for pointer bits in non-default address
  // spaces. It would be undefined if that pointer is dereferenced after an
  // invalid reinterpret cast. Also, due to the unclear meaning of bits in
  // non-default address spaces in the current spec, the pointer arithmetic
  // may also be undefined after an invalid pointer reinterpret cast. However,
  // as we confirm through the target hooks that it's a no-op addrspacecast,
  // it doesn't matter since the bits should be the same.
  return CastInst::isNoopCast(Instruction::CastOps(I2P->getOpcode()),
                              I2P->getOperand(0)->getType(), I2P->getType(),
                              DL) &&
         CastInst::isNoopCast(Instruction::CastOps(P2I->getOpcode()),
                              P2I->getOperand(0)->getType(), P2I->getType(),
                              DL) &&
         TTI->isNoopAddrSpaceCast(
             P2I->getOperand(0)->getType()->getPointerAddressSpace(),
             I2P->getType()->getPointerAddressSpace());
}

// Returns true if V is an address expression.
// TODO: Currently, we consider only phi, bitcast, addrspacecast, and
// getelementptr operators.
static bool isAddressExpression(const Value &V, const DataLayout &DL,
                                const TargetTransformInfo *TTI) {
  const Operator *Op = dyn_cast<Operator>(&V);
  if (!Op)
    return false;

  switch (Op->getOpcode()) {
  case Instruction::PHI:
    assert(Op->getType()->isPointerTy());
    return true;
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return true;
  case Instruction::Select:
    return Op->getType()->isPointerTy();
  case Instruction::Call: {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(&V);
    return II && II->getIntrinsicID() == Intrinsic::ptrmask;
  }
  case Instruction::IntToPtr:
    return isNoopPtrIntCastPair(Op, DL, TTI);
  default:
    return false;
  }
}

// Returns the pointer operands of V.
//
// Precondition: V is an address expression.
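// For example, the pointer operands of
//   %p = select i1 %c, float* %a, float* %b
// are {%a, %b}; for a getelementptr they are just the base pointer, and for a
// no-op `ptrtoint`/`inttoptr` pair they are the originally cast pointer.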
static SmallVector<Value *, 2>
getPointerOperands(const Value &V, const DataLayout &DL,
                   const TargetTransformInfo *TTI) {
  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI: {
    auto IncomingValues = cast<PHINode>(Op).incoming_values();
    return SmallVector<Value *, 2>(IncomingValues.begin(),
                                   IncomingValues.end());
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return {Op.getOperand(0)};
  case Instruction::Select:
    return {Op.getOperand(1), Op.getOperand(2)};
  case Instruction::Call: {
    const IntrinsicInst &II = cast<IntrinsicInst>(Op);
    assert(II.getIntrinsicID() == Intrinsic::ptrmask &&
           "unexpected intrinsic call");
    return {II.getArgOperand(0)};
  }
  case Instruction::IntToPtr: {
    assert(isNoopPtrIntCastPair(&Op, DL, TTI));
    auto *P2I = cast<Operator>(Op.getOperand(0));
    return {P2I->getOperand(0)};
  }
  default:
    llvm_unreachable("Unexpected instruction type.");
  }
}

bool InferAddressSpaces::rewriteIntrinsicOperands(IntrinsicInst *II,
                                                  Value *OldV,
                                                  Value *NewV) const {
  Module *M = II->getParent()->getParent()->getParent();

  switch (II->getIntrinsicID()) {
  case Intrinsic::objectsize: {
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  case Intrinsic::ptrmask:
    // This is handled as an address expression, not as a use memory operation.
    return false;
  default: {
    Value *Rewrite = TTI->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
    if (!Rewrite)
      return false;
    if (Rewrite != II)
      II->replaceAllUsesWith(Rewrite);
    return true;
  }
  }
}

void InferAddressSpaces::collectRewritableIntrinsicOperands(
    IntrinsicInst *II, PostorderStackTy &PostorderStack,
    DenseSet<Value *> &Visited) const {
  auto IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::ptrmask:
  case Intrinsic::objectsize:
    appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
                                                 PostorderStack, Visited);
    break;
  default:
    SmallVector<int, 2> OpIndexes;
    if (TTI->collectFlatAddressOperands(OpIndexes, IID)) {
      for (int Idx : OpIndexes) {
        appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(Idx),
                                                     PostorderStack, Visited);
      }
    }
    break;
  }
}

// If V is an unvisited flat address expression, appends V to PostorderStack
// and marks it as visited.
void InferAddressSpaces::appendsFlatAddressExpressionToPostorderStack(
    Value *V, PostorderStackTy &PostorderStack,
    DenseSet<Value *> &Visited) const {
  assert(V->getType()->isPointerTy());

  // Generic addressing expressions may be hidden in nested constant
  // expressions.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // TODO: Look in non-address parts, like icmp operands.
    if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
      PostorderStack.emplace_back(CE, false);

    return;
  }

  if (isAddressExpression(*V, *DL, TTI) &&
      V->getType()->getPointerAddressSpace() == FlatAddrSpace) {
    if (Visited.insert(V).second) {
      PostorderStack.emplace_back(V, false);

      Operator *Op = cast<Operator>(V);
      for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) {
        if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op->getOperand(I))) {
          if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
            PostorderStack.emplace_back(CE, false);
        }
      }
    }
  }
}

// Returns all flat address expressions in function F. The elements are
// ordered in postorder.
std::vector<WeakTrackingVH>
InferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
  PostorderStackTy PostorderStack;
  // The set of visited expressions.
  DenseSet<Value *> Visited;

  auto PushPtrOperand = [&](Value *Ptr) {
    appendsFlatAddressExpressionToPostorderStack(Ptr, PostorderStack, Visited);
  };

  // Look at operations that may be interesting to accelerate by moving to a
  // known address space. We aim at generating faster loads and stores, but
  // pure addressing calculations may also benefit.
  for (Instruction &I : instructions(F)) {
    if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      if (!GEP->getType()->isVectorTy())
        PushPtrOperand(GEP->getPointerOperand());
    } else if (auto *LI = dyn_cast<LoadInst>(&I))
      PushPtrOperand(LI->getPointerOperand());
    else if (auto *SI = dyn_cast<StoreInst>(&I))
      PushPtrOperand(SI->getPointerOperand());
    else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))
      PushPtrOperand(RMW->getPointerOperand());
    else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
      PushPtrOperand(CmpX->getPointerOperand());
    else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
      // For memset/memcpy/memmove, any pointer operand can be replaced.
      PushPtrOperand(MI->getRawDest());

      // Handle 2nd operand for memcpy/memmove.
      if (auto *MTI = dyn_cast<MemTransferInst>(MI))
        PushPtrOperand(MTI->getRawSource());
    } else if (auto *II = dyn_cast<IntrinsicInst>(&I))
      collectRewritableIntrinsicOperands(II, PostorderStack, Visited);
    else if (ICmpInst *Cmp = dyn_cast<ICmpInst>(&I)) {
      // FIXME: Handle vectors of pointers.
      if (Cmp->getOperand(0)->getType()->isPointerTy()) {
        PushPtrOperand(Cmp->getOperand(0));
        PushPtrOperand(Cmp->getOperand(1));
      }
    } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
      if (!ASC->getType()->isVectorTy())
        PushPtrOperand(ASC->getPointerOperand());
    } else if (auto *I2P = dyn_cast<IntToPtrInst>(&I)) {
      if (isNoopPtrIntCastPair(cast<Operator>(I2P), *DL, TTI))
        PushPtrOperand(
            cast<PtrToIntInst>(I2P->getOperand(0))->getPointerOperand());
    }
  }

  std::vector<WeakTrackingVH> Postorder; // The resultant postorder.
  while (!PostorderStack.empty()) {
    Value *TopVal = PostorderStack.back().getPointer();
    // If the operands of the expression on the top are already explored,
    // adds that expression to the resultant postorder.
    if (PostorderStack.back().getInt()) {
      if (TopVal->getType()->getPointerAddressSpace() == FlatAddrSpace)
        Postorder.push_back(TopVal);
      PostorderStack.pop_back();
      continue;
    }
    // Otherwise, adds its operands to the stack and explores them.
    PostorderStack.back().setInt(true);
    for (Value *PtrOperand : getPointerOperands(*TopVal, *DL, TTI)) {
      appendsFlatAddressExpressionToPostorderStack(PtrOperand, PostorderStack,
                                                   Visited);
    }
  }
  return Postorder;
}

// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
// of OperandUse.get() in the new address space. If the clone is not ready yet,
// returns an undef in the new address space as a placeholder.
static Value *operandWithNewAddressSpaceOrCreateUndef(
    const Use &OperandUse, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Value *Operand = OperandUse.get();

  Type *NewPtrTy =
      Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (Constant *C = dyn_cast<Constant>(Operand))
    return ConstantExpr::getAddrSpaceCast(C, NewPtrTy);

  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
    return NewOperand;

  UndefUsesToFix->push_back(&OperandUse);
  return UndefValue::get(NewPtrTy);
}

// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
// adds that operand use to UndefUsesToFix so that the caller can fix them
// later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns
// a Value* instead of an Instruction*.
//
// This may also return nullptr in the case the instruction could not be
// rewritten.
Value *InferAddressSpaces::cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const {
  Type *NewPtrType =
      I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space,
    // according to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Handle calls specially and early: technically the pointer is an
    // argument of the call rather than a regular instruction operand.
    assert(II->getIntrinsicID() == Intrinsic::ptrmask);
    Value *NewPtr = operandWithNewAddressSpaceOrCreateUndef(
        II->getArgOperandUse(0), NewAddrSpace, ValueWithNewAddrSpace,
        UndefUsesToFix);
    Value *Rewrite =
        TTI->rewriteIntrinsicWithAddressSpace(II, II->getArgOperand(0), NewPtr);
    if (Rewrite) {
      assert(Rewrite != II && "cannot modify this pointer operation in place");
      return Rewrite;
    }

    return nullptr;
  }

  // Computes the converted pointer operands.
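  // Non-pointer operands are recorded as nullptr placeholders so that operand
  // indices in NewPointerOperands line up with those of the original
  // instruction in the switch below.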
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPointerTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
  }

  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPointerTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
        GEP->getSourceElementType(), NewPointerOperands[0],
        SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  case Instruction::Select:
    assert(I->getType()->isPointerTy());
    return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
                              NewPointerOperands[2], "", nullptr, I);
  case Instruction::IntToPtr: {
    assert(isNoopPtrIntCastPair(cast<Operator>(I), *DL, TTI));
    Value *Src = cast<Operator>(I->getOperand(0))->getOperand(0);
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
// constant expression `CE` with its operands replaced as specified in
// ValueWithNewAddrSpace.
static Value *cloneConstantExprWithNewAddressSpace(
    ConstantExpr *CE, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace, const DataLayout *DL,
    const TargetTransformInfo *TTI) {
  Type *TargetType =
      CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    // Because CE is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space according
    // to our algorithm.
    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
           NewAddrSpace);
    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
  }

  if (CE->getOpcode() == Instruction::BitCast) {
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(CE->getOperand(0)))
      return ConstantExpr::getBitCast(cast<Constant>(NewOperand), TargetType);
    return ConstantExpr::getAddrSpaceCast(CE, TargetType);
  }

  if (CE->getOpcode() == Instruction::Select) {
    Constant *Src0 = CE->getOperand(1);
    Constant *Src1 = CE->getOperand(2);
    if (Src0->getType()->getPointerAddressSpace() ==
        Src1->getType()->getPointerAddressSpace()) {

      return ConstantExpr::getSelect(
          CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType),
          ConstantExpr::getAddrSpaceCast(Src1, TargetType));
    }
  }

  if (CE->getOpcode() == Instruction::IntToPtr) {
    assert(isNoopPtrIntCastPair(cast<Operator>(CE), *DL, TTI));
    Constant *Src = cast<ConstantExpr>(CE->getOperand(0))->getOperand(0);
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    return ConstantExpr::getBitCast(Src, TargetType);
  }

  // Computes the operands of the new constant expression.
  bool IsNew = false;
  SmallVector<Constant *, 4> NewOperands;
  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
    Constant *Operand = CE->getOperand(Index);
    // If the address space of `Operand` needs to be modified, the new operand
    // with the new address space should already be in ValueWithNewAddrSpace
    // because (1) the constant expressions we consider (i.e. addrspacecast,
    // bitcast, and getelementptr) do not incur cycles in the data flow graph
    // and (2) this function is called on constant expressions in postorder.
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
      IsNew = true;
      NewOperands.push_back(cast<Constant>(NewOperand));
      continue;
    }
    if (auto CExpr = dyn_cast<ConstantExpr>(Operand))
      if (Value *NewOperand = cloneConstantExprWithNewAddressSpace(
              CExpr, NewAddrSpace, ValueWithNewAddrSpace, DL, TTI)) {
        IsNew = true;
        NewOperands.push_back(cast<Constant>(NewOperand));
        continue;
      }
    // Otherwise, reuses the old operand.
    NewOperands.push_back(Operand);
  }

  // If !IsNew, we will replace the Value with itself. However, replaced values
  // are assumed to be wrapped in an addrspacecast later, so drop it now.
  if (!IsNew)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Needs to specify the source type while constructing a getelementptr
    // constant expression.
    return CE->getWithOperands(
        NewOperands, TargetType, /*OnlyIfReduced=*/false,
        NewOperands[0]->getType()->getPointerElementType());
  }

  return CE->getWithOperands(NewOperands, TargetType);
}

// Returns a clone of the value `V`, with its operands replaced as specified in
// ValueWithNewAddrSpace. This function is called on every flat address
// expression whose address space needs to be modified, in postorder.
//
// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
Value *InferAddressSpaces::cloneValueWithNewAddressSpace(
    Value *V, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const {
  // All values in Postorder are flat address expressions.
  assert(isAddressExpression(*V, *DL, TTI) &&
         V->getType()->getPointerAddressSpace() == FlatAddrSpace);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Value *NewV = cloneInstructionWithNewAddressSpace(
        I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
    if (Instruction *NewI = dyn_cast_or_null<Instruction>(NewV)) {
      if (NewI->getParent() == nullptr) {
        NewI->insertBefore(I);
        NewI->takeName(I);
      }
    }
    return NewV;
  }

  return cloneConstantExprWithNewAddressSpace(
      cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace, DL, TTI);
}

// Defines the join operation on the address space lattice (see the file header
// comments).
unsigned InferAddressSpaces::joinAddressSpaces(unsigned AS1,
                                               unsigned AS2) const {
  if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
    return FlatAddrSpace;

  if (AS1 == UninitializedAddressSpace)
    return AS2;
  if (AS2 == UninitializedAddressSpace)
    return AS1;

  // The join of two different specific address spaces is flat.
  return (AS1 == AS2) ? AS1 : FlatAddrSpace;
}

bool InferAddressSpaces::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  DL = &F.getParent()->getDataLayout();

  if (AssumeDefaultIsFlatAddressSpace)
    FlatAddrSpace = 0;

  if (FlatAddrSpace == UninitializedAddressSpace) {
    FlatAddrSpace = TTI->getFlatAddressSpace();
    if (FlatAddrSpace == UninitializedAddressSpace)
      return false;
  }

  // Collects all flat address expressions in postorder.
  std::vector<WeakTrackingVH> Postorder = collectFlatAddressExpressions(F);

  // Runs a data-flow analysis to refine the address spaces of every expression
  // in Postorder.
  ValueToAddrSpaceMapTy InferredAddrSpace;
  inferAddressSpaces(Postorder, &InferredAddrSpace);

  // Changes the address spaces of the flat address expressions that are
  // inferred to point to a specific address space.
  return rewriteWithNewAddressSpaces(*TTI, Postorder, InferredAddrSpace, &F);
}

// Constants need to be tracked through RAUW to handle cases with nested
// constant expressions, so wrap values in WeakTrackingVH.
void InferAddressSpaces::inferAddressSpaces(
    ArrayRef<WeakTrackingVH> Postorder,
    ValueToAddrSpaceMapTy *InferredAddrSpace) const {
  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
  // Initially, all expressions are in the uninitialized address space.
  for (Value *V : Postorder)
    (*InferredAddrSpace)[V] = UninitializedAddressSpace;

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();

    // Tries to update the address space of the stack top according to the
    // address spaces of its operands.
    LLVM_DEBUG(dbgs() << "Updating the address space of\n  " << *V << '\n');
    Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
    if (!NewAS.hasValue())
      continue;
    // If any updates are made, adds its users to the worklist because their
    // address spaces may also need to be updated.
    LLVM_DEBUG(dbgs() << "  to " << NewAS.getValue() << '\n');
    (*InferredAddrSpace)[V] = NewAS.getValue();

    for (Value *User : V->users()) {
      // Skip if User is already in the worklist.
      if (Worklist.count(User))
        continue;

      auto Pos = InferredAddrSpace->find(User);
      // Our algorithm only updates the address spaces of flat address
      // expressions, which are those in InferredAddrSpace.
      if (Pos == InferredAddrSpace->end())
        continue;

      // Function updateAddressSpace moves the address space down a lattice
      // path. Therefore, nothing to do if User is already inferred as flat
      // (the bottom element in the lattice).
      if (Pos->second == FlatAddrSpace)
        continue;

      Worklist.insert(User);
    }
  }
}

Optional<unsigned> InferAddressSpaces::updateAddressSpace(
    const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) const {
  assert(InferredAddrSpace.count(&V));

  // The new inferred address space equals the join of the address spaces
  // of all its pointer operands.
  unsigned NewAS = UninitializedAddressSpace;

  const Operator &Op = cast<Operator>(V);
  if (Op.getOpcode() == Instruction::Select) {
    Value *Src0 = Op.getOperand(1);
    Value *Src1 = Op.getOperand(2);

    auto I = InferredAddrSpace.find(Src0);
    unsigned Src0AS = (I != InferredAddrSpace.end())
                          ? I->second
                          : Src0->getType()->getPointerAddressSpace();

    auto J = InferredAddrSpace.find(Src1);
    unsigned Src1AS = (J != InferredAddrSpace.end())
                          ? J->second
                          : Src1->getType()->getPointerAddressSpace();

    auto *C0 = dyn_cast<Constant>(Src0);
    auto *C1 = dyn_cast<Constant>(Src1);

    // If one of the inputs is a constant, we may be able to do a constant
    // addrspacecast of it. Defer inferring the address space until the input
    // address space is known.
    if ((C1 && Src0AS == UninitializedAddressSpace) ||
        (C0 && Src1AS == UninitializedAddressSpace))
      return None;

    if (C0 && isSafeToCastConstAddrSpace(C0, Src1AS))
      NewAS = Src1AS;
    else if (C1 && isSafeToCastConstAddrSpace(C1, Src0AS))
      NewAS = Src0AS;
    else
      NewAS = joinAddressSpaces(Src0AS, Src1AS);
  } else {
    for (Value *PtrOperand : getPointerOperands(V, *DL, TTI)) {
      auto I = InferredAddrSpace.find(PtrOperand);
      unsigned OperandAS =
          I != InferredAddrSpace.end()
              ? I->second
              : PtrOperand->getType()->getPointerAddressSpace();

      // join(flat, *) = flat. So we can break if NewAS is already flat.
      NewAS = joinAddressSpaces(NewAS, OperandAS);
      if (NewAS == FlatAddrSpace)
        break;
    }
  }

  unsigned OldAS = InferredAddrSpace.lookup(&V);
  assert(OldAS != FlatAddrSpace);
  if (OldAS == NewAS)
    return None;
  return NewAS;
}

/// \returns true if \p U is the pointer operand of a memory instruction with
/// a single pointer operand that can have its address space changed by simply
/// mutating the use to a new value. If the memory instruction is volatile,
/// return true only if the target allows the memory instruction to be
/// volatile in the new address space.
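///
/// For example, the pointer operand of a non-volatile
///   %v = load float, float* %p
/// can simply be mutated to point at the addrspace(3) clone of %p; a volatile
/// access is rewritten only when TTI.hasVolatileVariant reports that a
/// volatile access is legal in the new address space.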
static bool isSimplePointerUseValidToReplace(const TargetTransformInfo &TTI,
                                             Use &U, unsigned AddrSpace) {
  User *Inst = U.getUser();
  unsigned OpNo = U.getOperandNo();
  bool VolatileIsAllowed = false;
  if (auto *I = dyn_cast<Instruction>(Inst))
    VolatileIsAllowed = TTI.hasVolatileVariant(I, AddrSpace);

  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return OpNo == LoadInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !LI->isVolatile());

  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return OpNo == StoreInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !SI->isVolatile());

  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
    return OpNo == AtomicRMWInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !RMW->isVolatile());

  if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst))
    return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !CmpX->isVolatile());

  return false;
}

/// Update memory intrinsic uses that require more complex processing than
/// simple memory instructions. These require re-mangling and may have multiple
/// pointer operands.
static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
                                     Value *NewV) {
  IRBuilder<> B(MI);
  MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa);
  MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias);

  if (auto *MSI = dyn_cast<MemSetInst>(MI)) {
    B.CreateMemSet(NewV, MSI->getValue(), MSI->getLength(),
                   MaybeAlign(MSI->getDestAlignment()),
                   false, // isVolatile
                   TBAA, ScopeMD, NoAliasMD);
  } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
    Value *Src = MTI->getRawSource();
    Value *Dest = MTI->getRawDest();

    // Be careful in case this is a self-to-self copy.
    if (Src == OldV)
      Src = NewV;

    if (Dest == OldV)
      Dest = NewV;

    if (isa<MemCpyInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                     MTI->getLength(),
                     false, // isVolatile
                     TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else {
      assert(isa<MemMoveInst>(MTI));
      B.CreateMemMove(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                      MTI->getLength(),
                      false, // isVolatile
                      TBAA, ScopeMD, NoAliasMD);
    }
  } else
    llvm_unreachable("unhandled MemIntrinsic");

  MI->eraseFromParent();
  return true;
}

// \returns true if it is OK to change the address space of constant \p C with
// a ConstantExpr addrspacecast.
bool InferAddressSpaces::isSafeToCastConstAddrSpace(Constant *C,
                                                    unsigned NewAS) const {
  assert(NewAS != UninitializedAddressSpace);

  unsigned SrcAS = C->getType()->getPointerAddressSpace();
  if (SrcAS == NewAS || isa<UndefValue>(C))
    return true;

  // Prevent illegal casts between different non-flat address spaces.
  if (SrcAS != FlatAddrSpace && NewAS != FlatAddrSpace)
    return false;

  if (isa<ConstantPointerNull>(C))
    return true;

  if (auto *Op = dyn_cast<Operator>(C)) {
    // If we already have a constant addrspacecast, it should be safe to cast
    // it off.
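    // For example, `addrspacecast (i8 addrspace(3)* @x to i8*)` can be
    // re-cast to addrspace(3) because the underlying object @x already lives
    // there; the recursion below checks the source of the cast.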
    if (Op->getOpcode() == Instruction::AddrSpaceCast)
      return isSafeToCastConstAddrSpace(cast<Constant>(Op->getOperand(0)),
                                        NewAS);

    if (Op->getOpcode() == Instruction::IntToPtr &&
        Op->getType()->getPointerAddressSpace() == FlatAddrSpace)
      return true;
  }

  return false;
}

static Value::use_iterator skipToNextUser(Value::use_iterator I,
                                          Value::use_iterator End) {
  User *CurUser = I->getUser();
  ++I;

  while (I != End && I->getUser() == CurUser)
    ++I;

  return I;
}

bool InferAddressSpaces::rewriteWithNewAddressSpaces(
    const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
  // construction.
  ValueToValueMapTy ValueWithNewAddrSpace;
  SmallVector<const Use *, 32> UndefUsesToFix;
  for (Value *V : Postorder) {
    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
      Value *New = cloneValueWithNewAddressSpace(
          V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
      if (New)
        ValueWithNewAddrSpace[V] = New;
    }
  }

  if (ValueWithNewAddrSpace.empty())
    return false;

  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
  for (const Use *UndefUse : UndefUsesToFix) {
    User *V = UndefUse->getUser();
    User *NewV = cast_or_null<User>(ValueWithNewAddrSpace.lookup(V));
    if (!NewV)
      continue;

    unsigned OperandNo = UndefUse->getOperandNo();
    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
  }

  SmallVector<Instruction *, 16> DeadInstructions;

  // Replaces the uses of the old address expressions with the new ones.
  for (const WeakTrackingVH &WVH : Postorder) {
    assert(WVH && "value was unexpectedly deleted");
    Value *V = WVH;
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
    if (NewV == nullptr)
      continue;

    LLVM_DEBUG(dbgs() << "Replacing the uses of " << *V << "\n  with\n  "
                      << *NewV << '\n');

    if (Constant *C = dyn_cast<Constant>(V)) {
      Constant *Replace =
          ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV), C->getType());
      if (C != Replace) {
        LLVM_DEBUG(dbgs() << "Inserting replacement const cast: " << Replace
                          << ": " << *Replace << '\n');
        C->replaceAllUsesWith(Replace);
        V = Replace;
      }
    }

    Value::use_iterator I, E, Next;
    for (I = V->use_begin(), E = V->use_end(); I != E;) {
      Use &U = *I;

      // Some users may see the same pointer operand in multiple operands.
      // Skip to the next instruction.
      I = skipToNextUser(I, E);

      if (isSimplePointerUseValidToReplace(
              TTI, U, V->getType()->getPointerAddressSpace())) {
        // If V is used as the pointer operand of a compatible memory
        // operation, sets the pointer operand to NewV. This replacement does
        // not change the element type, so the resultant load/store is still
        // valid.
        U.set(NewV);
        continue;
      }

      User *CurUser = U.getUser();
      // Handle more complex cases like intrinsics that need to be remangled.
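      // For example, the pointer type is part of the mangled name of
      // intrinsics such as `llvm.memcpy.*` and `llvm.objectsize.*`, so their
      // declarations must be regenerated (see handleMemIntrinsicPtrUse and
      // rewriteIntrinsicOperands) rather than just mutating the operand.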
      if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) {
        if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
          continue;
      }

      if (auto *II = dyn_cast<IntrinsicInst>(CurUser)) {
        if (rewriteIntrinsicOperands(II, V, NewV))
          continue;
      }

      if (isa<Instruction>(CurUser)) {
        if (ICmpInst *Cmp = dyn_cast<ICmpInst>(CurUser)) {
          // If we can infer that both pointers are in the same addrspace,
          // transform e.g.
          //   %cmp = icmp eq float* %p, %q
          // into
          //   %cmp = icmp eq float addrspace(3)* %new_p, %new_q

          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          int SrcIdx = U.getOperandNo();
          int OtherIdx = (SrcIdx == 0) ? 1 : 0;
          Value *OtherSrc = Cmp->getOperand(OtherIdx);

          if (Value *OtherNewV = ValueWithNewAddrSpace.lookup(OtherSrc)) {
            if (OtherNewV->getType()->getPointerAddressSpace() == NewAS) {
              Cmp->setOperand(OtherIdx, OtherNewV);
              Cmp->setOperand(SrcIdx, NewV);
              continue;
            }
          }

          // Even if the type mismatches, we can cast the constant.
          if (auto *KOtherSrc = dyn_cast<Constant>(OtherSrc)) {
            if (isSafeToCastConstAddrSpace(KOtherSrc, NewAS)) {
              Cmp->setOperand(SrcIdx, NewV);
              Cmp->setOperand(OtherIdx, ConstantExpr::getAddrSpaceCast(
                                            KOtherSrc, NewV->getType()));
              continue;
            }
          }
        }

        if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(CurUser)) {
          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          if (ASC->getDestAddressSpace() == NewAS) {
            if (ASC->getType()->getPointerElementType() !=
                NewV->getType()->getPointerElementType()) {
              NewV = CastInst::Create(Instruction::BitCast, NewV,
                                      ASC->getType(), "", ASC);
            }
            ASC->replaceAllUsesWith(NewV);
            DeadInstructions.push_back(ASC);
            continue;
          }
        }

        // Otherwise, replaces the use with flat(NewV).
        if (Instruction *Inst = dyn_cast<Instruction>(V)) {
          // Don't create a copy of the original addrspacecast.
          if (U == V && isa<AddrSpaceCastInst>(V))
            continue;

          BasicBlock::iterator InsertPos = std::next(Inst->getIterator());
          while (isa<PHINode>(InsertPos))
            ++InsertPos;
          U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
        } else {
          U.set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                               V->getType()));
        }
      }
    }

    if (V->use_empty()) {
      if (Instruction *I = dyn_cast<Instruction>(V))
        DeadInstructions.push_back(I);
    }
  }

  for (Instruction *I : DeadInstructions)
    RecursivelyDeleteTriviallyDeadInstructions(I);

  return true;
}

FunctionPass *llvm::createInferAddressSpacesPass(unsigned AddressSpace) {
  return new InferAddressSpaces(AddressSpace);
}