//===-- SPIRVEmitIntrinsics.cpp - emit SPIRV intrinsics ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The pass emits SPIRV intrinsics keeping essential high-level information for
// the translation of LLVM IR to SPIR-V.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsSPIRV.h"

#include <queue>

// This pass performs the following transformations at the LLVM IR level,
// required for the subsequent translation to SPIR-V:
// - replaces direct usages of aggregate constants with target-specific
//   intrinsics;
// - replaces aggregate-related instructions (extract/insert, ld/st, etc.)
//   with target-specific intrinsics;
// - emits intrinsics for the global variable initializers since IRTranslator
//   doesn't handle them and it's not very convenient to translate them
//   ourselves;
// - emits intrinsics to keep track of the string names assigned to the values;
// - emits intrinsics to keep track of constants (this is necessary to have an
//   LLVM IR constant after the IRTranslation is completed) for their further
//   deduplication;
// - emits intrinsics to keep track of original LLVM types of the values
//   to be able to emit proper SPIR-V types eventually.
//
// TODO: consider removing spv.track.constant in favor of spv.assign.type.
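//
// As a rough illustration (a sketch, not the exact textual IR), a store of an
// aggregate constant such as
//   store %struct.S { i32 1, i32 2 }, ptr %p
// is conceptually expressed through the intrinsics emitted by this pass,
// roughly
//   %agg = call ... spv_const_composite(i32 1, i32 2)
//   call void ... spv_store(%agg, ptr %p, i16 <flags>, i8 <align>)
// where the flags and alignment operands are derived from the original store.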

using namespace llvm;

namespace llvm {
void initializeSPIRVEmitIntrinsicsPass(PassRegistry &);
} // namespace llvm

namespace {
class SPIRVEmitIntrinsics
    : public FunctionPass,
      public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
  SPIRVTargetMachine *TM = nullptr;
  IRBuilder<> *IRB = nullptr;
  Function *F = nullptr;
  bool TrackConstants = true;
  DenseMap<Instruction *, Constant *> AggrConsts;
  DenseSet<Instruction *> AggrStores;
  void preprocessCompositeConstants();
  CallInst *buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef<Type *> Types,
                            Value *Arg, Value *Arg2) {
    ConstantAsMetadata *CM = ValueAsMetadata::getConstant(Arg);
    MDTuple *TyMD = MDNode::get(F->getContext(), CM);
    MetadataAsValue *VMD = MetadataAsValue::get(F->getContext(), TyMD);
    return IRB->CreateIntrinsic(IntrID, {Types}, {Arg2, VMD});
  }
  void replaceMemInstrUses(Instruction *Old, Instruction *New);
  void processInstrAfterVisit(Instruction *I);
  void insertAssignTypeIntrs(Instruction *I);
  void processGlobalValue(GlobalVariable &GV);

public:
  static char ID;
  SPIRVEmitIntrinsics() : FunctionPass(ID) {
    initializeSPIRVEmitIntrinsicsPass(*PassRegistry::getPassRegistry());
  }
  SPIRVEmitIntrinsics(SPIRVTargetMachine *_TM) : FunctionPass(ID), TM(_TM) {
    initializeSPIRVEmitIntrinsicsPass(*PassRegistry::getPassRegistry());
  }
  Instruction *visitInstruction(Instruction &I) { return &I; }
  Instruction *visitSwitchInst(SwitchInst &I);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &I);
  Instruction *visitBitCastInst(BitCastInst &I);
  Instruction *visitInsertElementInst(InsertElementInst &I);
  Instruction *visitExtractElementInst(ExtractElementInst &I);
  Instruction *visitInsertValueInst(InsertValueInst &I);
  Instruction *visitExtractValueInst(ExtractValueInst &I);
  Instruction *visitLoadInst(LoadInst &I);
  Instruction *visitStoreInst(StoreInst &I);
  Instruction *visitAllocaInst(AllocaInst &I);
  Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  bool runOnFunction(Function &F) override;
};
} // namespace

char SPIRVEmitIntrinsics::ID = 0;

INITIALIZE_PASS(SPIRVEmitIntrinsics, "emit-intrinsics", "SPIRV emit intrinsics",
                false, false)

static inline bool isAssignTypeInstr(const Instruction *I) {
  return isa<IntrinsicInst>(I) &&
         cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::spv_assign_type;
}

static bool isMemInstrToReplace(Instruction *I) {
  return isa<StoreInst>(I) || isa<LoadInst>(I) || isa<InsertValueInst>(I) ||
         isa<ExtractValueInst>(I) || isa<AtomicCmpXchgInst>(I);
}

static bool isAggrToReplace(const Value *V) {
  return isa<ConstantAggregate>(V) || isa<ConstantDataArray>(V) ||
         (isa<ConstantAggregateZero>(V) && !V->getType()->isVectorTy());
}

static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I) {
  if (isa<PHINode>(I))
    B.SetInsertPoint(I->getParent(), I->getParent()->getFirstInsertionPt());
  else
    B.SetInsertPoint(I);
}

static bool requireAssignType(Instruction *I) {
  IntrinsicInst *Intr = dyn_cast<IntrinsicInst>(I);
  if (Intr) {
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
      return false;
    }
  }
  return true;
}

void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
                                              Instruction *New) {
  while (!Old->user_empty()) {
    auto *U = Old->user_back();
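    // A spv_assign_type user is re-created against the replacement value;
    // other recognized users are simply redirected from Old to New.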
    if (isAssignTypeInstr(U)) {
      IRB->SetInsertPoint(U);
      SmallVector<Value *, 2> Args = {New, U->getOperand(1)};
      IRB->CreateIntrinsic(Intrinsic::spv_assign_type, {New->getType()}, Args);
      U->eraseFromParent();
    } else if (isMemInstrToReplace(U) || isa<ReturnInst>(U) ||
               isa<CallInst>(U)) {
      U->replaceUsesOfWith(Old, New);
    } else {
      llvm_unreachable("illegal aggregate intrinsic user");
    }
  }
  Old->eraseFromParent();
}

void SPIRVEmitIntrinsics::preprocessCompositeConstants() {
  std::queue<Instruction *> Worklist;
  for (auto &I : instructions(F))
    Worklist.push(&I);

  while (!Worklist.empty()) {
    auto *I = Worklist.front();
    assert(I);
    bool KeepInst = false;
    for (const auto &Op : I->operands()) {
      auto BuildCompositeIntrinsic = [&KeepInst, &Worklist, &I, &Op,
                                      this](Constant *AggrC,
                                            ArrayRef<Value *> Args) {
        IRB->SetInsertPoint(I);
        auto *CCI =
            IRB->CreateIntrinsic(Intrinsic::spv_const_composite, {}, {Args});
        Worklist.push(CCI);
        I->replaceUsesOfWith(Op, CCI);
        KeepInst = true;
        AggrConsts[CCI] = AggrC;
      };

      if (auto *AggrC = dyn_cast<ConstantAggregate>(Op)) {
        SmallVector<Value *> Args(AggrC->op_begin(), AggrC->op_end());
        BuildCompositeIntrinsic(AggrC, Args);
      } else if (auto *AggrC = dyn_cast<ConstantDataArray>(Op)) {
        SmallVector<Value *> Args;
        for (unsigned i = 0; i < AggrC->getNumElements(); ++i)
          Args.push_back(AggrC->getElementAsConstant(i));
        BuildCompositeIntrinsic(AggrC, Args);
      } else if (isa<ConstantAggregateZero>(Op) &&
                 !Op->getType()->isVectorTy()) {
        auto *AggrC = cast<ConstantAggregateZero>(Op);
        SmallVector<Value *> Args(AggrC->op_begin(), AggrC->op_end());
        BuildCompositeIntrinsic(AggrC, Args);
      }
    }
    if (!KeepInst)
      Worklist.pop();
  }
}

Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
  SmallVector<Value *, 4> Args;
  for (auto &Op : I.operands())
    if (Op.get()->getType()->isSized())
      Args.push_back(Op);
  IRB->CreateIntrinsic(Intrinsic::spv_switch, {I.getOperand(0)->getType()},
                       {Args});
  return &I;
}

Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
  SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
  SmallVector<Value *, 4> Args;
  Args.push_back(IRB->getInt1(I.isInBounds()));
  for (auto &Op : I.operands())
    Args.push_back(Op);
  auto *NewI = IRB->CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  I.replaceAllUsesWith(NewI);
  I.eraseFromParent();
  return NewI;
}

Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &I) {
  SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
  SmallVector<Value *> Args(I.op_begin(), I.op_end());
  auto *NewI = IRB->CreateIntrinsic(Intrinsic::spv_bitcast, {Types}, {Args});
  std::string InstName = I.hasName() ? I.getName().str() : "";
  I.replaceAllUsesWith(NewI);
  I.eraseFromParent();
  NewI->setName(InstName);
  return NewI;
}

Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &I) {
  SmallVector<Type *, 4> Types = {I.getType(), I.getOperand(0)->getType(),
                                  I.getOperand(1)->getType(),
                                  I.getOperand(2)->getType()};
  SmallVector<Value *> Args(I.op_begin(), I.op_end());
  auto *NewI = IRB->CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
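  // Capture the original name before erasing I, then re-apply it to NewI.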
  std::string InstName = I.hasName() ? I.getName().str() : "";
  I.replaceAllUsesWith(NewI);
  I.eraseFromParent();
  NewI->setName(InstName);
  return NewI;
}

Instruction *
SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &I) {
  SmallVector<Type *, 3> Types = {I.getType(), I.getVectorOperandType(),
                                  I.getIndexOperand()->getType()};
  SmallVector<Value *, 2> Args = {I.getVectorOperand(), I.getIndexOperand()};
  auto *NewI = IRB->CreateIntrinsic(Intrinsic::spv_extractelt, {Types}, {Args});
  std::string InstName = I.hasName() ? I.getName().str() : "";
  I.replaceAllUsesWith(NewI);
  I.eraseFromParent();
  NewI->setName(InstName);
  return NewI;
}

Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) {
  SmallVector<Type *, 1> Types = {I.getInsertedValueOperand()->getType()};
  SmallVector<Value *> Args;
  for (auto &Op : I.operands())
    if (isa<UndefValue>(Op))
      Args.push_back(UndefValue::get(IRB->getInt32Ty()));
    else
      Args.push_back(Op);
  for (auto &Op : I.indices())
    Args.push_back(IRB->getInt32(Op));
  Instruction *NewI =
      IRB->CreateIntrinsic(Intrinsic::spv_insertv, {Types}, {Args});
  replaceMemInstrUses(&I, NewI);
  return NewI;
}

Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &I) {
  SmallVector<Value *> Args;
  for (auto &Op : I.operands())
    Args.push_back(Op);
  for (auto &Op : I.indices())
    Args.push_back(IRB->getInt32(Op));
  auto *NewI =
      IRB->CreateIntrinsic(Intrinsic::spv_extractv, {I.getType()}, {Args});
  I.replaceAllUsesWith(NewI);
  I.eraseFromParent();
  return NewI;
}

Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &I) {
  if (!I.getType()->isAggregateType())
    return &I;
  TrackConstants = false;
  const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
  MachineMemOperand::Flags Flags =
      TLI->getLoadMemOperandFlags(I, F->getParent()->getDataLayout());
  auto *NewI =
      IRB->CreateIntrinsic(Intrinsic::spv_load, {I.getOperand(0)->getType()},
                           {I.getPointerOperand(), IRB->getInt16(Flags),
                            IRB->getInt8(I.getAlign().value())});
  replaceMemInstrUses(&I, NewI);
  return NewI;
}

Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &I) {
  if (!AggrStores.contains(&I))
    return &I;
  TrackConstants = false;
  const auto *TLI = TM->getSubtargetImpl()->getTargetLowering();
  MachineMemOperand::Flags Flags =
      TLI->getStoreMemOperandFlags(I, F->getParent()->getDataLayout());
  auto *PtrOp = I.getPointerOperand();
  auto *NewI = IRB->CreateIntrinsic(
      Intrinsic::spv_store, {I.getValueOperand()->getType(), PtrOp->getType()},
      {I.getValueOperand(), PtrOp, IRB->getInt16(Flags),
       IRB->getInt8(I.getAlign().value())});
  I.eraseFromParent();
  return NewI;
}

Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &I) {
  TrackConstants = false;
  return &I;
}

Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
  assert(I.getType()->isAggregateType() && "Aggregate result is expected");
  SmallVector<Value *> Args;
  for (auto &Op : I.operands())
    Args.push_back(Op);
  Args.push_back(IRB->getInt32(I.getSyncScopeID()));
  Args.push_back(IRB->getInt32(
      static_cast<uint32_t>(getMemSemantics(I.getSuccessOrdering()))));
  Args.push_back(IRB->getInt32(
      static_cast<uint32_t>(getMemSemantics(I.getFailureOrdering()))));
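  // The aggregate-producing cmpxchg becomes a spv_cmpxchg call; its users are
  // rewritten through replaceMemInstrUses below.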
  auto *NewI = IRB->CreateIntrinsic(Intrinsic::spv_cmpxchg,
                                    {I.getPointerOperand()->getType()}, {Args});
  replaceMemInstrUses(&I, NewI);
  return NewI;
}

void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV) {
  // Skip the special artificial variable llvm.global.annotations.
  if (GV.getName() == "llvm.global.annotations")
    return;
  if (GV.hasInitializer() && !isa<UndefValue>(GV.getInitializer())) {
    Constant *Init = GV.getInitializer();
    Type *Ty = isAggrToReplace(Init) ? IRB->getInt32Ty() : Init->getType();
    Constant *Const = isAggrToReplace(Init) ? IRB->getInt32(1) : Init;
    auto *InitInst = IRB->CreateIntrinsic(Intrinsic::spv_init_global,
                                          {GV.getType(), Ty}, {&GV, Const});
    InitInst->setArgOperand(1, Init);
  }
  if ((!GV.hasInitializer() || isa<UndefValue>(GV.getInitializer())) &&
      GV.getNumUses() == 0)
    IRB->CreateIntrinsic(Intrinsic::spv_unref_global, GV.getType(), &GV);
}

void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I) {
  Type *Ty = I->getType();
  if (!Ty->isVoidTy() && requireAssignType(I)) {
    setInsertPointSkippingPhis(*IRB, I->getNextNode());
    Type *TypeToAssign = Ty;
    if (auto *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::spv_const_composite) {
        auto t = AggrConsts.find(II);
        assert(t != AggrConsts.end());
        TypeToAssign = t->second->getType();
      }
    }
    Constant *Const = Constant::getNullValue(TypeToAssign);
    buildIntrWithMD(Intrinsic::spv_assign_type, {Ty}, Const, I);
  }
  for (const auto &Op : I->operands()) {
    if (isa<ConstantPointerNull>(Op) || isa<UndefValue>(Op) ||
        // Check GetElementPtrConstantExpr case.
        (isa<ConstantExpr>(Op) && isa<GEPOperator>(Op))) {
      IRB->SetInsertPoint(I);
      if (isa<UndefValue>(Op) && Op->getType()->isAggregateType())
        buildIntrWithMD(Intrinsic::spv_assign_type, {IRB->getInt32Ty()}, Op,
                        UndefValue::get(IRB->getInt32Ty()));
      else
        buildIntrWithMD(Intrinsic::spv_assign_type, {Op->getType()}, Op, Op);
    }
  }
}

void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I) {
  auto *II = dyn_cast<IntrinsicInst>(I);
  if (II && II->getIntrinsicID() == Intrinsic::spv_const_composite &&
      TrackConstants) {
    IRB->SetInsertPoint(I->getNextNode());
    Type *Ty = IRB->getInt32Ty();
    auto t = AggrConsts.find(I);
    assert(t != AggrConsts.end());
    auto *NewOp =
        buildIntrWithMD(Intrinsic::spv_track_constant, {Ty, Ty}, t->second, I);
    I->replaceAllUsesWith(NewOp);
    NewOp->setArgOperand(0, I);
  }
  for (const auto &Op : I->operands()) {
    if ((isa<ConstantAggregateZero>(Op) && Op->getType()->isVectorTy()) ||
        isa<PHINode>(I) || isa<SwitchInst>(I))
      TrackConstants = false;
    if ((isa<ConstantData>(Op) || isa<ConstantExpr>(Op)) && TrackConstants) {
      unsigned OpNo = Op.getOperandNo();
      if (II && ((II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
                 (II->paramHasAttr(OpNo, Attribute::ImmArg))))
        continue;
      IRB->SetInsertPoint(I);
      auto *NewOp = buildIntrWithMD(Intrinsic::spv_track_constant,
                                    {Op->getType(), Op->getType()}, Op, Op);
      I->setOperand(OpNo, NewOp);
    }
  }
  if (I->hasName()) {
    setInsertPointSkippingPhis(*IRB, I->getNextNode());
    std::vector<Value *> Args = {I};
    addStringImm(I->getName(), *IRB, Args);
    IRB->CreateIntrinsic(Intrinsic::spv_assign_name, {I->getType()}, Args);
  }
}

bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
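  // Only function definitions are instrumented; declarations have no body.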
  if (Func.isDeclaration())
    return false;
  F = &Func;
  IRB = new IRBuilder<>(Func.getContext());
  AggrConsts.clear();
  AggrStores.clear();

  // A StoreInst's operand type can be changed by the transformations below,
  // so record the relevant stores in the set up front. Also record stores
  // whose types have already been transformed.
  for (auto &I : instructions(Func)) {
    StoreInst *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;
    Type *ElTy = SI->getValueOperand()->getType();
    PointerType *PTy = cast<PointerType>(SI->getOperand(1)->getType());
    if (ElTy->isAggregateType() || ElTy->isVectorTy() ||
        !PTy->isOpaqueOrPointeeTypeMatches(ElTy))
      AggrStores.insert(&I);
  }

  IRB->SetInsertPoint(&Func.getEntryBlock().front());
  for (auto &GV : Func.getParent()->globals())
    processGlobalValue(GV);

  preprocessCompositeConstants();
  SmallVector<Instruction *> Worklist;
  for (auto &I : instructions(Func))
    Worklist.push_back(&I);

  for (auto &I : Worklist)
    insertAssignTypeIntrs(I);

  for (auto *I : Worklist) {
    TrackConstants = true;
    if (!I->getType()->isVoidTy() || isa<StoreInst>(I))
      IRB->SetInsertPoint(I->getNextNode());
    I = visit(*I);
    processInstrAfterVisit(I);
  }
  return true;
}

FunctionPass *llvm::createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM) {
  return new SPIRVEmitIntrinsics(TM);
}