//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass does misc. AMDGPU optimizations on IR before instruction
/// selection.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Utils/IntegerDivision.h"
#include <cassert>
#include <iterator>

#define DEBUG_TYPE "amdgpu-codegenprepare"

using namespace llvm;

namespace {

static cl::opt<bool> WidenLoads(
  "amdgpu-codegenprepare-widen-constant-loads",
  cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

static cl::opt<bool> UseMul24Intrin(
  "amdgpu-codegenprepare-mul24",
  cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(true));

// Legalize 64-bit division by using the generic IR expansion.
static cl::opt<bool> ExpandDiv64InIR(
  "amdgpu-codegenprepare-expand-div64",
  cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

// Leave all division operations as they are. This supersedes ExpandDiv64InIR
// and is used for testing the legalizer.
static cl::opt<bool> DisableIDivExpand(
  "amdgpu-codegenprepare-disable-idiv-expansion",
  cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

class AMDGPUCodeGenPrepare : public FunctionPass,
                             public InstVisitor<AMDGPUCodeGenPrepare, bool> {
  const GCNSubtarget *ST = nullptr;
  AssumptionCache *AC = nullptr;
  DominatorTree *DT = nullptr;
  LegacyDivergenceAnalysis *DA = nullptr;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  bool HasUnsafeFPMath = false;
  bool HasFP32Denormals = false;

  /// \returns \p T's base element bit width.
  unsigned getBaseElementBitWidth(const Type *T) const;

  /// \returns Equivalent 32 bit integer type for given type \p T. For example,
  /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
  /// is returned.
  Type *getI32Ty(IRBuilder<> &B, const Type *T) const;

  /// \returns True if binary operation \p I is a signed binary operation,
  /// false otherwise.
  bool isSigned(const BinaryOperator &I) const;

  /// \returns True if the condition of 'select' operation \p I comes from a
  /// signed 'icmp' operation, false otherwise.
  bool isSigned(const SelectInst &I) const;

  /// \returns True if type \p T needs to be promoted to 32 bit integer type,
  /// false otherwise.
  bool needsPromotionToI32(const Type *T) const;

  /// Promotes uniform binary operation \p I to equivalent 32 bit binary
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with the equivalent 32 bit binary operation,
  /// and truncating the result of the 32 bit binary operation back to \p I's
  /// original type. Division operations are not promoted.
  ///
  /// \returns True if \p I is promoted to equivalent 32 bit binary operation,
  /// false otherwise.
  bool promoteUniformOpToI32(BinaryOperator &I) const;
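
  // Illustrative example (not part of the original comments): a uniform
  //   %r = add i16 %a, %b
  // is rewritten roughly as
  //   %a32 = zext i16 %a to i32
  //   %b32 = zext i16 %b to i32
  //   %r32 = add nuw nsw i32 %a32, %b32
  //   %r   = trunc i32 %r32 to i16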

  /// Promotes uniform 'icmp' operation \p I to 32 bit 'icmp' operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, and replacing \p I with a 32 bit 'icmp' operation.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(ICmpInst &I) const;

  /// Promotes uniform 'select' operation \p I to 32 bit 'select'
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with a 32 bit 'select' operation, and
  /// truncating the result of the 32 bit 'select' operation back to \p I's
  /// original type.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(SelectInst &I) const;

  /// Promotes uniform 'bitreverse' intrinsic \p I to 32 bit 'bitreverse'
  /// intrinsic.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by zero extending the operand to 32
  /// bits, replacing \p I with the 32 bit 'bitreverse' intrinsic, shifting the
  /// result of the 32 bit 'bitreverse' intrinsic to the right with zero fill
  /// (the shift amount is 32 minus \p I's base element bit width), and
  /// truncating the result of the shift operation back to \p I's original type.
  ///
  /// \returns True.
  bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;
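
  // Illustrative example: a uniform i16 bitreverse
  //   %r = call i16 @llvm.bitreverse.i16(i16 %a)
  // is rewritten roughly as
  //   %e = zext i16 %a to i32
  //   %b = call i32 @llvm.bitreverse.i32(i32 %e)
  //   %s = lshr i32 %b, 16        ; shift amount = 32 - 16
  //   %r = trunc i32 %s to i16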

  unsigned numBitsUnsigned(Value *Op, unsigned ScalarSize) const;
  unsigned numBitsSigned(Value *Op, unsigned ScalarSize) const;
  bool isI24(Value *V, unsigned ScalarSize) const;
  bool isU24(Value *V, unsigned ScalarSize) const;

  /// Replace mul instructions with llvm.amdgcn.mul.u24 or llvm.amdgcn.mul.s24.
  /// This is done on IR because SelectionDAG's known-bits analysis cannot see
  /// across basic blocks, so e.g. an 'and' asserting that an operand's high
  /// bits are zero in another block would be missed during selection.
  bool replaceMulWithMul24(BinaryOperator &I) const;

  /// Perform the same fold as the equivalently named function in DAGCombiner.
  /// Since some divisions are expanded here, this must be done first, before
  /// the expansion obscures the select.
  bool foldBinOpIntoSelect(BinaryOperator &I) const;
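
  // Illustrative example: when the select arms and the other operand are all
  // constant,
  //   %s = select i1 %c, i32 100, i32 1000
  //   %r = udiv i32 %s, 10
  // folds to
  //   %r = select i1 %c, i32 10, i32 100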

  bool divHasSpecialOptimization(BinaryOperator &I,
                                 Value *Num, Value *Den) const;
  int getDivNumBits(BinaryOperator &I,
                    Value *Num, Value *Den,
                    unsigned AtLeast, bool Signed) const;

  /// Expands 24 bit div or rem.
  Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den,
                        bool IsDiv, bool IsSigned) const;

  Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I,
                            Value *Num, Value *Den, unsigned NumBits,
                            bool IsDiv, bool IsSigned) const;

  /// Expands 32 bit div or rem.
  Value* expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;

  Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;
  void expandDivRem64(BinaryOperator &I) const;

  /// Widen a scalar load.
  ///
  /// \details Widen a uniform, small-type load from constant memory to a full
  /// 32 bits and then truncate the result, to allow a scalar load instruction
  /// to be used instead of a vector load.
  ///
  /// \returns True if the load can be widened.
  bool canWidenScalarExtLoad(LoadInst &I) const;

public:
  static char ID;

  AMDGPUCodeGenPrepare() : FunctionPass(ID) {}

  bool visitFDiv(BinaryOperator &I);

  bool visitInstruction(Instruction &I) { return false; }
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoadInst(LoadInst &I);
  bool visitICmpInst(ICmpInst &I);
  bool visitSelectInst(SelectInst &I);

  bool visitIntrinsicInst(IntrinsicInst &I);
  bool visitBitreverseIntrinsicInst(IntrinsicInst &I);

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU IR optimizations"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<LegacyDivergenceAnalysis>();

    // FIXME: Division expansion needs to preserve the dominator tree.
    if (!ExpandDiv64InIR)
      AU.setPreservesAll();
  }
};

} // end anonymous namespace

unsigned AMDGPUCodeGenPrepare::getBaseElementBitWidth(const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return T->getIntegerBitWidth();
  return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
}

Type *AMDGPUCodeGenPrepare::getI32Ty(IRBuilder<> &B, const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return B.getInt32Ty();
  return FixedVectorType::get(B.getInt32Ty(), cast<FixedVectorType>(T));
}

bool AMDGPUCodeGenPrepare::isSigned(const BinaryOperator &I) const {
  return I.getOpcode() == Instruction::AShr ||
      I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
}

bool AMDGPUCodeGenPrepare::isSigned(const SelectInst &I) const {
  return isa<ICmpInst>(I.getOperand(0)) ?
      cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
}

bool AMDGPUCodeGenPrepare::needsPromotionToI32(const Type *T) const {
  const IntegerType *IntTy = dyn_cast<IntegerType>(T);
  if (IntTy && IntTy->getBitWidth() > 1 && IntTy->getBitWidth() <= 16)
    return true;

  if (const VectorType *VT = dyn_cast<VectorType>(T)) {
    // TODO: The set of packed operations is more limited, so may want to
    // promote some anyway.
    if (ST->hasVOP3PInsts())
      return false;

    return needsPromotionToI32(VT->getElementType());
  }

  return false;
}

// Return true if the op promoted to i32 should have nsw set.
static bool promotedOpIsNSW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Sub:
    return true;
  case Instruction::Mul:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

// Return true if the op promoted to i32 should have nuw set.
static bool promotedOpIsNUW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Mul:
    return true;
  case Instruction::Sub:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}
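
// Illustrative note (reasoning not spelled out in the original comments): the
// promoted operands are zero extended from at most 16 bits, so e.g. a
// promoted 'add' computes at most 0xFFFF + 0xFFFF = 0x1FFFE, which fits in
// 17 bits; neither unsigned nor signed wrap can occur in 32 bits.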

bool AMDGPUCodeGenPrepare::canWidenScalarExtLoad(LoadInst &I) const {
  Type *Ty = I.getType();
  const DataLayout &DL = Mod->getDataLayout();
  int TySize = DL.getTypeSizeInBits(Ty);
  Align Alignment = DL.getValueOrABITypeAlignment(I.getAlign(), Ty);

  return I.isSimple() && TySize < 32 && Alignment >= 4 && DA->isUniform(&I);
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(BinaryOperator &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  if (I.getOpcode() == Instruction::SDiv ||
      I.getOpcode() == Instruction::UDiv ||
      I.getOpcode() == Instruction::SRem ||
      I.getOpcode() == Instruction::URem)
    return false;

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }

  ExtRes = Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1);
  if (Instruction *Inst = dyn_cast<Instruction>(ExtRes)) {
    if (promotedOpIsNSW(cast<Instruction>(I)))
      Inst->setHasNoSignedWrap();

    if (promotedOpIsNUW(cast<Instruction>(I)))
      Inst->setHasNoUnsignedWrap();

    if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
      Inst->setIsExact(ExactOp->isExact());
  }

  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(ICmpInst &I) const {
  assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *NewICmp = nullptr;

  if (I.isSigned()) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }
  NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);

  I.replaceAllUsesWith(NewICmp);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(SelectInst &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp1 = nullptr;
  Value *ExtOp2 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateSExt(I.getOperand(2), I32Ty);
  } else {
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
  }
  ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes =
      Builder.CreateTrunc(LShrOp, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

unsigned AMDGPUCodeGenPrepare::numBitsUnsigned(Value *Op,
                                               unsigned ScalarSize) const {
  KnownBits Known = computeKnownBits(Op, *DL, 0, AC);
  return ScalarSize - Known.countMinLeadingZeros();
}

unsigned AMDGPUCodeGenPrepare::numBitsSigned(Value *Op,
                                             unsigned ScalarSize) const {
  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return ScalarSize - ComputeNumSignBits(Op, *DL, 0, AC);
}
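
// Illustrative example: for %v = and i32 %x, 16777215 (0xFFFFFF), the top
// eight bits are known zero, so numBitsUnsigned(%v, 32) returns 24 and
// isU24 holds.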

bool AMDGPUCodeGenPrepare::isI24(Value *V, unsigned ScalarSize) const {
  return ScalarSize >= 24 && // Types less than 24-bit should be treated
                             // as unsigned 24-bit values.
    numBitsSigned(V, ScalarSize) < 24;
}

bool AMDGPUCodeGenPrepare::isU24(Value *V, unsigned ScalarSize) const {
  return numBitsUnsigned(V, ScalarSize) <= 24;
}

static void extractValues(IRBuilder<> &Builder,
                          SmallVectorImpl<Value *> &Values, Value *V) {
  auto *VT = dyn_cast<FixedVectorType>(V->getType());
  if (!VT) {
    Values.push_back(V);
    return;
  }

  for (int I = 0, E = VT->getNumElements(); I != E; ++I)
    Values.push_back(Builder.CreateExtractElement(V, I));
}

static Value *insertValues(IRBuilder<> &Builder,
                           Type *Ty,
                           SmallVectorImpl<Value *> &Values) {
  if (Values.size() == 1)
    return Values[0];

  Value *NewVal = UndefValue::get(Ty);
  for (int I = 0, E = Values.size(); I != E; ++I)
    NewVal = Builder.CreateInsertElement(NewVal, Values[I], I);

  return NewVal;
}

bool AMDGPUCodeGenPrepare::replaceMulWithMul24(BinaryOperator &I) const {
  if (I.getOpcode() != Instruction::Mul)
    return false;

  Type *Ty = I.getType();
  unsigned Size = Ty->getScalarSizeInBits();
  if (Size <= 16 && ST->has16BitInsts())
    return false;

  // Prefer scalar if this could be s_mul_i32
  if (DA->isUniform(&I))
    return false;

  Value *LHS = I.getOperand(0);
  Value *RHS = I.getOperand(1);
  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;

  // TODO: Should this try to match mulhi24?
  if (ST->hasMulU24() && isU24(LHS, Size) && isU24(RHS, Size)) {
    IntrID = Intrinsic::amdgcn_mul_u24;
  } else if (ST->hasMulI24() && isI24(LHS, Size) && isI24(RHS, Size)) {
    IntrID = Intrinsic::amdgcn_mul_i24;
  } else
    return false;

  SmallVector<Value *, 4> LHSVals;
  SmallVector<Value *, 4> RHSVals;
  SmallVector<Value *, 4> ResultVals;
  extractValues(Builder, LHSVals, LHS);
  extractValues(Builder, RHSVals, RHS);

  IntegerType *I32Ty = Builder.getInt32Ty();
  FunctionCallee Intrin = Intrinsic::getDeclaration(Mod, IntrID);
  for (int I = 0, E = LHSVals.size(); I != E; ++I) {
    Value *LHS, *RHS;
    if (IntrID == Intrinsic::amdgcn_mul_u24) {
      LHS = Builder.CreateZExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateZExtOrTrunc(RHSVals[I], I32Ty);
    } else {
      LHS = Builder.CreateSExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateSExtOrTrunc(RHSVals[I], I32Ty);
    }

    Value *Result = Builder.CreateCall(Intrin, {LHS, RHS});

    if (IntrID == Intrinsic::amdgcn_mul_u24) {
      ResultVals.push_back(Builder.CreateZExtOrTrunc(Result,
                                                     LHSVals[I]->getType()));
    } else {
      ResultVals.push_back(Builder.CreateSExtOrTrunc(Result,
                                                     LHSVals[I]->getType()));
    }
  }

  Value *NewVal = insertValues(Builder, Ty, ResultVals);
  NewVal->takeName(&I);
  I.replaceAllUsesWith(NewVal);
  I.eraseFromParent();

  return true;
}
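
// Illustrative example: for a divergent i32 multiply whose operands are
// provably unsigned 24-bit values,
//   %a = and i32 %x, 16777215
//   %b = and i32 %y, 16777215
//   %m = mul i32 %a, %b
// the multiply becomes
//   %m = call i32 @llvm.amdgcn.mul.u24(i32 %a, i32 %b)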

// Find a select instruction, which may have been cast. This is mostly to deal
// with cases where i16 selects were promoted here to i32.
static SelectInst *findSelectThroughCast(Value *V, CastInst *&Cast) {
  Cast = nullptr;
  if (SelectInst *Sel = dyn_cast<SelectInst>(V))
    return Sel;

  if ((Cast = dyn_cast<CastInst>(V))) {
    if (SelectInst *Sel = dyn_cast<SelectInst>(Cast->getOperand(0)))
      return Sel;
  }

  return nullptr;
}

bool AMDGPUCodeGenPrepare::foldBinOpIntoSelect(BinaryOperator &BO) const {
  // Don't do this unless the old select is going away. We want to eliminate the
  // binary operator, not replace a binop with a select.
  int SelOpNo = 0;

  CastInst *CastOp;

  // TODO: Should probably try to handle some cases with multiple
  // users. Duplicating the select may be profitable for division.
  SelectInst *Sel = findSelectThroughCast(BO.getOperand(0), CastOp);
  if (!Sel || !Sel->hasOneUse()) {
    SelOpNo = 1;
    Sel = findSelectThroughCast(BO.getOperand(1), CastOp);
  }

  if (!Sel || !Sel->hasOneUse())
    return false;

  Constant *CT = dyn_cast<Constant>(Sel->getTrueValue());
  Constant *CF = dyn_cast<Constant>(Sel->getFalseValue());
  Constant *CBO = dyn_cast<Constant>(BO.getOperand(SelOpNo ^ 1));
  if (!CBO || !CT || !CF)
    return false;

  if (CastOp) {
    if (!CastOp->hasOneUse())
      return false;
    CT = ConstantFoldCastOperand(CastOp->getOpcode(), CT, BO.getType(), *DL);
    CF = ConstantFoldCastOperand(CastOp->getOpcode(), CF, BO.getType(), *DL);
  }

  // TODO: Handle special 0/-1 cases DAG combine does, although we only really
  // need to handle divisions here.
  Constant *FoldedT = SelOpNo ?
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CT, *DL) :
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CT, CBO, *DL);
  if (isa<ConstantExpr>(FoldedT))
    return false;

  Constant *FoldedF = SelOpNo ?
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CF, *DL) :
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CF, CBO, *DL);
  if (isa<ConstantExpr>(FoldedF))
    return false;

  IRBuilder<> Builder(&BO);
  Builder.SetCurrentDebugLocation(BO.getDebugLoc());
  if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&BO))
    Builder.setFastMathFlags(FPOp->getFastMathFlags());

  Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
                                          FoldedT, FoldedF);
  NewSelect->takeName(&BO);
  BO.replaceAllUsesWith(NewSelect);
  BO.eraseFromParent();
  if (CastOp)
    CastOp->eraseFromParent();
  Sel->eraseFromParent();
  return true;
}

// Optimize fdiv with rcp:
//
// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
// allowed with unsafe-fp-math or afn.
//
// a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
static Value *optimizeWithRcp(Value *Num, Value *Den, bool AllowInaccurateRcp,
                              bool RcpIsAccurate, IRBuilder<> &Builder,
                              Module *Mod) {

  if (!AllowInaccurateRcp && !RcpIsAccurate)
    return nullptr;

  Type *Ty = Den->getType();
  if (const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num)) {
    if (AllowInaccurateRcp || RcpIsAccurate) {
      if (CLHS->isExactlyValue(1.0)) {
        Function *Decl = Intrinsic::getDeclaration(
          Mod, Intrinsic::amdgcn_rcp, Ty);

        // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
        // the CI documentation they have a worst case error of 1 ulp.
        // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
        // use it as long as we aren't trying to use denormals.
        //
        // v_rcp_f16 and v_rsq_f16 DO support denormals.

        // NOTE: v_sqrt and v_rcp will be combined to v_rsq later. So we don't
        //       insert rsq intrinsic here.

        // 1.0 / x -> rcp(x)
        return Builder.CreateCall(Decl, { Den });
      }

      // Same as for 1.0, but expand the sign out of the constant.
      if (CLHS->isExactlyValue(-1.0)) {
        Function *Decl = Intrinsic::getDeclaration(
          Mod, Intrinsic::amdgcn_rcp, Ty);

        // -1.0 / x -> rcp (fneg x)
        Value *FNeg = Builder.CreateFNeg(Den);
        return Builder.CreateCall(Decl, { FNeg });
      }
    }
  }

  if (AllowInaccurateRcp) {
    Function *Decl = Intrinsic::getDeclaration(
      Mod, Intrinsic::amdgcn_rcp, Ty);

    // Turn into multiply by the reciprocal.
    // x / y -> x * (1.0 / y)
    Value *Recip = Builder.CreateCall(Decl, { Den });
    return Builder.CreateFMul(Num, Recip);
  }
  return nullptr;
}
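
// Illustrative example: with the 'afn' fast-math flag (or unsafe-fp-math),
//   %d = fdiv afn float %x, %y
// is rewritten as
//   %r = call float @llvm.amdgcn.rcp.f32(float %y)
//   %d = fmul afn float %x, %r
// and 1.0 / %y collapses to the bare rcp call.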

// optimize with fdiv.fast:
//
// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
// 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
//
// NOTE: optimizeWithRcp should be tried first because rcp is the preference.
static Value *optimizeWithFDivFast(Value *Num, Value *Den, float ReqdAccuracy,
                                   bool HasDenormals, IRBuilder<> &Builder,
                                   Module *Mod) {
  // fdiv.fast can achieve 2.5 ULP accuracy.
  if (ReqdAccuracy < 2.5f)
    return nullptr;

  // Only have fdiv.fast for f32.
  Type *Ty = Den->getType();
  if (!Ty->isFloatTy())
    return nullptr;

  bool NumIsOne = false;
  if (const ConstantFP *CNum = dyn_cast<ConstantFP>(Num)) {
    if (CNum->isExactlyValue(+1.0) || CNum->isExactlyValue(-1.0))
      NumIsOne = true;
  }

  // fdiv.fast does not support denormals, but it is always fine to use for
  // 1.0 / x.
  if (HasDenormals && !NumIsOne)
    return nullptr;

  Function *Decl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_fdiv_fast);
  return Builder.CreateCall(Decl, { Num, Den });
}
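
// Illustrative example: an f32 division whose !fpmath metadata allows
// 2.5 ulp of error, with denormals flushed,
//   %d = fdiv float %x, %y, !fpmath !0   ; !0 = !{float 2.500000e+00}
// becomes
//   %d = call float @llvm.amdgcn.fdiv.fast(float %x, float %y)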

// Optimization is performed based on !fpmath metadata, fast-math flags, and
// denormal handling to optimize fdiv with either rcp or fdiv.fast.
//
// With rcp:
// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
// allowed with unsafe-fp-math or afn.
//
// a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
//
// With fdiv.fast:
// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
// 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
//
// NOTE: rcp is the preference in cases that both are legal.
bool AMDGPUCodeGenPrepare::visitFDiv(BinaryOperator &FDiv) {

  Type *Ty = FDiv.getType()->getScalarType();

  // No intrinsic for fdiv16 if target does not support f16.
  if (Ty->isHalfTy() && !ST->has16BitInsts())
    return false;

  const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
  const float ReqdAccuracy = FPOp->getFPAccuracy();

  // Inaccurate rcp is allowed with unsafe-fp-math or afn.
  FastMathFlags FMF = FPOp->getFastMathFlags();
  const bool AllowInaccurateRcp = HasUnsafeFPMath || FMF.approxFunc();

  // rcp_f16 is accurate for !fpmath >= 1.0ulp.
  // rcp_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
  // rcp_f64 is never accurate.
  const bool RcpIsAccurate = (Ty->isHalfTy() && ReqdAccuracy >= 1.0f) ||
            (Ty->isFloatTy() && !HasFP32Denormals && ReqdAccuracy >= 1.0f);

  IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
  Builder.setFastMathFlags(FMF);
  Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

  Value *Num = FDiv.getOperand(0);
  Value *Den = FDiv.getOperand(1);

  Value *NewFDiv = nullptr;
  if (auto *VT = dyn_cast<FixedVectorType>(FDiv.getType())) {
    NewFDiv = UndefValue::get(VT);

    // FIXME: Doesn't do the right thing for cases where the vector is partially
    // constant. This works when the scalarizer pass is run first.
    for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
      Value *NumEltI = Builder.CreateExtractElement(Num, I);
      Value *DenEltI = Builder.CreateExtractElement(Den, I);
      // Try rcp first.
      Value *NewElt = optimizeWithRcp(NumEltI, DenEltI, AllowInaccurateRcp,
                                      RcpIsAccurate, Builder, Mod);
      if (!NewElt) // Try fdiv.fast.
        NewElt = optimizeWithFDivFast(NumEltI, DenEltI, ReqdAccuracy,
                                      HasFP32Denormals, Builder, Mod);
      if (!NewElt) // Keep the original.
        NewElt = Builder.CreateFDiv(NumEltI, DenEltI);

      NewFDiv = Builder.CreateInsertElement(NewFDiv, NewElt, I);
    }
  } else { // Scalar FDiv.
    // Try rcp first.
    NewFDiv = optimizeWithRcp(Num, Den, AllowInaccurateRcp, RcpIsAccurate,
                              Builder, Mod);
    if (!NewFDiv) { // Try fdiv.fast.
      NewFDiv = optimizeWithFDivFast(Num, Den, ReqdAccuracy, HasFP32Denormals,
                                     Builder, Mod);
    }
  }

  if (NewFDiv) {
    FDiv.replaceAllUsesWith(NewFDiv);
    NewFDiv->takeName(&FDiv);
    FDiv.eraseFromParent();
  }

  return !!NewFDiv;
}

static bool hasUnsafeFPMath(const Function &F) {
  Attribute Attr = F.getFnAttribute("unsafe-fp-math");
  return Attr.getValueAsString() == "true";
}

static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
                                          Value *LHS, Value *RHS) {
  Type *I32Ty = Builder.getInt32Ty();
  Type *I64Ty = Builder.getInt64Ty();

  Value *LHS_EXT64 = Builder.CreateZExt(LHS, I64Ty);
  Value *RHS_EXT64 = Builder.CreateZExt(RHS, I64Ty);
  Value *MUL64 = Builder.CreateMul(LHS_EXT64, RHS_EXT64);
  Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
  Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
  Hi = Builder.CreateTrunc(Hi, I32Ty);
  return std::make_pair(Lo, Hi);
}

static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
  return getMul64(Builder, LHS, RHS).second;
}

/// Figure out how many bits are really needed for this division. \p AtLeast is
/// an optimization hint to bypass the second ComputeNumSignBits call if the
/// first one is insufficient. Returns -1 on failure.
int AMDGPUCodeGenPrepare::getDivNumBits(BinaryOperator &I,
                                        Value *Num, Value *Den,
                                        unsigned AtLeast, bool IsSigned) const {
  const DataLayout &DL = Mod->getDataLayout();
  unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
  if (LHSSignBits < AtLeast)
    return -1;

  unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
  if (RHSSignBits < AtLeast)
    return -1;

  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
  if (IsSigned)
    ++DivBits;
  return DivBits;
}
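
// Illustrative example: in an i32 sdiv where both operands are sign
// extensions from i16, ComputeNumSignBits returns at least 17 for each
// operand, so DivBits = 32 - 17 + 1 = 16 and the divide fits the 24-bit
// expansion below.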

// The fractional part of a float is enough to accurately represent up to
// a 24-bit signed integer.
Value *AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
                                            BinaryOperator &I,
                                            Value *Num, Value *Den,
                                            bool IsDiv, bool IsSigned) const {
  int DivBits = getDivNumBits(I, Num, Den, 9, IsSigned);
  if (DivBits == -1)
    return nullptr;
  return expandDivRem24Impl(Builder, I, Num, Den, DivBits, IsDiv, IsSigned);
}

Value *AMDGPUCodeGenPrepare::expandDivRem24Impl(IRBuilder<> &Builder,
                                                BinaryOperator &I,
                                                Value *Num, Value *Den,
                                                unsigned DivBits,
                                                bool IsDiv, bool IsSigned) const {
  Type *I32Ty = Builder.getInt32Ty();
  Num = Builder.CreateTrunc(Num, I32Ty);
  Den = Builder.CreateTrunc(Den, I32Ty);

  Type *F32Ty = Builder.getFloatTy();
  ConstantInt *One = Builder.getInt32(1);
  Value *JQ = One;

  if (IsSigned) {
    // char|short jq = ia ^ ib;
    JQ = Builder.CreateXor(Num, Den);

    // jq = jq >> (bitsize - 2)
    JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));

    // jq = jq | 0x1
    JQ = Builder.CreateOr(JQ, One);
  }

  // int ia = (int)LHS;
  Value *IA = Num;

  // int ib = (int)RHS;
  Value *IB = Den;

  // float fa = (float)ia;
  Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
                       : Builder.CreateUIToFP(IA, F32Ty);

  // float fb = (float)ib;
  Value *FB = IsSigned ? Builder.CreateSIToFP(IB, F32Ty)
                       : Builder.CreateUIToFP(IB, F32Ty);

  Function *RcpDecl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp,
                                                Builder.getFloatTy());
  Value *RCP = Builder.CreateCall(RcpDecl, { FB });
  Value *FQM = Builder.CreateFMul(FA, RCP);

  // fq = trunc(fqm);
  CallInst *FQ = Builder.CreateUnaryIntrinsic(Intrinsic::trunc, FQM);
  FQ->copyFastMathFlags(Builder.getFastMathFlags());

  // float fqneg = -fq;
  Value *FQNeg = Builder.CreateFNeg(FQ);

  // float fr = mad(fqneg, fb, fa);
  auto FMAD = !ST->hasMadMacF32Insts()
                  ? Intrinsic::fma
                  : (Intrinsic::ID)Intrinsic::amdgcn_fmad_ftz;
  Value *FR = Builder.CreateIntrinsic(FMAD,
                                      {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);

  // int iq = (int)fq;
  Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
                       : Builder.CreateFPToUI(FQ, I32Ty);

  // fr = fabs(fr);
  FR = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FR, FQ);

  // fb = fabs(fb);
  FB = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FB, FQ);

  // int cv = fr >= fb;
  Value *CV = Builder.CreateFCmpOGE(FR, FB);

  // jq = (cv ? jq : 0);
  JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));

  // dst = iq + jq;
  Value *Div = Builder.CreateAdd(IQ, JQ);

  Value *Res = Div;
  if (!IsDiv) {
    // Rem needs compensation, it's easier to recompute it
    Value *Rem = Builder.CreateMul(Div, Den);
    Res = Builder.CreateSub(Num, Rem);
  }

  if (DivBits != 0 && DivBits < 32) {
    // Extend in register from the number of bits this divide really is.
    if (IsSigned) {
      int InRegBits = 32 - DivBits;

      Res = Builder.CreateShl(Res, InRegBits);
      Res = Builder.CreateAShr(Res, InRegBits);
    } else {
      ConstantInt *TruncMask
        = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
      Res = Builder.CreateAnd(Res, TruncMask);
    }
  }

  return Res;
}
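
// Illustrative note: an IEEE f32 value carries a 24-bit significand, so
// integers of magnitude up to 2^24 convert to float exactly; 16777215
// (2^24 - 1) is exactly representable, while 16777217 (2^24 + 1) is not.
// This is why the expansion above is limited to at most 24-bit divides.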

// Try to recognize special cases for which the DAG emits better expansions
// than the general expansion we do here.

// TODO: It would be better to just directly handle those optimizations here.
bool AMDGPUCodeGenPrepare::divHasSpecialOptimization(
    BinaryOperator &I, Value *Num, Value *Den) const {
  if (Constant *C = dyn_cast<Constant>(Den)) {
    // Arbitrary constants get a better expansion as long as a wider mulhi is
    // legal.
    if (C->getType()->getScalarSizeInBits() <= 32)
      return true;

    // TODO: The SDiv case checks for "not exact" for some reason.

    // If there's no wider mulhi, there's only a better expansion for powers of
    // two.
    // TODO: Should really know for each vector element.
    if (isKnownToBeAPowerOfTwo(C, *DL, true, 0, AC, &I, DT))
      return true;

    return false;
  }

  if (BinaryOperator *BinOpDen = dyn_cast<BinaryOperator>(Den)) {
    // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
    if (BinOpDen->getOpcode() == Instruction::Shl &&
        isa<Constant>(BinOpDen->getOperand(0)) &&
        isKnownToBeAPowerOfTwo(BinOpDen->getOperand(0), *DL, true,
                               0, AC, &I, DT)) {
      return true;
    }
  }

  return false;
}

static Value *getSign32(Value *V, IRBuilder<> &Builder, const DataLayout *DL) {
  // Check whether the sign can be determined statically.
  KnownBits Known = computeKnownBits(V, *DL);
  if (Known.isNegative())
    return Constant::getAllOnesValue(V->getType());
  if (Known.isNonNegative())
    return Constant::getNullValue(V->getType());
  return Builder.CreateAShr(V, Builder.getInt32(31));
}

Value *AMDGPUCodeGenPrepare::expandDivRem32(IRBuilder<> &Builder,
                                            BinaryOperator &I, Value *X,
                                            Value *Y) const {
  Instruction::BinaryOps Opc = I.getOpcode();
  assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
         Opc == Instruction::SRem || Opc == Instruction::SDiv);

  FastMathFlags FMF;
  FMF.setFast();
  Builder.setFastMathFlags(FMF);

  if (divHasSpecialOptimization(I, X, Y))
    return nullptr; // Keep it for later optimization.

  bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
  bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;

  Type *Ty = X->getType();
  Type *I32Ty = Builder.getInt32Ty();
  Type *F32Ty = Builder.getFloatTy();

  if (Ty->getScalarSizeInBits() < 32) {
    if (IsSigned) {
      X = Builder.CreateSExt(X, I32Ty);
      Y = Builder.CreateSExt(Y, I32Ty);
    } else {
      X = Builder.CreateZExt(X, I32Ty);
      Y = Builder.CreateZExt(Y, I32Ty);
    }
  }

  if (Value *Res = expandDivRem24(Builder, I, X, Y, IsDiv, IsSigned)) {
    return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty) :
                      Builder.CreateZExtOrTrunc(Res, Ty);
  }

  ConstantInt *Zero = Builder.getInt32(0);
  ConstantInt *One = Builder.getInt32(1);

  Value *Sign = nullptr;
  if (IsSigned) {
    Value *SignX = getSign32(X, Builder, DL);
    Value *SignY = getSign32(Y, Builder, DL);
    // Remainder sign is the same as LHS
    Sign = IsDiv ? Builder.CreateXor(SignX, SignY) : SignX;
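
    // Illustrative note: the add/xor pair below computes |V| in two's
    // complement as (V + SignV) ^ SignV. E.g. for X = -5, SignX = -1,
    // X + SignX = -6, and -6 ^ -1 = 5; for non-negative X, SignX = 0 and
    // X is unchanged.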

    X = Builder.CreateAdd(X, SignX);
    Y = Builder.CreateAdd(Y, SignY);

    X = Builder.CreateXor(X, SignX);
    Y = Builder.CreateXor(Y, SignY);
  }

  // The algorithm here is based on ideas from "Software Integer Division", Tom
  // Rodeheffer, August 2008.
  //
  // unsigned udiv(unsigned x, unsigned y) {
  //   // Initial estimate of inv(y). The constant is less than 2^32 to ensure
  //   // that this is a lower bound on inv(y), even if some of the calculations
  //   // round up.
  //   unsigned z = (unsigned)((4294967296.0 - 512.0) * v_rcp_f32((float)y));
  //
  //   // One round of UNR (Unsigned integer Newton-Raphson) to improve z.
  //   // Empirically this is guaranteed to give a "two-y" lower bound on
  //   // inv(y).
  //   z += umulh(z, -y * z);
  //
  //   // Quotient/remainder estimate.
  //   unsigned q = umulh(x, z);
  //   unsigned r = x - q * y;
  //
  //   // Two rounds of quotient/remainder refinement.
  //   if (r >= y) {
  //     ++q;
  //     r -= y;
  //   }
  //   if (r >= y) {
  //     ++q;
  //     r -= y;
  //   }
  //
  //   return q;
  // }

  // Initial estimate of inv(y).
  Value *FloatY = Builder.CreateUIToFP(Y, F32Ty);
  Function *Rcp = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp, F32Ty);
  Value *RcpY = Builder.CreateCall(Rcp, {FloatY});
  Constant *Scale = ConstantFP::get(F32Ty, BitsToFloat(0x4F7FFFFE));
  Value *ScaledY = Builder.CreateFMul(RcpY, Scale);
  Value *Z = Builder.CreateFPToUI(ScaledY, I32Ty);

  // One round of UNR.
  Value *NegY = Builder.CreateSub(Zero, Y);
  Value *NegYZ = Builder.CreateMul(NegY, Z);
  Z = Builder.CreateAdd(Z, getMulHu(Builder, Z, NegYZ));

  // Quotient/remainder estimate.
  Value *Q = getMulHu(Builder, X, Z);
  Value *R = Builder.CreateSub(X, Builder.CreateMul(Q, Y));

  // First quotient/remainder refinement.
  Value *Cond = Builder.CreateICmpUGE(R, Y);
  if (IsDiv)
    Q = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
  R = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);

  // Second quotient/remainder refinement.
  Cond = Builder.CreateICmpUGE(R, Y);
  Value *Res;
  if (IsDiv)
    Res = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
  else
    Res = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);

  if (IsSigned) {
    Res = Builder.CreateXor(Res, Sign);
    Res = Builder.CreateSub(Res, Sign);
  }

  Res = Builder.CreateTrunc(Res, Ty);

  return Res;
}

Value *AMDGPUCodeGenPrepare::shrinkDivRem64(IRBuilder<> &Builder,
                                            BinaryOperator &I,
                                            Value *Num, Value *Den) const {
  if (!ExpandDiv64InIR && divHasSpecialOptimization(I, Num, Den))
    return nullptr; // Keep it for later optimization.

  Instruction::BinaryOps Opc = I.getOpcode();

  bool IsDiv = Opc == Instruction::SDiv || Opc == Instruction::UDiv;
  bool IsSigned = Opc == Instruction::SDiv || Opc == Instruction::SRem;

  int NumDivBits = getDivNumBits(I, Num, Den, 32, IsSigned);
  if (NumDivBits == -1)
    return nullptr;

  Value *Narrowed = nullptr;
  if (NumDivBits <= 24) {
    Narrowed = expandDivRem24Impl(Builder, I, Num, Den, NumDivBits,
                                  IsDiv, IsSigned);
  } else if (NumDivBits <= 32) {
    Narrowed = expandDivRem32(Builder, I, Num, Den);
  }

  if (Narrowed) {
    return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType()) :
                      Builder.CreateZExt(Narrowed, Num->getType());
  }

  return nullptr;
}
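
// Illustrative example: for
//   %d = sdiv i64 %a, %b
// where %a and %b are sign extensions from i16, getDivNumBits reports 16
// bits, so the divide is performed with the narrow 24-bit expansion and the
// result sign-extended back to i64, avoiding the full 64-bit expansion.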

void AMDGPUCodeGenPrepare::expandDivRem64(BinaryOperator &I) const {
  Instruction::BinaryOps Opc = I.getOpcode();
  // Do the general expansion.
  if (Opc == Instruction::UDiv || Opc == Instruction::SDiv) {
    expandDivisionUpTo64Bits(&I);
    return;
  }

  if (Opc == Instruction::URem || Opc == Instruction::SRem) {
    expandRemainderUpTo64Bits(&I);
    return;
  }

  llvm_unreachable("not a division");
}

bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
  if (foldBinOpIntoSelect(I))
    return true;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I) && promoteUniformOpToI32(I))
    return true;

  if (UseMul24Intrin && replaceMulWithMul24(I))
    return true;

  bool Changed = false;
  Instruction::BinaryOps Opc = I.getOpcode();
  Type *Ty = I.getType();
  Value *NewDiv = nullptr;
  unsigned ScalarSize = Ty->getScalarSizeInBits();

  SmallVector<BinaryOperator *, 8> Div64ToExpand;

  if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
       Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
      ScalarSize <= 64 &&
      !DisableIDivExpand) {
    Value *Num = I.getOperand(0);
    Value *Den = I.getOperand(1);
    IRBuilder<> Builder(&I);
    Builder.SetCurrentDebugLocation(I.getDebugLoc());

    if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
      NewDiv = UndefValue::get(VT);

      for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
        Value *NumEltN = Builder.CreateExtractElement(Num, N);
        Value *DenEltN = Builder.CreateExtractElement(Den, N);

        Value *NewElt;
        if (ScalarSize <= 32) {
          NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
          if (!NewElt)
            NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
        } else {
          // See if this 64-bit division can be shrunk to 32/24-bits before
          // producing the general expansion.
          NewElt = shrinkDivRem64(Builder, I, NumEltN, DenEltN);
          if (!NewElt) {
            // The general 64-bit expansion introduces control flow and doesn't
            // return the new value. Just insert a scalar copy and defer
            // expanding it.
            NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
            Div64ToExpand.push_back(cast<BinaryOperator>(NewElt));
          }
        }

        NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
      }
    } else {
      if (ScalarSize <= 32)
        NewDiv = expandDivRem32(Builder, I, Num, Den);
      else {
        NewDiv = shrinkDivRem64(Builder, I, Num, Den);
        if (!NewDiv)
          Div64ToExpand.push_back(&I);
      }
    }

    if (NewDiv) {
      I.replaceAllUsesWith(NewDiv);
      I.eraseFromParent();
      Changed = true;
    }
  }

  if (ExpandDiv64InIR) {
    // TODO: We get much worse code in specially handled constant cases.
    for (BinaryOperator *Div : Div64ToExpand) {
      expandDivRem64(*Div);
      Changed = true;
    }
  }

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitLoadInst(LoadInst &I) {
  if (!WidenLoads)
    return false;

  if ((I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
       I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
      canWidenScalarExtLoad(I)) {
    IRBuilder<> Builder(&I);
    Builder.SetCurrentDebugLocation(I.getDebugLoc());

    Type *I32Ty = Builder.getInt32Ty();
    Type *PT = PointerType::get(I32Ty, I.getPointerAddressSpace());
    Value *BitCast = Builder.CreateBitCast(I.getPointerOperand(), PT);
    LoadInst *WidenLoad = Builder.CreateLoad(I32Ty, BitCast);
    WidenLoad->copyMetadata(I);

    // If we have range metadata, we need to convert the type, and not make
    // assumptions about the high bits.
    if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
      ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Range->getOperand(0));

      if (Lower->getValue().isNullValue()) {
        WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
      } else {
        Metadata *LowAndHigh[] = {
          ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
          // Don't make assumptions about the high bits.
          ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
        };

        WidenLoad->setMetadata(LLVMContext::MD_range,
                               MDNode::get(Mod->getContext(), LowAndHigh));
      }
    }

    int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
    Type *IntNTy = Builder.getIntNTy(TySize);
    Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
    Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
    I.replaceAllUsesWith(ValOrig);
    I.eraseFromParent();
    return true;
  }

  return false;
}

bool AMDGPUCodeGenPrepare::visitICmpInst(ICmpInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitSelectInst(SelectInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
  switch (I.getIntrinsicID()) {
  case Intrinsic::bitreverse:
    return visitBitreverseIntrinsicInst(I);
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepare::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformBitreverseToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();
  return false;
}

bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
  ST = &TM.getSubtarget<GCNSubtarget>(F);
  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  DA = &getAnalysis<LegacyDivergenceAnalysis>();

  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;

  HasUnsafeFPMath = hasUnsafeFPMath(F);

  AMDGPU::SIModeRegisterDefaults Mode(F);
  HasFP32Denormals = Mode.allFP32Denormals();

  bool MadeChange = false;

  Function::iterator NextBB;
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; FI = NextBB) {
    BasicBlock *BB = &*FI;
    NextBB = std::next(FI);

    BasicBlock::iterator Next;
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
         I = Next) {
      Next = std::next(I);

      MadeChange |= visit(*I);

      if (Next != E) { // Control flow changed
        BasicBlock *NextInstBB = Next->getParent();
        if (NextInstBB != BB) {
          BB = NextInstBB;
          E = BB->end();
          FE = F.end();
        }
      }
    }
  }

  return MadeChange;
}

INITIALIZE_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
                      "AMDGPU IR optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
                    false, false)

char AMDGPUCodeGenPrepare::ID = 0;

FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
  return new AMDGPUCodeGenPrepare();
}