1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This file implements the IRTranslator class. 10 //===----------------------------------------------------------------------===// 11 12 #include "llvm/CodeGen/GlobalISel/IRTranslator.h" 13 #include "llvm/ADT/PostOrderIterator.h" 14 #include "llvm/ADT/STLExtras.h" 15 #include "llvm/ADT/ScopeExit.h" 16 #include "llvm/ADT/SmallSet.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/Analysis/BranchProbabilityInfo.h" 19 #include "llvm/Analysis/Loads.h" 20 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 21 #include "llvm/Analysis/ValueTracking.h" 22 #include "llvm/CodeGen/Analysis.h" 23 #include "llvm/CodeGen/GlobalISel/CallLowering.h" 24 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" 25 #include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h" 26 #include "llvm/CodeGen/LowLevelType.h" 27 #include "llvm/CodeGen/MachineBasicBlock.h" 28 #include "llvm/CodeGen/MachineFrameInfo.h" 29 #include "llvm/CodeGen/MachineFunction.h" 30 #include "llvm/CodeGen/MachineInstrBuilder.h" 31 #include "llvm/CodeGen/MachineMemOperand.h" 32 #include "llvm/CodeGen/MachineModuleInfo.h" 33 #include "llvm/CodeGen/MachineOperand.h" 34 #include "llvm/CodeGen/MachineRegisterInfo.h" 35 #include "llvm/CodeGen/StackProtector.h" 36 #include "llvm/CodeGen/SwitchLoweringUtils.h" 37 #include "llvm/CodeGen/TargetFrameLowering.h" 38 #include "llvm/CodeGen/TargetInstrInfo.h" 39 #include "llvm/CodeGen/TargetLowering.h" 40 #include "llvm/CodeGen/TargetPassConfig.h" 41 #include "llvm/CodeGen/TargetRegisterInfo.h" 42 #include "llvm/CodeGen/TargetSubtargetInfo.h" 43 #include "llvm/IR/BasicBlock.h" 44 #include "llvm/IR/CFG.h" 45 #include "llvm/IR/Constant.h" 46 #include "llvm/IR/Constants.h" 47 #include "llvm/IR/DataLayout.h" 48 #include "llvm/IR/DebugInfo.h" 49 #include "llvm/IR/DerivedTypes.h" 50 #include "llvm/IR/Function.h" 51 #include "llvm/IR/GetElementPtrTypeIterator.h" 52 #include "llvm/IR/InlineAsm.h" 53 #include "llvm/IR/InstrTypes.h" 54 #include "llvm/IR/Instructions.h" 55 #include "llvm/IR/IntrinsicInst.h" 56 #include "llvm/IR/Intrinsics.h" 57 #include "llvm/IR/LLVMContext.h" 58 #include "llvm/IR/Metadata.h" 59 #include "llvm/IR/PatternMatch.h" 60 #include "llvm/IR/Type.h" 61 #include "llvm/IR/User.h" 62 #include "llvm/IR/Value.h" 63 #include "llvm/InitializePasses.h" 64 #include "llvm/MC/MCContext.h" 65 #include "llvm/Pass.h" 66 #include "llvm/Support/Casting.h" 67 #include "llvm/Support/CodeGen.h" 68 #include "llvm/Support/Debug.h" 69 #include "llvm/Support/ErrorHandling.h" 70 #include "llvm/Support/LowLevelTypeImpl.h" 71 #include "llvm/Support/MathExtras.h" 72 #include "llvm/Support/raw_ostream.h" 73 #include "llvm/Target/TargetIntrinsicInfo.h" 74 #include "llvm/Target/TargetMachine.h" 75 #include <algorithm> 76 #include <cassert> 77 #include <cstddef> 78 #include <cstdint> 79 #include <iterator> 80 #include <string> 81 #include <utility> 82 #include <vector> 83 84 #define DEBUG_TYPE "irtranslator" 85 86 using namespace llvm; 87 88 static cl::opt<bool> 89 EnableCSEInIRTranslator("enable-cse-in-irtranslator", 90 cl::desc("Should enable CSE in irtranslator"), 91 cl::Optional, cl::init(false)); 92 char IRTranslator::ID 
= 0; 93 94 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 95 false, false) 96 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) 97 INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass) 98 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 99 INITIALIZE_PASS_DEPENDENCY(StackProtector) 100 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 101 false, false) 102 103 static void reportTranslationError(MachineFunction &MF, 104 const TargetPassConfig &TPC, 105 OptimizationRemarkEmitter &ORE, 106 OptimizationRemarkMissed &R) { 107 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel); 108 109 // Print the function name explicitly if we don't have a debug location (which 110 // makes the diagnostic less useful) or if we're going to emit a raw error. 111 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled()) 112 R << (" (in function: " + MF.getName() + ")").str(); 113 114 if (TPC.isGlobalISelAbortEnabled()) 115 report_fatal_error(R.getMsg()); 116 else 117 ORE.emit(R); 118 } 119 120 IRTranslator::IRTranslator(CodeGenOpt::Level optlevel) 121 : MachineFunctionPass(ID), OptLevel(optlevel) {} 122 123 #ifndef NDEBUG 124 namespace { 125 /// Verify that every instruction created has the same DILocation as the 126 /// instruction being translated. 127 class DILocationVerifier : public GISelChangeObserver { 128 const Instruction *CurrInst = nullptr; 129 130 public: 131 DILocationVerifier() = default; 132 ~DILocationVerifier() = default; 133 134 const Instruction *getCurrentInst() const { return CurrInst; } 135 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; } 136 137 void erasingInstr(MachineInstr &MI) override {} 138 void changingInstr(MachineInstr &MI) override {} 139 void changedInstr(MachineInstr &MI) override {} 140 141 void createdInstr(MachineInstr &MI) override { 142 assert(getCurrentInst() && "Inserted instruction without a current MI"); 143 144 // Only print the check message if we're actually checking it. 145 #ifndef NDEBUG 146 LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst 147 << " was copied to " << MI); 148 #endif 149 // We allow insts in the entry block to have a debug loc line of 0 because 150 // they could have originated from constants, and we don't want a jumpy 151 // debug experience. 152 assert((CurrInst->getDebugLoc() == MI.getDebugLoc() || 153 MI.getDebugLoc().getLine() == 0) && 154 "Line info was not transferred to all instructions"); 155 } 156 }; 157 } // namespace 158 #endif // ifndef NDEBUG 159 160 161 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const { 162 AU.addRequired<StackProtector>(); 163 AU.addRequired<TargetPassConfig>(); 164 AU.addRequired<GISelCSEAnalysisWrapperPass>(); 165 if (OptLevel != CodeGenOpt::None) 166 AU.addRequired<BranchProbabilityInfoWrapperPass>(); 167 getSelectionDAGFallbackAnalysisUsage(AU); 168 MachineFunctionPass::getAnalysisUsage(AU); 169 } 170 171 IRTranslator::ValueToVRegInfo::VRegListT & 172 IRTranslator::allocateVRegs(const Value &Val) { 173 auto VRegsIt = VMap.findVRegs(Val); 174 if (VRegsIt != VMap.vregs_end()) 175 return *VRegsIt->second; 176 auto *Regs = VMap.getVRegs(Val); 177 auto *Offsets = VMap.getOffsets(Val); 178 SmallVector<LLT, 4> SplitTys; 179 computeValueLLTs(*DL, *Val.getType(), SplitTys, 180 Offsets->empty() ? 
Offsets : nullptr); 181 for (unsigned i = 0; i < SplitTys.size(); ++i) 182 Regs->push_back(0); 183 return *Regs; 184 } 185 186 ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) { 187 auto VRegsIt = VMap.findVRegs(Val); 188 if (VRegsIt != VMap.vregs_end()) 189 return *VRegsIt->second; 190 191 if (Val.getType()->isVoidTy()) 192 return *VMap.getVRegs(Val); 193 194 // Create entry for this type. 195 auto *VRegs = VMap.getVRegs(Val); 196 auto *Offsets = VMap.getOffsets(Val); 197 198 assert(Val.getType()->isSized() && 199 "Don't know how to create an empty vreg"); 200 201 SmallVector<LLT, 4> SplitTys; 202 computeValueLLTs(*DL, *Val.getType(), SplitTys, 203 Offsets->empty() ? Offsets : nullptr); 204 205 if (!isa<Constant>(Val)) { 206 for (auto Ty : SplitTys) 207 VRegs->push_back(MRI->createGenericVirtualRegister(Ty)); 208 return *VRegs; 209 } 210 211 if (Val.getType()->isAggregateType()) { 212 // UndefValue, ConstantAggregateZero 213 auto &C = cast<Constant>(Val); 214 unsigned Idx = 0; 215 while (auto Elt = C.getAggregateElement(Idx++)) { 216 auto EltRegs = getOrCreateVRegs(*Elt); 217 llvm::copy(EltRegs, std::back_inserter(*VRegs)); 218 } 219 } else { 220 assert(SplitTys.size() == 1 && "unexpectedly split LLT"); 221 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0])); 222 bool Success = translate(cast<Constant>(Val), VRegs->front()); 223 if (!Success) { 224 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 225 MF->getFunction().getSubprogram(), 226 &MF->getFunction().getEntryBlock()); 227 R << "unable to translate constant: " << ore::NV("Type", Val.getType()); 228 reportTranslationError(*MF, *TPC, *ORE, R); 229 return *VRegs; 230 } 231 } 232 233 return *VRegs; 234 } 235 236 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) { 237 auto MapEntry = FrameIndices.find(&AI); 238 if (MapEntry != FrameIndices.end()) 239 return MapEntry->second; 240 241 uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType()); 242 uint64_t Size = 243 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue(); 244 245 // Always allocate at least one byte. 246 Size = std::max<uint64_t>(Size, 1u); 247 248 int &FI = FrameIndices[&AI]; 249 FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI); 250 return FI; 251 } 252 253 Align IRTranslator::getMemOpAlign(const Instruction &I) { 254 if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) 255 return SI->getAlign(); 256 if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) { 257 return LI->getAlign(); 258 } 259 if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) { 260 // TODO(PR27168): This instruction has no alignment attribute, but unlike 261 // the default alignment for load/store, the default here is to assume 262 // it has NATURAL alignment, not DataLayout-specified alignment. 263 const DataLayout &DL = AI->getModule()->getDataLayout(); 264 return Align(DL.getTypeStoreSize(AI->getCompareOperand()->getType())); 265 } 266 if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) { 267 // TODO(PR27168): This instruction has no alignment attribute, but unlike 268 // the default alignment for load/store, the default here is to assume 269 // it has NATURAL alignment, not DataLayout-specified alignment. 
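    // For example, an atomicrmw or cmpxchg on an i32 is assumed to be 4-byte
    // aligned (the type's store size), even if the DataLayout would give the
    // type a different ABI alignment.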
270 const DataLayout &DL = AI->getModule()->getDataLayout(); 271 return Align(DL.getTypeStoreSize(AI->getValOperand()->getType())); 272 } 273 OptimizationRemarkMissed R("gisel-irtranslator", "", &I); 274 R << "unable to translate memop: " << ore::NV("Opcode", &I); 275 reportTranslationError(*MF, *TPC, *ORE, R); 276 return Align(1); 277 } 278 279 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) { 280 MachineBasicBlock *&MBB = BBToMBB[&BB]; 281 assert(MBB && "BasicBlock was not encountered before"); 282 return *MBB; 283 } 284 285 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) { 286 assert(NewPred && "new predecessor must be a real MachineBasicBlock"); 287 MachinePreds[Edge].push_back(NewPred); 288 } 289 290 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U, 291 MachineIRBuilder &MIRBuilder) { 292 // Get or create a virtual register for each value. 293 // Unless the value is a Constant => loadimm cst? 294 // or inline constant each time? 295 // Creation of a virtual register needs to have a size. 296 Register Op0 = getOrCreateVReg(*U.getOperand(0)); 297 Register Op1 = getOrCreateVReg(*U.getOperand(1)); 298 Register Res = getOrCreateVReg(U); 299 uint16_t Flags = 0; 300 if (isa<Instruction>(U)) { 301 const Instruction &I = cast<Instruction>(U); 302 Flags = MachineInstr::copyFlagsFromInstruction(I); 303 } 304 305 MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags); 306 return true; 307 } 308 309 bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U, 310 MachineIRBuilder &MIRBuilder) { 311 Register Op0 = getOrCreateVReg(*U.getOperand(0)); 312 Register Res = getOrCreateVReg(U); 313 uint16_t Flags = 0; 314 if (isa<Instruction>(U)) { 315 const Instruction &I = cast<Instruction>(U); 316 Flags = MachineInstr::copyFlagsFromInstruction(I); 317 } 318 MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags); 319 return true; 320 } 321 322 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) { 323 return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder); 324 } 325 326 bool IRTranslator::translateCompare(const User &U, 327 MachineIRBuilder &MIRBuilder) { 328 auto *CI = dyn_cast<CmpInst>(&U); 329 Register Op0 = getOrCreateVReg(*U.getOperand(0)); 330 Register Op1 = getOrCreateVReg(*U.getOperand(1)); 331 Register Res = getOrCreateVReg(U); 332 CmpInst::Predicate Pred = 333 CI ? 
CI->getPredicate() : static_cast<CmpInst::Predicate>( 334 cast<ConstantExpr>(U).getPredicate()); 335 if (CmpInst::isIntPredicate(Pred)) 336 MIRBuilder.buildICmp(Pred, Res, Op0, Op1); 337 else if (Pred == CmpInst::FCMP_FALSE) 338 MIRBuilder.buildCopy( 339 Res, getOrCreateVReg(*Constant::getNullValue(U.getType()))); 340 else if (Pred == CmpInst::FCMP_TRUE) 341 MIRBuilder.buildCopy( 342 Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType()))); 343 else { 344 assert(CI && "Instruction should be CmpInst"); 345 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, 346 MachineInstr::copyFlagsFromInstruction(*CI)); 347 } 348 349 return true; 350 } 351 352 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) { 353 const ReturnInst &RI = cast<ReturnInst>(U); 354 const Value *Ret = RI.getReturnValue(); 355 if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0) 356 Ret = nullptr; 357 358 ArrayRef<Register> VRegs; 359 if (Ret) 360 VRegs = getOrCreateVRegs(*Ret); 361 362 Register SwiftErrorVReg = 0; 363 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) { 364 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt( 365 &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg()); 366 } 367 368 // The target may mess up with the insertion point, but 369 // this is not important as a return is the last instruction 370 // of the block anyway. 371 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg); 372 } 373 374 void IRTranslator::emitBranchForMergedCondition( 375 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, 376 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, 377 BranchProbability TProb, BranchProbability FProb, bool InvertCond) { 378 // If the leaf of the tree is a comparison, merge the condition into 379 // the caseblock. 380 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) { 381 CmpInst::Predicate Condition; 382 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) { 383 Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate(); 384 } else { 385 const FCmpInst *FC = cast<FCmpInst>(Cond); 386 Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate(); 387 } 388 389 SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0), 390 BOp->getOperand(1), nullptr, TBB, FBB, CurBB, 391 CurBuilder->getDebugLoc(), TProb, FProb); 392 SL->SwitchCases.push_back(CB); 393 return; 394 } 395 396 // Create a CaseBlock record representing this branch. 397 CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ; 398 SwitchCG::CaseBlock CB( 399 Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()), 400 nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb); 401 SL->SwitchCases.push_back(CB); 402 } 403 404 static bool isValInBlock(const Value *V, const BasicBlock *BB) { 405 if (const Instruction *I = dyn_cast<Instruction>(V)) 406 return I->getParent() == BB; 407 return true; 408 } 409 410 void IRTranslator::findMergedConditions( 411 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, 412 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, 413 Instruction::BinaryOps Opc, BranchProbability TProb, 414 BranchProbability FProb, bool InvertCond) { 415 using namespace PatternMatch; 416 assert((Opc == Instruction::And || Opc == Instruction::Or) && 417 "Expected Opc to be AND/OR"); 418 // Skip over not part of the tree and remember to invert op and operands at 419 // next level. 
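  // For example, a branch on (xor (or A, B), true) with a single use is
  // handled by recursing on (or A, B) with InvertCond flipped; the code below
  // then swaps the effective And/Or opcode and inverts the leaf predicates.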
420 Value *NotCond; 421 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) && 422 isValInBlock(NotCond, CurBB->getBasicBlock())) { 423 findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb, 424 !InvertCond); 425 return; 426 } 427 428 const Instruction *BOp = dyn_cast<Instruction>(Cond); 429 const Value *BOpOp0, *BOpOp1; 430 // Compute the effective opcode for Cond, taking into account whether it needs 431 // to be inverted, e.g. 432 // and (not (or A, B)), C 433 // gets lowered as 434 // and (and (not A, not B), C) 435 Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0; 436 if (BOp) { 437 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1))) 438 ? Instruction::And 439 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1))) 440 ? Instruction::Or 441 : (Instruction::BinaryOps)0); 442 if (InvertCond) { 443 if (BOpc == Instruction::And) 444 BOpc = Instruction::Or; 445 else if (BOpc == Instruction::Or) 446 BOpc = Instruction::And; 447 } 448 } 449 450 // If this node is not part of the or/and tree, emit it as a branch. 451 // Note that all nodes in the tree should have same opcode. 452 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse(); 453 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() || 454 !isValInBlock(BOpOp0, CurBB->getBasicBlock()) || 455 !isValInBlock(BOpOp1, CurBB->getBasicBlock())) { 456 emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb, 457 InvertCond); 458 return; 459 } 460 461 // Create TmpBB after CurBB. 462 MachineFunction::iterator BBI(CurBB); 463 MachineBasicBlock *TmpBB = 464 MF->CreateMachineBasicBlock(CurBB->getBasicBlock()); 465 CurBB->getParent()->insert(++BBI, TmpBB); 466 467 if (Opc == Instruction::Or) { 468 // Codegen X | Y as: 469 // BB1: 470 // jmp_if_X TBB 471 // jmp TmpBB 472 // TmpBB: 473 // jmp_if_Y TBB 474 // jmp FBB 475 // 476 477 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. 478 // The requirement is that 479 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB) 480 // = TrueProb for original BB. 481 // Assuming the original probabilities are A and B, one choice is to set 482 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to 483 // A/(1+B) and 2B/(1+B). This choice assumes that 484 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB. 485 // Another choice is to assume TrueProb for BB1 equals to TrueProb for 486 // TmpBB, but the math is more complicated. 487 488 auto NewTrueProb = TProb / 2; 489 auto NewFalseProb = TProb / 2 + FProb; 490 // Emit the LHS condition. 491 findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb, 492 NewFalseProb, InvertCond); 493 494 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B). 495 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb}; 496 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end()); 497 // Emit the RHS condition into TmpBB. 498 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0], 499 Probs[1], InvertCond); 500 } else { 501 assert(Opc == Instruction::And && "Unknown merge op!"); 502 // Codegen X & Y as: 503 // BB1: 504 // jmp_if_X TmpBB 505 // jmp FBB 506 // TmpBB: 507 // jmp_if_Y TBB 508 // jmp FBB 509 // 510 // This requires creation of TmpBB after CurBB. 511 512 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. 513 // The requirement is that 514 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB) 515 // = FalseProb for original BB. 
516 // Assuming the original probabilities are A and B, one choice is to set 517 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to 518 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 == 519 // TrueProb for BB1 * FalseProb for TmpBB. 520 521 auto NewTrueProb = TProb + FProb / 2; 522 auto NewFalseProb = FProb / 2; 523 // Emit the LHS condition. 524 findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb, 525 NewFalseProb, InvertCond); 526 527 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A). 528 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2}; 529 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end()); 530 // Emit the RHS condition into TmpBB. 531 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0], 532 Probs[1], InvertCond); 533 } 534 } 535 536 bool IRTranslator::shouldEmitAsBranches( 537 const std::vector<SwitchCG::CaseBlock> &Cases) { 538 // For multiple cases, it's better to emit as branches. 539 if (Cases.size() != 2) 540 return true; 541 542 // If this is two comparisons of the same values or'd or and'd together, they 543 // will get folded into a single comparison, so don't emit two blocks. 544 if ((Cases[0].CmpLHS == Cases[1].CmpLHS && 545 Cases[0].CmpRHS == Cases[1].CmpRHS) || 546 (Cases[0].CmpRHS == Cases[1].CmpLHS && 547 Cases[0].CmpLHS == Cases[1].CmpRHS)) { 548 return false; 549 } 550 551 // Handle: (X != null) | (Y != null) --> (X|Y) != 0 552 // Handle: (X == null) & (Y == null) --> (X|Y) == 0 553 if (Cases[0].CmpRHS == Cases[1].CmpRHS && 554 Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred && 555 isa<Constant>(Cases[0].CmpRHS) && 556 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) { 557 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ && 558 Cases[0].TrueBB == Cases[1].ThisBB) 559 return false; 560 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE && 561 Cases[0].FalseBB == Cases[1].ThisBB) 562 return false; 563 } 564 565 return true; 566 } 567 568 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) { 569 const BranchInst &BrInst = cast<BranchInst>(U); 570 auto &CurMBB = MIRBuilder.getMBB(); 571 auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0)); 572 573 if (BrInst.isUnconditional()) { 574 // If the unconditional target is the layout successor, fallthrough. 575 if (!CurMBB.isLayoutSuccessor(Succ0MBB)) 576 MIRBuilder.buildBr(*Succ0MBB); 577 578 // Link successors. 579 for (const BasicBlock *Succ : successors(&BrInst)) 580 CurMBB.addSuccessor(&getMBB(*Succ)); 581 return true; 582 } 583 584 // If this condition is one of the special cases we handle, do special stuff 585 // now. 586 const Value *CondVal = BrInst.getCondition(); 587 MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1)); 588 589 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 590 591 // If this is a series of conditions that are or'd or and'd together, emit 592 // this as a sequence of branches instead of setcc's with and/or operations. 593 // As long as jumps are not expensive (exceptions for multi-use logic ops, 594 // unpredictable branches, and vector extracts because those jumps are likely 595 // expensive for any target), this should improve performance. 
596 // For example, instead of something like: 597 // cmp A, B 598 // C = seteq 599 // cmp D, E 600 // F = setle 601 // or C, F 602 // jnz foo 603 // Emit: 604 // cmp A, B 605 // je foo 606 // cmp D, E 607 // jle foo 608 using namespace PatternMatch; 609 const Instruction *CondI = dyn_cast<Instruction>(CondVal); 610 if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() && 611 !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) { 612 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0; 613 Value *Vec; 614 const Value *BOp0, *BOp1; 615 if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1)))) 616 Opcode = Instruction::And; 617 else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1)))) 618 Opcode = Instruction::Or; 619 620 if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) && 621 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) { 622 findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode, 623 getEdgeProbability(&CurMBB, Succ0MBB), 624 getEdgeProbability(&CurMBB, Succ1MBB), 625 /*InvertCond=*/false); 626 assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!"); 627 628 // Allow some cases to be rejected. 629 if (shouldEmitAsBranches(SL->SwitchCases)) { 630 // Emit the branch for this block. 631 emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder); 632 SL->SwitchCases.erase(SL->SwitchCases.begin()); 633 return true; 634 } 635 636 // Okay, we decided not to do this, remove any inserted MBB's and clear 637 // SwitchCases. 638 for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I) 639 MF->erase(SL->SwitchCases[I].ThisBB); 640 641 SL->SwitchCases.clear(); 642 } 643 } 644 645 // Create a CaseBlock record representing this branch. 646 SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal, 647 ConstantInt::getTrue(MF->getFunction().getContext()), 648 nullptr, Succ0MBB, Succ1MBB, &CurMBB, 649 CurBuilder->getDebugLoc()); 650 651 // Use emitSwitchCase to actually insert the fast branch sequence for this 652 // cond branch. 653 emitSwitchCase(CB, &CurMBB, *CurBuilder); 654 return true; 655 } 656 657 void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src, 658 MachineBasicBlock *Dst, 659 BranchProbability Prob) { 660 if (!FuncInfo.BPI) { 661 Src->addSuccessorWithoutProb(Dst); 662 return; 663 } 664 if (Prob.isUnknown()) 665 Prob = getEdgeProbability(Src, Dst); 666 Src->addSuccessor(Dst, Prob); 667 } 668 669 BranchProbability 670 IRTranslator::getEdgeProbability(const MachineBasicBlock *Src, 671 const MachineBasicBlock *Dst) const { 672 const BasicBlock *SrcBB = Src->getBasicBlock(); 673 const BasicBlock *DstBB = Dst->getBasicBlock(); 674 if (!FuncInfo.BPI) { 675 // If BPI is not available, set the default probability as 1 / N, where N is 676 // the number of successors. 677 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1); 678 return BranchProbability(1, SuccSize); 679 } 680 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB); 681 } 682 683 bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) { 684 using namespace SwitchCG; 685 // Extract cases from the switch. 686 const SwitchInst &SI = cast<SwitchInst>(U); 687 BranchProbabilityInfo *BPI = FuncInfo.BPI; 688 CaseClusterVector Clusters; 689 Clusters.reserve(SI.getNumCases()); 690 for (auto &I : SI.cases()) { 691 MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor()); 692 assert(Succ && "Could not find successor mbb in mapping"); 693 const ConstantInt *CaseVal = I.getCaseValue(); 694 BranchProbability Prob = 695 BPI ? 
BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex()) 696 : BranchProbability(1, SI.getNumCases() + 1); 697 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob)); 698 } 699 700 MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest()); 701 702 // Cluster adjacent cases with the same destination. We do this at all 703 // optimization levels because it's cheap to do and will make codegen faster 704 // if there are many clusters. 705 sortAndRangeify(Clusters); 706 707 MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent()); 708 709 // If there is only the default destination, jump there directly. 710 if (Clusters.empty()) { 711 SwitchMBB->addSuccessor(DefaultMBB); 712 if (DefaultMBB != SwitchMBB->getNextNode()) 713 MIB.buildBr(*DefaultMBB); 714 return true; 715 } 716 717 SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr); 718 SL->findBitTestClusters(Clusters, &SI); 719 720 LLVM_DEBUG({ 721 dbgs() << "Case clusters: "; 722 for (const CaseCluster &C : Clusters) { 723 if (C.Kind == CC_JumpTable) 724 dbgs() << "JT:"; 725 if (C.Kind == CC_BitTests) 726 dbgs() << "BT:"; 727 728 C.Low->getValue().print(dbgs(), true); 729 if (C.Low != C.High) { 730 dbgs() << '-'; 731 C.High->getValue().print(dbgs(), true); 732 } 733 dbgs() << ' '; 734 } 735 dbgs() << '\n'; 736 }); 737 738 assert(!Clusters.empty()); 739 SwitchWorkList WorkList; 740 CaseClusterIt First = Clusters.begin(); 741 CaseClusterIt Last = Clusters.end() - 1; 742 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB); 743 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb}); 744 745 // FIXME: At the moment we don't do any splitting optimizations here like 746 // SelectionDAG does, so this worklist only has one entry. 747 while (!WorkList.empty()) { 748 SwitchWorkListItem W = WorkList.back(); 749 WorkList.pop_back(); 750 if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB)) 751 return false; 752 } 753 return true; 754 } 755 756 void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT, 757 MachineBasicBlock *MBB) { 758 // Emit the code for the jump table 759 assert(JT.Reg != -1U && "Should lower JT Header first!"); 760 MachineIRBuilder MIB(*MBB->getParent()); 761 MIB.setMBB(*MBB); 762 MIB.setDebugLoc(CurBuilder->getDebugLoc()); 763 764 Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext()); 765 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL); 766 767 auto Table = MIB.buildJumpTable(PtrTy, JT.JTI); 768 MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg); 769 } 770 771 bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT, 772 SwitchCG::JumpTableHeader &JTH, 773 MachineBasicBlock *HeaderBB) { 774 MachineIRBuilder MIB(*HeaderBB->getParent()); 775 MIB.setMBB(*HeaderBB); 776 MIB.setDebugLoc(CurBuilder->getDebugLoc()); 777 778 const Value &SValue = *JTH.SValue; 779 // Subtract the lowest switch case value from the value being switched on. 780 const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL); 781 Register SwitchOpReg = getOrCreateVReg(SValue); 782 auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First); 783 auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst); 784 785 // This value may be smaller or larger than the target's pointer type, and 786 // therefore require extension or truncating. 
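  // For example, an i16 switch value is zero-extended to a 64-bit index on a
  // 64-bit target (and an i128 value would be truncated) before it is used to
  // index the jump table.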
787 Type *PtrIRTy = SValue.getType()->getPointerTo(); 788 const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy)); 789 Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub); 790 791 JT.Reg = Sub.getReg(0); 792 793 if (JTH.OmitRangeCheck) { 794 if (JT.MBB != HeaderBB->getNextNode()) 795 MIB.buildBr(*JT.MBB); 796 return true; 797 } 798 799 // Emit the range check for the jump table, and branch to the default block 800 // for the switch statement if the value being switched on exceeds the 801 // largest case in the switch. 802 auto Cst = getOrCreateVReg( 803 *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First)); 804 Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0); 805 auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst); 806 807 auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default); 808 809 // Avoid emitting unnecessary branches to the next block. 810 if (JT.MBB != HeaderBB->getNextNode()) 811 BrCond = MIB.buildBr(*JT.MBB); 812 return true; 813 } 814 815 void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB, 816 MachineBasicBlock *SwitchBB, 817 MachineIRBuilder &MIB) { 818 Register CondLHS = getOrCreateVReg(*CB.CmpLHS); 819 Register Cond; 820 DebugLoc OldDbgLoc = MIB.getDebugLoc(); 821 MIB.setDebugLoc(CB.DbgLoc); 822 MIB.setMBB(*CB.ThisBB); 823 824 if (CB.PredInfo.NoCmp) { 825 // Branch or fall through to TrueBB. 826 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb); 827 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()}, 828 CB.ThisBB); 829 CB.ThisBB->normalizeSuccProbs(); 830 if (CB.TrueBB != CB.ThisBB->getNextNode()) 831 MIB.buildBr(*CB.TrueBB); 832 MIB.setDebugLoc(OldDbgLoc); 833 return; 834 } 835 836 const LLT i1Ty = LLT::scalar(1); 837 // Build the compare. 838 if (!CB.CmpMHS) { 839 const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS); 840 // For conditional branch lowering, we might try to do something silly like 841 // emit an G_ICMP to compare an existing G_ICMP i1 result with true. If so, 842 // just re-use the existing condition vreg. 843 if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && 844 CI->getZExtValue() == 1 && CB.PredInfo.Pred == CmpInst::ICMP_EQ) { 845 Cond = CondLHS; 846 } else { 847 Register CondRHS = getOrCreateVReg(*CB.CmpRHS); 848 if (CmpInst::isFPPredicate(CB.PredInfo.Pred)) 849 Cond = 850 MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0); 851 else 852 Cond = 853 MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0); 854 } 855 } else { 856 assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE && 857 "Can only handle SLE ranges"); 858 859 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue(); 860 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue(); 861 862 Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS); 863 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) { 864 Register CondRHS = getOrCreateVReg(*CB.CmpRHS); 865 Cond = 866 MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0); 867 } else { 868 const LLT CmpTy = MRI->getType(CmpOpReg); 869 auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS); 870 auto Diff = MIB.buildConstant(CmpTy, High - Low); 871 Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0); 872 } 873 } 874 875 // Update successor info 876 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb); 877 878 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()}, 879 CB.ThisBB); 880 881 // TrueBB and FalseBB are always different unless the incoming IR is 882 // degenerate. 
  // This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}

bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  // Skip the range check if the fallthrough block is unreachable.
  if (FallthroughUnreachable)
    JTH->OmitRangeCheck = true;

  if (!JTH->OmitRangeCheck)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
960 if (CurMBB == SwitchMBB) { 961 if (!emitJumpTableHeader(*JT, *JTH, CurMBB)) 962 return false; 963 JTH->Emitted = true; 964 } 965 return true; 966 } 967 bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, 968 Value *Cond, 969 MachineBasicBlock *Fallthrough, 970 bool FallthroughUnreachable, 971 BranchProbability UnhandledProbs, 972 MachineBasicBlock *CurMBB, 973 MachineIRBuilder &MIB, 974 MachineBasicBlock *SwitchMBB) { 975 using namespace SwitchCG; 976 const Value *RHS, *LHS, *MHS; 977 CmpInst::Predicate Pred; 978 if (I->Low == I->High) { 979 // Check Cond == I->Low. 980 Pred = CmpInst::ICMP_EQ; 981 LHS = Cond; 982 RHS = I->Low; 983 MHS = nullptr; 984 } else { 985 // Check I->Low <= Cond <= I->High. 986 Pred = CmpInst::ICMP_SLE; 987 LHS = I->Low; 988 MHS = Cond; 989 RHS = I->High; 990 } 991 992 // If Fallthrough is unreachable, fold away the comparison. 993 // The false probability is the sum of all unhandled cases. 994 CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough, 995 CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs); 996 997 emitSwitchCase(CB, SwitchMBB, MIB); 998 return true; 999 } 1000 1001 void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B, 1002 MachineBasicBlock *SwitchBB) { 1003 MachineIRBuilder &MIB = *CurBuilder; 1004 MIB.setMBB(*SwitchBB); 1005 1006 // Subtract the minimum value. 1007 Register SwitchOpReg = getOrCreateVReg(*B.SValue); 1008 1009 LLT SwitchOpTy = MRI->getType(SwitchOpReg); 1010 Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0); 1011 auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg); 1012 1013 // Ensure that the type will fit the mask value. 1014 LLT MaskTy = SwitchOpTy; 1015 for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) { 1016 if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) { 1017 // Switch table case range are encoded into series of masks. 1018 // Just use pointer type, it's guaranteed to fit. 1019 MaskTy = LLT::scalar(64); 1020 break; 1021 } 1022 } 1023 Register SubReg = RangeSub.getReg(0); 1024 if (SwitchOpTy != MaskTy) 1025 SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0); 1026 1027 B.RegVT = getMVTForLLT(MaskTy); 1028 B.Reg = SubReg; 1029 1030 MachineBasicBlock *MBB = B.Cases[0].ThisBB; 1031 1032 if (!B.OmitRangeCheck) 1033 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb); 1034 addSuccessorWithProb(SwitchBB, MBB, B.Prob); 1035 1036 SwitchBB->normalizeSuccProbs(); 1037 1038 if (!B.OmitRangeCheck) { 1039 // Conditional branch to the default block. 1040 auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range); 1041 auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1), 1042 RangeSub, RangeCst); 1043 MIB.buildBrCond(RangeCmp, *B.Default); 1044 } 1045 1046 // Avoid emitting unnecessary branches to the next block. 1047 if (MBB != SwitchBB->getNextNode()) 1048 MIB.buildBr(*MBB); 1049 } 1050 1051 void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB, 1052 MachineBasicBlock *NextMBB, 1053 BranchProbability BranchProbToNext, 1054 Register Reg, SwitchCG::BitTestCase &B, 1055 MachineBasicBlock *SwitchBB) { 1056 MachineIRBuilder &MIB = *CurBuilder; 1057 MIB.setMBB(*SwitchBB); 1058 1059 LLT SwitchTy = getLLTForMVT(BB.RegVT); 1060 Register Cmp; 1061 unsigned PopCount = countPopulation(B.Mask); 1062 if (PopCount == 1) { 1063 // Testing for a single bit; just compare the shift count with what it 1064 // would need to be to shift a 1 bit in that position. 
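    // E.g. for Mask == 0b00100000 this reduces to "Reg == 5", because Reg
    // already holds the switch value minus the range minimum, i.e. the bit
    // index to test.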
    auto MaskTrailingZeros =
        MIB.buildConstant(SwitchTy, countTrailingZeros(B.Mask));
    Cmp =
        MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
            .getReg(0);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    auto MaskTrailingOnes =
        MIB.buildConstant(SwitchTy, countTrailingOnes(B.Mask));
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
              .getReg(0);
  } else {
    // Make desired shift.
    auto CstOne = MIB.buildConstant(SwitchTy, 1);
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

    // Emit bit tests and jumps.
    auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
    auto CstZero = MIB.buildConstant(SwitchTy, 0);
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
              .getReg(0);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  // Record the fact that the IR edge from the header to the bit test target
  // will go through our new block. Needed for PHIs to have nodes added.
  addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
                    SwitchBB);

  MIB.buildBrCond(Cmp, *B.TargetBB);

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != SwitchBB->getNextNode())
    MIB.buildBr(*NextMBB);
}

bool IRTranslator::lowerBitTestWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability DefaultProb, BranchProbability UnhandledProbs,
    SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // The bit test blocks haven't been inserted yet; insert them here.
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  // Fill in fields of the BitTestBlock.
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;
  // If the cases in bit test don't form a contiguous range, we evenly
  // distribute the probability on the edge to Fallthrough to two
  // successors of CurMBB.
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  }

  if (FallthroughUnreachable) {
    // Skip the range check if the fallthrough block is unreachable.
    BTB->OmitRangeCheck = true;
  }

  // If we're in the right place, emit the bit test header right now.
1144 if (CurMBB == SwitchMBB) { 1145 emitBitTestHeader(*BTB, SwitchMBB); 1146 BTB->Emitted = true; 1147 } 1148 return true; 1149 } 1150 1151 bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, 1152 Value *Cond, 1153 MachineBasicBlock *SwitchMBB, 1154 MachineBasicBlock *DefaultMBB, 1155 MachineIRBuilder &MIB) { 1156 using namespace SwitchCG; 1157 MachineFunction *CurMF = FuncInfo.MF; 1158 MachineBasicBlock *NextMBB = nullptr; 1159 MachineFunction::iterator BBI(W.MBB); 1160 if (++BBI != FuncInfo.MF->end()) 1161 NextMBB = &*BBI; 1162 1163 if (EnableOpts) { 1164 // Here, we order cases by probability so the most likely case will be 1165 // checked first. However, two clusters can have the same probability in 1166 // which case their relative ordering is non-deterministic. So we use Low 1167 // as a tie-breaker as clusters are guaranteed to never overlap. 1168 llvm::sort(W.FirstCluster, W.LastCluster + 1, 1169 [](const CaseCluster &a, const CaseCluster &b) { 1170 return a.Prob != b.Prob 1171 ? a.Prob > b.Prob 1172 : a.Low->getValue().slt(b.Low->getValue()); 1173 }); 1174 1175 // Rearrange the case blocks so that the last one falls through if possible 1176 // without changing the order of probabilities. 1177 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) { 1178 --I; 1179 if (I->Prob > W.LastCluster->Prob) 1180 break; 1181 if (I->Kind == CC_Range && I->MBB == NextMBB) { 1182 std::swap(*I, *W.LastCluster); 1183 break; 1184 } 1185 } 1186 } 1187 1188 // Compute total probability. 1189 BranchProbability DefaultProb = W.DefaultProb; 1190 BranchProbability UnhandledProbs = DefaultProb; 1191 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I) 1192 UnhandledProbs += I->Prob; 1193 1194 MachineBasicBlock *CurMBB = W.MBB; 1195 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) { 1196 bool FallthroughUnreachable = false; 1197 MachineBasicBlock *Fallthrough; 1198 if (I == W.LastCluster) { 1199 // For the last cluster, fall through to the default destination. 
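      // If the default destination starts with an 'unreachable' instruction
      // (e.g. the implicit default of a switch that covers all values), the
      // range checks below can be omitted.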
1200 Fallthrough = DefaultMBB; 1201 FallthroughUnreachable = isa<UnreachableInst>( 1202 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg()); 1203 } else { 1204 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock()); 1205 CurMF->insert(BBI, Fallthrough); 1206 } 1207 UnhandledProbs -= I->Prob; 1208 1209 switch (I->Kind) { 1210 case CC_BitTests: { 1211 if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI, 1212 DefaultProb, UnhandledProbs, I, Fallthrough, 1213 FallthroughUnreachable)) { 1214 LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch"); 1215 return false; 1216 } 1217 break; 1218 } 1219 1220 case CC_JumpTable: { 1221 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI, 1222 UnhandledProbs, I, Fallthrough, 1223 FallthroughUnreachable)) { 1224 LLVM_DEBUG(dbgs() << "Failed to lower jump table"); 1225 return false; 1226 } 1227 break; 1228 } 1229 case CC_Range: { 1230 if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough, 1231 FallthroughUnreachable, UnhandledProbs, 1232 CurMBB, MIB, SwitchMBB)) { 1233 LLVM_DEBUG(dbgs() << "Failed to lower switch range"); 1234 return false; 1235 } 1236 break; 1237 } 1238 } 1239 CurMBB = Fallthrough; 1240 } 1241 1242 return true; 1243 } 1244 1245 bool IRTranslator::translateIndirectBr(const User &U, 1246 MachineIRBuilder &MIRBuilder) { 1247 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U); 1248 1249 const Register Tgt = getOrCreateVReg(*BrInst.getAddress()); 1250 MIRBuilder.buildBrIndirect(Tgt); 1251 1252 // Link successors. 1253 SmallPtrSet<const BasicBlock *, 32> AddedSuccessors; 1254 MachineBasicBlock &CurBB = MIRBuilder.getMBB(); 1255 for (const BasicBlock *Succ : successors(&BrInst)) { 1256 // It's legal for indirectbr instructions to have duplicate blocks in the 1257 // destination list. We don't allow this in MIR. Skip anything that's 1258 // already a successor. 1259 if (!AddedSuccessors.insert(Succ).second) 1260 continue; 1261 CurBB.addSuccessor(&getMBB(*Succ)); 1262 } 1263 1264 return true; 1265 } 1266 1267 static bool isSwiftError(const Value *V) { 1268 if (auto Arg = dyn_cast<Argument>(V)) 1269 return Arg->hasSwiftErrorAttr(); 1270 if (auto AI = dyn_cast<AllocaInst>(V)) 1271 return AI->isSwiftError(); 1272 return false; 1273 } 1274 1275 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { 1276 const LoadInst &LI = cast<LoadInst>(U); 1277 if (DL->getTypeStoreSize(LI.getType()) == 0) 1278 return true; 1279 1280 ArrayRef<Register> Regs = getOrCreateVRegs(LI); 1281 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI); 1282 Register Base = getOrCreateVReg(*LI.getPointerOperand()); 1283 1284 Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType()); 1285 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 1286 1287 if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) { 1288 assert(Regs.size() == 1 && "swifterror should be single pointer"); 1289 Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), 1290 LI.getPointerOperand()); 1291 MIRBuilder.buildCopy(Regs[0], VReg); 1292 return true; 1293 } 1294 1295 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1296 MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL); 1297 1298 const MDNode *Ranges = 1299 Regs.size() == 1 ? 
LI.getMetadata(LLVMContext::MD_range) : nullptr; 1300 for (unsigned i = 0; i < Regs.size(); ++i) { 1301 Register Addr; 1302 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); 1303 1304 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); 1305 Align BaseAlign = getMemOpAlign(LI); 1306 AAMDNodes AAMetadata; 1307 LI.getAAMetadata(AAMetadata); 1308 auto MMO = MF->getMachineMemOperand( 1309 Ptr, Flags, MRI->getType(Regs[i]).getSizeInBytes(), 1310 commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges, 1311 LI.getSyncScopeID(), LI.getOrdering()); 1312 MIRBuilder.buildLoad(Regs[i], Addr, *MMO); 1313 } 1314 1315 return true; 1316 } 1317 1318 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { 1319 const StoreInst &SI = cast<StoreInst>(U); 1320 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0) 1321 return true; 1322 1323 ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand()); 1324 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand()); 1325 Register Base = getOrCreateVReg(*SI.getPointerOperand()); 1326 1327 Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType()); 1328 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 1329 1330 if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) { 1331 assert(Vals.size() == 1 && "swifterror should be single pointer"); 1332 1333 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(), 1334 SI.getPointerOperand()); 1335 MIRBuilder.buildCopy(VReg, Vals[0]); 1336 return true; 1337 } 1338 1339 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1340 MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL); 1341 1342 for (unsigned i = 0; i < Vals.size(); ++i) { 1343 Register Addr; 1344 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); 1345 1346 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); 1347 Align BaseAlign = getMemOpAlign(SI); 1348 AAMDNodes AAMetadata; 1349 SI.getAAMetadata(AAMetadata); 1350 auto MMO = MF->getMachineMemOperand( 1351 Ptr, Flags, MRI->getType(Vals[i]).getSizeInBytes(), 1352 commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr, 1353 SI.getSyncScopeID(), SI.getOrdering()); 1354 MIRBuilder.buildStore(Vals[i], Addr, *MMO); 1355 } 1356 return true; 1357 } 1358 1359 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) { 1360 const Value *Src = U.getOperand(0); 1361 Type *Int32Ty = Type::getInt32Ty(U.getContext()); 1362 1363 // getIndexedOffsetInType is designed for GEPs, so the first index is the 1364 // usual array element rather than looking into the actual aggregate. 
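  // For example, extractvalue {i32, i64} %agg, 1 yields the index list
  // [0, 1]; with a typical 64-bit DataLayout getIndexedOffsetInType returns
  // 8 bytes, which becomes a bit offset of 64 below.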
1365 SmallVector<Value *, 1> Indices; 1366 Indices.push_back(ConstantInt::get(Int32Ty, 0)); 1367 1368 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) { 1369 for (auto Idx : EVI->indices()) 1370 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 1371 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) { 1372 for (auto Idx : IVI->indices()) 1373 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 1374 } else { 1375 for (unsigned i = 1; i < U.getNumOperands(); ++i) 1376 Indices.push_back(U.getOperand(i)); 1377 } 1378 1379 return 8 * static_cast<uint64_t>( 1380 DL.getIndexedOffsetInType(Src->getType(), Indices)); 1381 } 1382 1383 bool IRTranslator::translateExtractValue(const User &U, 1384 MachineIRBuilder &MIRBuilder) { 1385 const Value *Src = U.getOperand(0); 1386 uint64_t Offset = getOffsetFromIndices(U, *DL); 1387 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); 1388 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src); 1389 unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin(); 1390 auto &DstRegs = allocateVRegs(U); 1391 1392 for (unsigned i = 0; i < DstRegs.size(); ++i) 1393 DstRegs[i] = SrcRegs[Idx++]; 1394 1395 return true; 1396 } 1397 1398 bool IRTranslator::translateInsertValue(const User &U, 1399 MachineIRBuilder &MIRBuilder) { 1400 const Value *Src = U.getOperand(0); 1401 uint64_t Offset = getOffsetFromIndices(U, *DL); 1402 auto &DstRegs = allocateVRegs(U); 1403 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U); 1404 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); 1405 ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1)); 1406 auto InsertedIt = InsertedRegs.begin(); 1407 1408 for (unsigned i = 0; i < DstRegs.size(); ++i) { 1409 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end()) 1410 DstRegs[i] = *InsertedIt++; 1411 else 1412 DstRegs[i] = SrcRegs[i]; 1413 } 1414 1415 return true; 1416 } 1417 1418 bool IRTranslator::translateSelect(const User &U, 1419 MachineIRBuilder &MIRBuilder) { 1420 Register Tst = getOrCreateVReg(*U.getOperand(0)); 1421 ArrayRef<Register> ResRegs = getOrCreateVRegs(U); 1422 ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1)); 1423 ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2)); 1424 1425 uint16_t Flags = 0; 1426 if (const SelectInst *SI = dyn_cast<SelectInst>(&U)) 1427 Flags = MachineInstr::copyFlagsFromInstruction(*SI); 1428 1429 for (unsigned i = 0; i < ResRegs.size(); ++i) { 1430 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags); 1431 } 1432 1433 return true; 1434 } 1435 1436 bool IRTranslator::translateCopy(const User &U, const Value &V, 1437 MachineIRBuilder &MIRBuilder) { 1438 Register Src = getOrCreateVReg(V); 1439 auto &Regs = *VMap.getVRegs(U); 1440 if (Regs.empty()) { 1441 Regs.push_back(Src); 1442 VMap.getOffsets(U)->push_back(0); 1443 } else { 1444 // If we already assigned a vreg for this instruction, we can't change that. 1445 // Emit a copy to satisfy the users we already emitted. 1446 MIRBuilder.buildCopy(Regs[0], Src); 1447 } 1448 return true; 1449 } 1450 1451 bool IRTranslator::translateBitCast(const User &U, 1452 MachineIRBuilder &MIRBuilder) { 1453 // If we're bitcasting to the source type, we can reuse the source vreg. 
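  // E.g. a bitcast between pointers in the same address space maps both types
  // to the same LLT and is treated as a copy, while <2 x i32> -> i64 changes
  // the LLT and is emitted as a G_BITCAST.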
1454 if (getLLTForType(*U.getOperand(0)->getType(), *DL) == 1455 getLLTForType(*U.getType(), *DL)) 1456 return translateCopy(U, *U.getOperand(0), MIRBuilder); 1457 1458 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder); 1459 } 1460 1461 bool IRTranslator::translateCast(unsigned Opcode, const User &U, 1462 MachineIRBuilder &MIRBuilder) { 1463 Register Op = getOrCreateVReg(*U.getOperand(0)); 1464 Register Res = getOrCreateVReg(U); 1465 MIRBuilder.buildInstr(Opcode, {Res}, {Op}); 1466 return true; 1467 } 1468 1469 bool IRTranslator::translateGetElementPtr(const User &U, 1470 MachineIRBuilder &MIRBuilder) { 1471 Value &Op0 = *U.getOperand(0); 1472 Register BaseReg = getOrCreateVReg(Op0); 1473 Type *PtrIRTy = Op0.getType(); 1474 LLT PtrTy = getLLTForType(*PtrIRTy, *DL); 1475 Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy); 1476 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 1477 1478 // Normalize Vector GEP - all scalar operands should be converted to the 1479 // splat vector. 1480 unsigned VectorWidth = 0; 1481 if (auto *VT = dyn_cast<VectorType>(U.getType())) 1482 VectorWidth = cast<FixedVectorType>(VT)->getNumElements(); 1483 1484 // We might need to splat the base pointer into a vector if the offsets 1485 // are vectors. 1486 if (VectorWidth && !PtrTy.isVector()) { 1487 BaseReg = 1488 MIRBuilder.buildSplatVector(LLT::vector(VectorWidth, PtrTy), BaseReg) 1489 .getReg(0); 1490 PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth); 1491 PtrTy = getLLTForType(*PtrIRTy, *DL); 1492 OffsetIRTy = DL->getIntPtrType(PtrIRTy); 1493 OffsetTy = getLLTForType(*OffsetIRTy, *DL); 1494 } 1495 1496 int64_t Offset = 0; 1497 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U); 1498 GTI != E; ++GTI) { 1499 const Value *Idx = GTI.getOperand(); 1500 if (StructType *StTy = GTI.getStructTypeOrNull()) { 1501 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); 1502 Offset += DL->getStructLayout(StTy)->getElementOffset(Field); 1503 continue; 1504 } else { 1505 uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType()); 1506 1507 // If this is a scalar constant or a splat vector of constants, 1508 // handle it quickly. 1509 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) { 1510 Offset += ElementSize * CI->getSExtValue(); 1511 continue; 1512 } 1513 1514 if (Offset != 0) { 1515 auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset); 1516 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0)) 1517 .getReg(0); 1518 Offset = 0; 1519 } 1520 1521 Register IdxReg = getOrCreateVReg(*Idx); 1522 LLT IdxTy = MRI->getType(IdxReg); 1523 if (IdxTy != OffsetTy) { 1524 if (!IdxTy.isVector() && VectorWidth) { 1525 IdxReg = MIRBuilder.buildSplatVector( 1526 OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0); 1527 } 1528 1529 IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0); 1530 } 1531 1532 // N = N + Idx * ElementSize; 1533 // Avoid doing it for ElementSize of 1. 
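      // E.g. indexing into an [N x i8] array: the index is already the byte
      // offset, so no multiply is emitted.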
1534 Register GepOffsetReg; 1535 if (ElementSize != 1) { 1536 auto ElementSizeMIB = MIRBuilder.buildConstant( 1537 getLLTForType(*OffsetIRTy, *DL), ElementSize); 1538 GepOffsetReg = 1539 MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0); 1540 } else 1541 GepOffsetReg = IdxReg; 1542 1543 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0); 1544 } 1545 } 1546 1547 if (Offset != 0) { 1548 auto OffsetMIB = 1549 MIRBuilder.buildConstant(OffsetTy, Offset); 1550 MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0)); 1551 return true; 1552 } 1553 1554 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg); 1555 return true; 1556 } 1557 1558 bool IRTranslator::translateMemFunc(const CallInst &CI, 1559 MachineIRBuilder &MIRBuilder, 1560 unsigned Opcode) { 1561 1562 // If the source is undef, then just emit a nop. 1563 if (isa<UndefValue>(CI.getArgOperand(1))) 1564 return true; 1565 1566 SmallVector<Register, 3> SrcRegs; 1567 1568 unsigned MinPtrSize = UINT_MAX; 1569 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) { 1570 Register SrcReg = getOrCreateVReg(**AI); 1571 LLT SrcTy = MRI->getType(SrcReg); 1572 if (SrcTy.isPointer()) 1573 MinPtrSize = std::min(SrcTy.getSizeInBits(), MinPtrSize); 1574 SrcRegs.push_back(SrcReg); 1575 } 1576 1577 LLT SizeTy = LLT::scalar(MinPtrSize); 1578 1579 // The size operand should be the minimum of the pointer sizes. 1580 Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1]; 1581 if (MRI->getType(SizeOpReg) != SizeTy) 1582 SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0); 1583 1584 auto ICall = MIRBuilder.buildInstr(Opcode); 1585 for (Register SrcReg : SrcRegs) 1586 ICall.addUse(SrcReg); 1587 1588 Align DstAlign; 1589 Align SrcAlign; 1590 unsigned IsVol = 1591 cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1)) 1592 ->getZExtValue(); 1593 1594 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) { 1595 DstAlign = MCI->getDestAlign().valueOrOne(); 1596 SrcAlign = MCI->getSourceAlign().valueOrOne(); 1597 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) { 1598 DstAlign = MMI->getDestAlign().valueOrOne(); 1599 SrcAlign = MMI->getSourceAlign().valueOrOne(); 1600 } else { 1601 auto *MSI = cast<MemSetInst>(&CI); 1602 DstAlign = MSI->getDestAlign().valueOrOne(); 1603 } 1604 1605 // We need to propagate the tail call flag from the IR inst as an argument. 1606 // Otherwise, we have to pessimize and assume later that we cannot tail call 1607 // any memory intrinsics. 1608 ICall.addImm(CI.isTailCall() ? 1 : 0); 1609 1610 // Create mem operands to store the alignment and volatile info. 1611 auto VolFlag = IsVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 1612 ICall.addMemOperand(MF->getMachineMemOperand( 1613 MachinePointerInfo(CI.getArgOperand(0)), 1614 MachineMemOperand::MOStore | VolFlag, 1, DstAlign)); 1615 if (Opcode != TargetOpcode::G_MEMSET) 1616 ICall.addMemOperand(MF->getMachineMemOperand( 1617 MachinePointerInfo(CI.getArgOperand(1)), 1618 MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign)); 1619 1620 return true; 1621 } 1622 1623 void IRTranslator::getStackGuard(Register DstReg, 1624 MachineIRBuilder &MIRBuilder) { 1625 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); 1626 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF)); 1627 auto MIB = 1628 MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {}); 1629 1630 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1631 Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent()); 1632 if (!Global) 1633 return; 1634 1635 MachinePointerInfo MPInfo(Global); 1636 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 1637 MachineMemOperand::MODereferenceable; 1638 MachineMemOperand *MemRef = 1639 MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8, 1640 DL->getPointerABIAlignment(0)); 1641 MIB.setMemRefs({MemRef}); 1642 } 1643 1644 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, 1645 MachineIRBuilder &MIRBuilder) { 1646 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI); 1647 MIRBuilder.buildInstr( 1648 Op, {ResRegs[0], ResRegs[1]}, 1649 {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))}); 1650 1651 return true; 1652 } 1653 1654 bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI, 1655 MachineIRBuilder &MIRBuilder) { 1656 Register Dst = getOrCreateVReg(CI); 1657 Register Src0 = getOrCreateVReg(*CI.getOperand(0)); 1658 Register Src1 = getOrCreateVReg(*CI.getOperand(1)); 1659 uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue(); 1660 MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale }); 1661 return true; 1662 } 1663 1664 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) { 1665 switch (ID) { 1666 default: 1667 break; 1668 case Intrinsic::bswap: 1669 return TargetOpcode::G_BSWAP; 1670 case Intrinsic::bitreverse: 1671 return TargetOpcode::G_BITREVERSE; 1672 case Intrinsic::fshl: 1673 return TargetOpcode::G_FSHL; 1674 case Intrinsic::fshr: 1675 return TargetOpcode::G_FSHR; 1676 case Intrinsic::ceil: 1677 return TargetOpcode::G_FCEIL; 1678 case Intrinsic::cos: 1679 return TargetOpcode::G_FCOS; 1680 case Intrinsic::ctpop: 1681 return TargetOpcode::G_CTPOP; 1682 case Intrinsic::exp: 1683 return TargetOpcode::G_FEXP; 1684 case Intrinsic::exp2: 1685 return TargetOpcode::G_FEXP2; 1686 case Intrinsic::fabs: 1687 return TargetOpcode::G_FABS; 1688 case Intrinsic::copysign: 1689 return TargetOpcode::G_FCOPYSIGN; 1690 case Intrinsic::minnum: 1691 return TargetOpcode::G_FMINNUM; 1692 case Intrinsic::maxnum: 1693 return TargetOpcode::G_FMAXNUM; 1694 case Intrinsic::minimum: 1695 return TargetOpcode::G_FMINIMUM; 1696 case Intrinsic::maximum: 1697 return TargetOpcode::G_FMAXIMUM; 1698 case Intrinsic::canonicalize: 1699 return TargetOpcode::G_FCANONICALIZE; 1700 case Intrinsic::floor: 1701 return TargetOpcode::G_FFLOOR; 1702 case Intrinsic::fma: 1703 return TargetOpcode::G_FMA; 1704 case Intrinsic::log: 1705 return TargetOpcode::G_FLOG; 1706 case Intrinsic::log2: 1707 return TargetOpcode::G_FLOG2; 1708 case Intrinsic::log10: 1709 return 
TargetOpcode::G_FLOG10; 1710 case Intrinsic::nearbyint: 1711 return TargetOpcode::G_FNEARBYINT; 1712 case Intrinsic::pow: 1713 return TargetOpcode::G_FPOW; 1714 case Intrinsic::powi: 1715 return TargetOpcode::G_FPOWI; 1716 case Intrinsic::rint: 1717 return TargetOpcode::G_FRINT; 1718 case Intrinsic::round: 1719 return TargetOpcode::G_INTRINSIC_ROUND; 1720 case Intrinsic::roundeven: 1721 return TargetOpcode::G_INTRINSIC_ROUNDEVEN; 1722 case Intrinsic::sin: 1723 return TargetOpcode::G_FSIN; 1724 case Intrinsic::sqrt: 1725 return TargetOpcode::G_FSQRT; 1726 case Intrinsic::trunc: 1727 return TargetOpcode::G_INTRINSIC_TRUNC; 1728 case Intrinsic::readcyclecounter: 1729 return TargetOpcode::G_READCYCLECOUNTER; 1730 case Intrinsic::ptrmask: 1731 return TargetOpcode::G_PTRMASK; 1732 case Intrinsic::lrint: 1733 return TargetOpcode::G_INTRINSIC_LRINT; 1734 // FADD/FMUL require checking the FMF, so are handled elsewhere. 1735 case Intrinsic::vector_reduce_fmin: 1736 return TargetOpcode::G_VECREDUCE_FMIN; 1737 case Intrinsic::vector_reduce_fmax: 1738 return TargetOpcode::G_VECREDUCE_FMAX; 1739 case Intrinsic::vector_reduce_add: 1740 return TargetOpcode::G_VECREDUCE_ADD; 1741 case Intrinsic::vector_reduce_mul: 1742 return TargetOpcode::G_VECREDUCE_MUL; 1743 case Intrinsic::vector_reduce_and: 1744 return TargetOpcode::G_VECREDUCE_AND; 1745 case Intrinsic::vector_reduce_or: 1746 return TargetOpcode::G_VECREDUCE_OR; 1747 case Intrinsic::vector_reduce_xor: 1748 return TargetOpcode::G_VECREDUCE_XOR; 1749 case Intrinsic::vector_reduce_smax: 1750 return TargetOpcode::G_VECREDUCE_SMAX; 1751 case Intrinsic::vector_reduce_smin: 1752 return TargetOpcode::G_VECREDUCE_SMIN; 1753 case Intrinsic::vector_reduce_umax: 1754 return TargetOpcode::G_VECREDUCE_UMAX; 1755 case Intrinsic::vector_reduce_umin: 1756 return TargetOpcode::G_VECREDUCE_UMIN; 1757 } 1758 return Intrinsic::not_intrinsic; 1759 } 1760 1761 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI, 1762 Intrinsic::ID ID, 1763 MachineIRBuilder &MIRBuilder) { 1764 1765 unsigned Op = getSimpleIntrinsicOpcode(ID); 1766 1767 // Is this a simple intrinsic? 1768 if (Op == Intrinsic::not_intrinsic) 1769 return false; 1770 1771 // Yes. Let's translate it. 1772 SmallVector<llvm::SrcOp, 4> VRegs; 1773 for (auto &Arg : CI.arg_operands()) 1774 VRegs.push_back(getOrCreateVReg(*Arg)); 1775 1776 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs, 1777 MachineInstr::copyFlagsFromInstruction(CI)); 1778 return true; 1779 } 1780 1781 // TODO: Include ConstainedOps.def when all strict instructions are defined. 
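// Each llvm.experimental.constrained.* intrinsic handled here maps 1:1 onto a
// G_STRICT_* opcode. Keeping distinct opcodes (rather than reusing e.g. G_FADD)
// preserves the ordering implied by the FP environment until a later pass can
// prove the constraints are not needed.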
1782 static unsigned getConstrainedOpcode(Intrinsic::ID ID) { 1783 switch (ID) { 1784 case Intrinsic::experimental_constrained_fadd: 1785 return TargetOpcode::G_STRICT_FADD; 1786 case Intrinsic::experimental_constrained_fsub: 1787 return TargetOpcode::G_STRICT_FSUB; 1788 case Intrinsic::experimental_constrained_fmul: 1789 return TargetOpcode::G_STRICT_FMUL; 1790 case Intrinsic::experimental_constrained_fdiv: 1791 return TargetOpcode::G_STRICT_FDIV; 1792 case Intrinsic::experimental_constrained_frem: 1793 return TargetOpcode::G_STRICT_FREM; 1794 case Intrinsic::experimental_constrained_fma: 1795 return TargetOpcode::G_STRICT_FMA; 1796 case Intrinsic::experimental_constrained_sqrt: 1797 return TargetOpcode::G_STRICT_FSQRT; 1798 default: 1799 return 0; 1800 } 1801 } 1802 1803 bool IRTranslator::translateConstrainedFPIntrinsic( 1804 const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) { 1805 fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue(); 1806 1807 unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID()); 1808 if (!Opcode) 1809 return false; 1810 1811 unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI); 1812 if (EB == fp::ExceptionBehavior::ebIgnore) 1813 Flags |= MachineInstr::NoFPExcept; 1814 1815 SmallVector<llvm::SrcOp, 4> VRegs; 1816 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0))); 1817 if (!FPI.isUnaryOp()) 1818 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1))); 1819 if (FPI.isTernaryOp()) 1820 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2))); 1821 1822 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags); 1823 return true; 1824 } 1825 1826 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, 1827 MachineIRBuilder &MIRBuilder) { 1828 1829 // If this is a simple intrinsic (that is, we just need to add a def of 1830 // a vreg, and uses for each arg operand, then translate it. 1831 if (translateSimpleIntrinsic(CI, ID, MIRBuilder)) 1832 return true; 1833 1834 switch (ID) { 1835 default: 1836 break; 1837 case Intrinsic::lifetime_start: 1838 case Intrinsic::lifetime_end: { 1839 // No stack colouring in O0, discard region information. 1840 if (MF->getTarget().getOptLevel() == CodeGenOpt::None) 1841 return true; 1842 1843 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START 1844 : TargetOpcode::LIFETIME_END; 1845 1846 // Get the underlying objects for the location passed on the lifetime 1847 // marker. 1848 SmallVector<const Value *, 4> Allocas; 1849 getUnderlyingObjects(CI.getArgOperand(1), Allocas); 1850 1851 // Iterate over each underlying object, creating lifetime markers for each 1852 // static alloca. Quit if we find a non-static alloca. 
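// Bailing out here is always safe: lifetime markers only enable stack-slot
// sharing, so dropping them merely loses an optimisation, never correctness.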
1853 for (const Value *V : Allocas) { 1854 const AllocaInst *AI = dyn_cast<AllocaInst>(V); 1855 if (!AI) 1856 continue; 1857 1858 if (!AI->isStaticAlloca()) 1859 return true; 1860 1861 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI)); 1862 } 1863 return true; 1864 } 1865 case Intrinsic::dbg_declare: { 1866 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI); 1867 assert(DI.getVariable() && "Missing variable"); 1868 1869 const Value *Address = DI.getAddress(); 1870 if (!Address || isa<UndefValue>(Address)) { 1871 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 1872 return true; 1873 } 1874 1875 assert(DI.getVariable()->isValidLocationForIntrinsic( 1876 MIRBuilder.getDebugLoc()) && 1877 "Expected inlined-at fields to agree"); 1878 auto AI = dyn_cast<AllocaInst>(Address); 1879 if (AI && AI->isStaticAlloca()) { 1880 // Static allocas are tracked at the MF level, no need for DBG_VALUE 1881 // instructions (in fact, they get ignored if they *do* exist). 1882 MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(), 1883 getOrCreateFrameIndex(*AI), DI.getDebugLoc()); 1884 } else { 1885 // A dbg.declare describes the address of a source variable, so lower it 1886 // into an indirect DBG_VALUE. 1887 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address), 1888 DI.getVariable(), DI.getExpression()); 1889 } 1890 return true; 1891 } 1892 case Intrinsic::dbg_label: { 1893 const DbgLabelInst &DI = cast<DbgLabelInst>(CI); 1894 assert(DI.getLabel() && "Missing label"); 1895 1896 assert(DI.getLabel()->isValidLocationForIntrinsic( 1897 MIRBuilder.getDebugLoc()) && 1898 "Expected inlined-at fields to agree"); 1899 1900 MIRBuilder.buildDbgLabel(DI.getLabel()); 1901 return true; 1902 } 1903 case Intrinsic::vaend: 1904 // No target I know of cares about va_end. Certainly no in-tree target 1905 // does. Simplest intrinsic ever! 1906 return true; 1907 case Intrinsic::vastart: { 1908 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1909 Value *Ptr = CI.getArgOperand(0); 1910 unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8; 1911 1912 // FIXME: Get alignment 1913 MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)}) 1914 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr), 1915 MachineMemOperand::MOStore, 1916 ListSize, Align(1))); 1917 return true; 1918 } 1919 case Intrinsic::dbg_value: { 1920 // This form of DBG_VALUE is target-independent. 1921 const DbgValueInst &DI = cast<DbgValueInst>(CI); 1922 const Value *V = DI.getValue(); 1923 assert(DI.getVariable()->isValidLocationForIntrinsic( 1924 MIRBuilder.getDebugLoc()) && 1925 "Expected inlined-at fields to agree"); 1926 if (!V) { 1927 // Currently the optimizer can produce this; insert an undef to 1928 // help debugging. Probably the optimizer should not do this. 1929 MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression()); 1930 } else if (const auto *CI = dyn_cast<Constant>(V)) { 1931 MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression()); 1932 } else { 1933 for (Register Reg : getOrCreateVRegs(*V)) { 1934 // FIXME: This does not handle register-indirect values at offset 0. The 1935 // direct/indirect thing shouldn't really be handled by something as 1936 // implicit as reg+noreg vs reg+imm in the first place, but it seems 1937 // pretty baked in right now. 
1938 MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression()); 1939 } 1940 } 1941 return true; 1942 } 1943 case Intrinsic::uadd_with_overflow: 1944 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder); 1945 case Intrinsic::sadd_with_overflow: 1946 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder); 1947 case Intrinsic::usub_with_overflow: 1948 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder); 1949 case Intrinsic::ssub_with_overflow: 1950 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder); 1951 case Intrinsic::umul_with_overflow: 1952 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder); 1953 case Intrinsic::smul_with_overflow: 1954 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder); 1955 case Intrinsic::uadd_sat: 1956 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder); 1957 case Intrinsic::sadd_sat: 1958 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder); 1959 case Intrinsic::usub_sat: 1960 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder); 1961 case Intrinsic::ssub_sat: 1962 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder); 1963 case Intrinsic::ushl_sat: 1964 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder); 1965 case Intrinsic::sshl_sat: 1966 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder); 1967 case Intrinsic::umin: 1968 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder); 1969 case Intrinsic::umax: 1970 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder); 1971 case Intrinsic::smin: 1972 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder); 1973 case Intrinsic::smax: 1974 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder); 1975 case Intrinsic::abs: 1976 // TODO: Preserve "int min is poison" arg in GMIR? 1977 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder); 1978 case Intrinsic::smul_fix: 1979 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder); 1980 case Intrinsic::umul_fix: 1981 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder); 1982 case Intrinsic::smul_fix_sat: 1983 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder); 1984 case Intrinsic::umul_fix_sat: 1985 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder); 1986 case Intrinsic::sdiv_fix: 1987 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder); 1988 case Intrinsic::udiv_fix: 1989 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder); 1990 case Intrinsic::sdiv_fix_sat: 1991 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder); 1992 case Intrinsic::udiv_fix_sat: 1993 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder); 1994 case Intrinsic::fmuladd: { 1995 const TargetMachine &TM = MF->getTarget(); 1996 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 1997 Register Dst = getOrCreateVReg(CI); 1998 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0)); 1999 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1)); 2000 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2)); 2001 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && 2002 TLI.isFMAFasterThanFMulAndFAdd(*MF, 2003 TLI.getValueType(*DL, CI.getType()))) { 2004 // TODO: Revisit this to see if we should move this part of the 2005 // lowering to the combiner. 
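// llvm.fmuladd permits fusion but does not require it, so both the fused G_FMA
// emitted here and the separate G_FMUL + G_FADD expansion in the else branch
// below are correct lowerings.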
2006 MIRBuilder.buildFMA(Dst, Op0, Op1, Op2, 2007 MachineInstr::copyFlagsFromInstruction(CI)); 2008 } else { 2009 LLT Ty = getLLTForType(*CI.getType(), *DL); 2010 auto FMul = MIRBuilder.buildFMul( 2011 Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI)); 2012 MIRBuilder.buildFAdd(Dst, FMul, Op2, 2013 MachineInstr::copyFlagsFromInstruction(CI)); 2014 } 2015 return true; 2016 } 2017 case Intrinsic::convert_from_fp16: 2018 // FIXME: This intrinsic should probably be removed from the IR. 2019 MIRBuilder.buildFPExt(getOrCreateVReg(CI), 2020 getOrCreateVReg(*CI.getArgOperand(0)), 2021 MachineInstr::copyFlagsFromInstruction(CI)); 2022 return true; 2023 case Intrinsic::convert_to_fp16: 2024 // FIXME: This intrinsic should probably be removed from the IR. 2025 MIRBuilder.buildFPTrunc(getOrCreateVReg(CI), 2026 getOrCreateVReg(*CI.getArgOperand(0)), 2027 MachineInstr::copyFlagsFromInstruction(CI)); 2028 return true; 2029 case Intrinsic::memcpy: 2030 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY); 2031 case Intrinsic::memmove: 2032 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE); 2033 case Intrinsic::memset: 2034 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET); 2035 case Intrinsic::eh_typeid_for: { 2036 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0)); 2037 Register Reg = getOrCreateVReg(CI); 2038 unsigned TypeID = MF->getTypeIDFor(GV); 2039 MIRBuilder.buildConstant(Reg, TypeID); 2040 return true; 2041 } 2042 case Intrinsic::objectsize: 2043 llvm_unreachable("llvm.objectsize.* should have been lowered already"); 2044 2045 case Intrinsic::is_constant: 2046 llvm_unreachable("llvm.is.constant.* should have been lowered already"); 2047 2048 case Intrinsic::stackguard: 2049 getStackGuard(getOrCreateVReg(CI), MIRBuilder); 2050 return true; 2051 case Intrinsic::stackprotector: { 2052 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 2053 Register GuardVal = MRI->createGenericVirtualRegister(PtrTy); 2054 getStackGuard(GuardVal, MIRBuilder); 2055 2056 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1)); 2057 int FI = getOrCreateFrameIndex(*Slot); 2058 MF->getFrameInfo().setStackProtectorIndex(FI); 2059 2060 MIRBuilder.buildStore( 2061 GuardVal, getOrCreateVReg(*Slot), 2062 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), 2063 MachineMemOperand::MOStore | 2064 MachineMemOperand::MOVolatile, 2065 PtrTy.getSizeInBits() / 8, Align(8))); 2066 return true; 2067 } 2068 case Intrinsic::stacksave: { 2069 // Save the stack pointer to the location provided by the intrinsic. 2070 Register Reg = getOrCreateVReg(CI); 2071 Register StackPtr = MF->getSubtarget() 2072 .getTargetLowering() 2073 ->getStackPointerRegisterToSaveRestore(); 2074 2075 // If the target doesn't specify a stack pointer, then fall back. 2076 if (!StackPtr) 2077 return false; 2078 2079 MIRBuilder.buildCopy(Reg, StackPtr); 2080 return true; 2081 } 2082 case Intrinsic::stackrestore: { 2083 // Restore the stack pointer from the location provided by the intrinsic. 2084 Register Reg = getOrCreateVReg(*CI.getArgOperand(0)); 2085 Register StackPtr = MF->getSubtarget() 2086 .getTargetLowering() 2087 ->getStackPointerRegisterToSaveRestore(); 2088 2089 // If the target doesn't specify a stack pointer, then fall back. 
2090 if (!StackPtr) 2091 return false; 2092 2093 MIRBuilder.buildCopy(StackPtr, Reg); 2094 return true; 2095 } 2096 case Intrinsic::cttz: 2097 case Intrinsic::ctlz: { 2098 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1)); 2099 bool isTrailing = ID == Intrinsic::cttz; 2100 unsigned Opcode = isTrailing 2101 ? Cst->isZero() ? TargetOpcode::G_CTTZ 2102 : TargetOpcode::G_CTTZ_ZERO_UNDEF 2103 : Cst->isZero() ? TargetOpcode::G_CTLZ 2104 : TargetOpcode::G_CTLZ_ZERO_UNDEF; 2105 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)}, 2106 {getOrCreateVReg(*CI.getArgOperand(0))}); 2107 return true; 2108 } 2109 case Intrinsic::invariant_start: { 2110 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 2111 Register Undef = MRI->createGenericVirtualRegister(PtrTy); 2112 MIRBuilder.buildUndef(Undef); 2113 return true; 2114 } 2115 case Intrinsic::invariant_end: 2116 return true; 2117 case Intrinsic::expect: 2118 case Intrinsic::annotation: 2119 case Intrinsic::ptr_annotation: 2120 case Intrinsic::launder_invariant_group: 2121 case Intrinsic::strip_invariant_group: { 2122 // Drop the intrinsic, but forward the value. 2123 MIRBuilder.buildCopy(getOrCreateVReg(CI), 2124 getOrCreateVReg(*CI.getArgOperand(0))); 2125 return true; 2126 } 2127 case Intrinsic::assume: 2128 case Intrinsic::experimental_noalias_scope_decl: 2129 case Intrinsic::var_annotation: 2130 case Intrinsic::sideeffect: 2131 // Discard annotate attributes, assumptions, and artificial side-effects. 2132 return true; 2133 case Intrinsic::read_volatile_register: 2134 case Intrinsic::read_register: { 2135 Value *Arg = CI.getArgOperand(0); 2136 MIRBuilder 2137 .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {}) 2138 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())); 2139 return true; 2140 } 2141 case Intrinsic::write_register: { 2142 Value *Arg = CI.getArgOperand(0); 2143 MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER) 2144 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())) 2145 .addUse(getOrCreateVReg(*CI.getArgOperand(1))); 2146 return true; 2147 } 2148 case Intrinsic::localescape: { 2149 MachineBasicBlock &EntryMBB = MF->front(); 2150 StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName()); 2151 2152 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission 2153 // is the same on all targets. 2154 for (unsigned Idx = 0, E = CI.getNumArgOperands(); Idx < E; ++Idx) { 2155 Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts(); 2156 if (isa<ConstantPointerNull>(Arg)) 2157 continue; // Skip null pointers. They represent a hole in index space. 2158 2159 int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg)); 2160 MCSymbol *FrameAllocSym = 2161 MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName, 2162 Idx); 2163 2164 // This should be inserted at the start of the entry block. 2165 auto LocalEscape = 2166 MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE) 2167 .addSym(FrameAllocSym) 2168 .addFrameIndex(FI); 2169 2170 EntryMBB.insert(EntryMBB.begin(), LocalEscape); 2171 } 2172 2173 return true; 2174 } 2175 case Intrinsic::vector_reduce_fadd: 2176 case Intrinsic::vector_reduce_fmul: { 2177 // Need to check for the reassoc flag to decide whether we want a 2178 // sequential reduction opcode or not. 
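// E.g. with the 'reassoc' flag,
//   call float @llvm.vector.reduce.fadd(float %acc, <4 x float> %v)
// is emitted as %r = G_VECREDUCE_FADD %v followed by G_FADD %acc, %r; without
// the flag the ordered form G_VECREDUCE_SEQ_FADD %acc, %v is used instead.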
2179 Register Dst = getOrCreateVReg(CI); 2180 Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0)); 2181 Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1)); 2182 unsigned Opc = 0; 2183 if (!CI.hasAllowReassoc()) { 2184 // The sequential ordering case. 2185 Opc = ID == Intrinsic::vector_reduce_fadd 2186 ? TargetOpcode::G_VECREDUCE_SEQ_FADD 2187 : TargetOpcode::G_VECREDUCE_SEQ_FMUL; 2188 MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc}, 2189 MachineInstr::copyFlagsFromInstruction(CI)); 2190 return true; 2191 } 2192 // We split the operation into a separate G_FADD/G_FMUL + the reduce, 2193 // since the associativity doesn't matter. 2194 unsigned ScalarOpc; 2195 if (ID == Intrinsic::vector_reduce_fadd) { 2196 Opc = TargetOpcode::G_VECREDUCE_FADD; 2197 ScalarOpc = TargetOpcode::G_FADD; 2198 } else { 2199 Opc = TargetOpcode::G_VECREDUCE_FMUL; 2200 ScalarOpc = TargetOpcode::G_FMUL; 2201 } 2202 LLT DstTy = MRI->getType(Dst); 2203 auto Rdx = MIRBuilder.buildInstr( 2204 Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI)); 2205 MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx}, 2206 MachineInstr::copyFlagsFromInstruction(CI)); 2207 2208 return true; 2209 } 2210 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \ 2211 case Intrinsic::INTRINSIC: 2212 #include "llvm/IR/ConstrainedOps.def" 2213 return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI), 2214 MIRBuilder); 2215 2216 } 2217 return false; 2218 } 2219 2220 bool IRTranslator::translateInlineAsm(const CallBase &CB, 2221 MachineIRBuilder &MIRBuilder) { 2222 2223 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering(); 2224 2225 if (!ALI) { 2226 LLVM_DEBUG( 2227 dbgs() << "Inline asm lowering is not supported for this target yet\n"); 2228 return false; 2229 } 2230 2231 return ALI->lowerInlineAsm( 2232 MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); }); 2233 } 2234 2235 bool IRTranslator::translateCallBase(const CallBase &CB, 2236 MachineIRBuilder &MIRBuilder) { 2237 ArrayRef<Register> Res = getOrCreateVRegs(CB); 2238 2239 SmallVector<ArrayRef<Register>, 8> Args; 2240 Register SwiftInVReg = 0; 2241 Register SwiftErrorVReg = 0; 2242 for (auto &Arg : CB.args()) { 2243 if (CLI->supportSwiftError() && isSwiftError(Arg)) { 2244 assert(SwiftInVReg == 0 && "Expected only one swift error argument"); 2245 LLT Ty = getLLTForType(*Arg->getType(), *DL); 2246 SwiftInVReg = MRI->createGenericVirtualRegister(Ty); 2247 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt( 2248 &CB, &MIRBuilder.getMBB(), Arg)); 2249 Args.emplace_back(makeArrayRef(SwiftInVReg)); 2250 SwiftErrorVReg = 2251 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg); 2252 continue; 2253 } 2254 Args.push_back(getOrCreateVRegs(*Arg)); 2255 } 2256 2257 // We don't set HasCalls on MFI here yet because call lowering may decide to 2258 // optimize into tail calls. Instead, we defer that to selection where a final 2259 // scan is done to check if any instructions are calls. 2260 bool Success = 2261 CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg, 2262 [&]() { return getOrCreateVReg(*CB.getCalledOperand()); }); 2263 2264 // Check if we just inserted a tail call. 
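// If so, record it in HasTailCall so the per-block translation loop stops
// early; anything left in the IR block after a tail call is either the return
// or is already handled by the call itself.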
2265 if (Success) { 2266 assert(!HasTailCall && "Can't tail call return twice from block?"); 2267 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); 2268 HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt())); 2269 } 2270 2271 return Success; 2272 } 2273 2274 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) { 2275 const CallInst &CI = cast<CallInst>(U); 2276 auto TII = MF->getTarget().getIntrinsicInfo(); 2277 const Function *F = CI.getCalledFunction(); 2278 2279 // FIXME: support Windows dllimport function calls. 2280 if (F && (F->hasDLLImportStorageClass() || 2281 (MF->getTarget().getTargetTriple().isOSWindows() && 2282 F->hasExternalWeakLinkage()))) 2283 return false; 2284 2285 // FIXME: support control flow guard targets. 2286 if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) 2287 return false; 2288 2289 if (CI.isInlineAsm()) 2290 return translateInlineAsm(CI, MIRBuilder); 2291 2292 Intrinsic::ID ID = Intrinsic::not_intrinsic; 2293 if (F && F->isIntrinsic()) { 2294 ID = F->getIntrinsicID(); 2295 if (TII && ID == Intrinsic::not_intrinsic) 2296 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F)); 2297 } 2298 2299 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) 2300 return translateCallBase(CI, MIRBuilder); 2301 2302 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic"); 2303 2304 if (translateKnownIntrinsic(CI, ID, MIRBuilder)) 2305 return true; 2306 2307 ArrayRef<Register> ResultRegs; 2308 if (!CI.getType()->isVoidTy()) 2309 ResultRegs = getOrCreateVRegs(CI); 2310 2311 // Ignore the callsite attributes. Backend code is most likely not expecting 2312 // an intrinsic to sometimes have side effects and sometimes not. 2313 MachineInstrBuilder MIB = 2314 MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory()); 2315 if (isa<FPMathOperator>(CI)) 2316 MIB->copyIRFlags(CI); 2317 2318 for (auto &Arg : enumerate(CI.arg_operands())) { 2319 // If this is required to be an immediate, don't materialize it in a 2320 // register. 2321 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) { 2322 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) { 2323 // imm arguments are more convenient than cimm (and realistically 2324 // probably sufficient), so use them. 2325 assert(CI->getBitWidth() <= 64 && 2326 "large intrinsic immediates not handled"); 2327 MIB.addImm(CI->getSExtValue()); 2328 } else { 2329 MIB.addFPImm(cast<ConstantFP>(Arg.value())); 2330 } 2331 } else if (auto MD = dyn_cast<MetadataAsValue>(Arg.value())) { 2332 auto *MDN = dyn_cast<MDNode>(MD->getMetadata()); 2333 if (!MDN) // This was probably an MDString. 2334 return false; 2335 MIB.addMetadata(MDN); 2336 } else { 2337 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value()); 2338 if (VRegs.size() > 1) 2339 return false; 2340 MIB.addUse(VRegs[0]); 2341 } 2342 } 2343 2344 // Add a MachineMemOperand if it is a target mem intrinsic. 2345 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 2346 TargetLowering::IntrinsicInfo Info; 2347 // TODO: Add a GlobalISel version of getTgtMemIntrinsic. 
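// For now reuse the SelectionDAG hook: when the target reports that this
// intrinsic accesses memory, attach a MachineMemOperand so the access size,
// alignment and pointer information survive into GMIR.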
2348 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) { 2349 Align Alignment = Info.align.getValueOr( 2350 DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext()))); 2351 2352 uint64_t Size = Info.memVT.getStoreSize(); 2353 MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal), 2354 Info.flags, Size, Alignment)); 2355 } 2356 2357 return true; 2358 } 2359 2360 bool IRTranslator::findUnwindDestinations( 2361 const BasicBlock *EHPadBB, 2362 BranchProbability Prob, 2363 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>> 2364 &UnwindDests) { 2365 EHPersonality Personality = classifyEHPersonality( 2366 EHPadBB->getParent()->getFunction().getPersonalityFn()); 2367 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX; 2368 bool IsCoreCLR = Personality == EHPersonality::CoreCLR; 2369 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX; 2370 bool IsSEH = isAsynchronousEHPersonality(Personality); 2371 2372 if (IsWasmCXX) { 2373 // Ignore this for now. 2374 return false; 2375 } 2376 2377 while (EHPadBB) { 2378 const Instruction *Pad = EHPadBB->getFirstNonPHI(); 2379 BasicBlock *NewEHPadBB = nullptr; 2380 if (isa<LandingPadInst>(Pad)) { 2381 // Stop on landingpads. They are not funclets. 2382 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob); 2383 break; 2384 } 2385 if (isa<CleanupPadInst>(Pad)) { 2386 // Stop on cleanup pads. Cleanups are always funclet entries for all known 2387 // personalities. 2388 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob); 2389 UnwindDests.back().first->setIsEHScopeEntry(); 2390 UnwindDests.back().first->setIsEHFuncletEntry(); 2391 break; 2392 } 2393 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) { 2394 // Add the catchpad handlers to the possible destinations. 2395 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) { 2396 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob); 2397 // For MSVC++ and the CLR, catchblocks are funclets and need prologues. 2398 if (IsMSVCCXX || IsCoreCLR) 2399 UnwindDests.back().first->setIsEHFuncletEntry(); 2400 if (!IsSEH) 2401 UnwindDests.back().first->setIsEHScopeEntry(); 2402 } 2403 NewEHPadBB = CatchSwitch->getUnwindDest(); 2404 } else { 2405 continue; 2406 } 2407 2408 BranchProbabilityInfo *BPI = FuncInfo.BPI; 2409 if (BPI && NewEHPadBB) 2410 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB); 2411 EHPadBB = NewEHPadBB; 2412 } 2413 return true; 2414 } 2415 2416 bool IRTranslator::translateInvoke(const User &U, 2417 MachineIRBuilder &MIRBuilder) { 2418 const InvokeInst &I = cast<InvokeInst>(U); 2419 MCContext &Context = MF->getContext(); 2420 2421 const BasicBlock *ReturnBB = I.getSuccessor(0); 2422 const BasicBlock *EHPadBB = I.getSuccessor(1); 2423 2424 const Function *Fn = I.getCalledFunction(); 2425 if (I.isInlineAsm()) 2426 return false; 2427 2428 // FIXME: support invoking patchpoint and statepoint intrinsics. 2429 if (Fn && Fn->isIntrinsic()) 2430 return false; 2431 2432 // FIXME: support whatever these are. 2433 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) 2434 return false; 2435 2436 // FIXME: support control flow guard targets. 2437 if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) 2438 return false; 2439 2440 // FIXME: support Windows exception handling. 2441 if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI())) 2442 return false; 2443 2444 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about 2445 // the region covered by the try. 
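// The begin/end labels, together with the MF->addInvoke() call below, record
// the range of the call site so the exception table can map a faulting PC
// inside it to this invoke's landing pad.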
2446 MCSymbol *BeginSymbol = Context.createTempSymbol(); 2447 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol); 2448 2449 if (!translateCallBase(I, MIRBuilder)) 2450 return false; 2451 2452 MCSymbol *EndSymbol = Context.createTempSymbol(); 2453 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol); 2454 2455 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests; 2456 BranchProbabilityInfo *BPI = FuncInfo.BPI; 2457 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB(); 2458 BranchProbability EHPadBBProb = 2459 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB) 2460 : BranchProbability::getZero(); 2461 2462 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests)) 2463 return false; 2464 2465 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB), 2466 &ReturnMBB = getMBB(*ReturnBB); 2467 // Update successor info. 2468 addSuccessorWithProb(InvokeMBB, &ReturnMBB); 2469 for (auto &UnwindDest : UnwindDests) { 2470 UnwindDest.first->setIsEHPad(); 2471 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second); 2472 } 2473 InvokeMBB->normalizeSuccProbs(); 2474 2475 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol); 2476 MIRBuilder.buildBr(ReturnMBB); 2477 return true; 2478 } 2479 2480 bool IRTranslator::translateCallBr(const User &U, 2481 MachineIRBuilder &MIRBuilder) { 2482 // FIXME: Implement this. 2483 return false; 2484 } 2485 2486 bool IRTranslator::translateLandingPad(const User &U, 2487 MachineIRBuilder &MIRBuilder) { 2488 const LandingPadInst &LP = cast<LandingPadInst>(U); 2489 2490 MachineBasicBlock &MBB = MIRBuilder.getMBB(); 2491 2492 MBB.setIsEHPad(); 2493 2494 // If there aren't registers to copy the values into (e.g., during SjLj 2495 // exceptions), then don't bother. 2496 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2497 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn(); 2498 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 && 2499 TLI.getExceptionSelectorRegister(PersonalityFn) == 0) 2500 return true; 2501 2502 // If landingpad's return type is token type, we don't create DAG nodes 2503 // for its exception pointer and selector value. The extraction of exception 2504 // pointer or selector value from token type landingpads is not currently 2505 // supported. 2506 if (LP.getType()->isTokenTy()) 2507 return true; 2508 2509 // Add a label to mark the beginning of the landing pad. Deletion of the 2510 // landing pad can thus be detected via the MachineModuleInfo. 2511 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL) 2512 .addSym(MF->addLandingPad(&MBB)); 2513 2514 // If the unwinder does not preserve all registers, ensure that the 2515 // function marks the clobbered registers as used. 2516 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo(); 2517 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF)) 2518 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask); 2519 2520 LLT Ty = getLLTForType(*LP.getType(), *DL); 2521 Register Undef = MRI->createGenericVirtualRegister(Ty); 2522 MIRBuilder.buildUndef(Undef); 2523 2524 SmallVector<LLT, 2> Tys; 2525 for (Type *Ty : cast<StructType>(LP.getType())->elements()) 2526 Tys.push_back(getLLTForType(*Ty, *DL)); 2527 assert(Tys.size() == 2 && "Only two-valued landingpads are supported"); 2528 2529 // Mark exception register as live in. 
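// On entry to the landing pad the unwinder has left the exception pointer and
// selector in fixed physical registers chosen by the target; copy them into
// virtual registers so the rest of the translated function can use them
// normally.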
2530 Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn); 2531 if (!ExceptionReg) 2532 return false; 2533 2534 MBB.addLiveIn(ExceptionReg); 2535 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP); 2536 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg); 2537 2538 Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn); 2539 if (!SelectorReg) 2540 return false; 2541 2542 MBB.addLiveIn(SelectorReg); 2543 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]); 2544 MIRBuilder.buildCopy(PtrVReg, SelectorReg); 2545 MIRBuilder.buildCast(ResRegs[1], PtrVReg); 2546 2547 return true; 2548 } 2549 2550 bool IRTranslator::translateAlloca(const User &U, 2551 MachineIRBuilder &MIRBuilder) { 2552 auto &AI = cast<AllocaInst>(U); 2553 2554 if (AI.isSwiftError()) 2555 return true; 2556 2557 if (AI.isStaticAlloca()) { 2558 Register Res = getOrCreateVReg(AI); 2559 int FI = getOrCreateFrameIndex(AI); 2560 MIRBuilder.buildFrameIndex(Res, FI); 2561 return true; 2562 } 2563 2564 // FIXME: support stack probing for Windows. 2565 if (MF->getTarget().getTargetTriple().isOSWindows()) 2566 return false; 2567 2568 // Now we're in the harder dynamic case. 2569 Register NumElts = getOrCreateVReg(*AI.getArraySize()); 2570 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType()); 2571 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL); 2572 if (MRI->getType(NumElts) != IntPtrTy) { 2573 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy); 2574 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts); 2575 NumElts = ExtElts; 2576 } 2577 2578 Type *Ty = AI.getAllocatedType(); 2579 2580 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy); 2581 Register TySize = 2582 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty))); 2583 MIRBuilder.buildMul(AllocSize, NumElts, TySize); 2584 2585 // Round the size of the allocation up to the stack alignment size 2586 // by add SA-1 to the size. This doesn't overflow because we're computing 2587 // an address inside an alloca. 2588 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign(); 2589 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1); 2590 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne, 2591 MachineInstr::NoUWrap); 2592 auto AlignCst = 2593 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1)); 2594 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst); 2595 2596 Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty)); 2597 if (Alignment <= StackAlign) 2598 Alignment = Align(1); 2599 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment); 2600 2601 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI); 2602 assert(MF->getFrameInfo().hasVarSizedObjects()); 2603 return true; 2604 } 2605 2606 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) { 2607 // FIXME: We may need more info about the type. Because of how LLT works, 2608 // we're completely discarding the i64/double distinction here (amongst 2609 // others). Fortunately the ABIs I know of where that matters don't use va_arg 2610 // anyway but that's not guaranteed. 
2611 MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)}, 2612 {getOrCreateVReg(*U.getOperand(0)), 2613 DL->getABITypeAlign(U.getType()).value()}); 2614 return true; 2615 } 2616 2617 bool IRTranslator::translateInsertElement(const User &U, 2618 MachineIRBuilder &MIRBuilder) { 2619 // If it is a <1 x Ty> vector, use the scalar as it is 2620 // not a legal vector type in LLT. 2621 if (cast<FixedVectorType>(U.getType())->getNumElements() == 1) 2622 return translateCopy(U, *U.getOperand(1), MIRBuilder); 2623 2624 Register Res = getOrCreateVReg(U); 2625 Register Val = getOrCreateVReg(*U.getOperand(0)); 2626 Register Elt = getOrCreateVReg(*U.getOperand(1)); 2627 Register Idx = getOrCreateVReg(*U.getOperand(2)); 2628 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx); 2629 return true; 2630 } 2631 2632 bool IRTranslator::translateExtractElement(const User &U, 2633 MachineIRBuilder &MIRBuilder) { 2634 // If it is a <1 x Ty> vector, use the scalar as it is 2635 // not a legal vector type in LLT. 2636 if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1) 2637 return translateCopy(U, *U.getOperand(0), MIRBuilder); 2638 2639 Register Res = getOrCreateVReg(U); 2640 Register Val = getOrCreateVReg(*U.getOperand(0)); 2641 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 2642 unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits(); 2643 Register Idx; 2644 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) { 2645 if (CI->getBitWidth() != PreferredVecIdxWidth) { 2646 APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth); 2647 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx); 2648 Idx = getOrCreateVReg(*NewIdxCI); 2649 } 2650 } 2651 if (!Idx) 2652 Idx = getOrCreateVReg(*U.getOperand(1)); 2653 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) { 2654 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth); 2655 Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0); 2656 } 2657 MIRBuilder.buildExtractVectorElement(Res, Val, Idx); 2658 return true; 2659 } 2660 2661 bool IRTranslator::translateShuffleVector(const User &U, 2662 MachineIRBuilder &MIRBuilder) { 2663 ArrayRef<int> Mask; 2664 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U)) 2665 Mask = SVI->getShuffleMask(); 2666 else 2667 Mask = cast<ConstantExpr>(U).getShuffleMask(); 2668 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask); 2669 MIRBuilder 2670 .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)}, 2671 {getOrCreateVReg(*U.getOperand(0)), 2672 getOrCreateVReg(*U.getOperand(1))}) 2673 .addShuffleMask(MaskAlloc); 2674 return true; 2675 } 2676 2677 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) { 2678 const PHINode &PI = cast<PHINode>(U); 2679 2680 SmallVector<MachineInstr *, 4> Insts; 2681 for (auto Reg : getOrCreateVRegs(PI)) { 2682 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {}); 2683 Insts.push_back(MIB.getInstr()); 2684 } 2685 2686 PendingPHIs.emplace_back(&PI, std::move(Insts)); 2687 return true; 2688 } 2689 2690 bool IRTranslator::translateAtomicCmpXchg(const User &U, 2691 MachineIRBuilder &MIRBuilder) { 2692 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U); 2693 2694 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2695 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); 2696 2697 Type *ResType = I.getType(); 2698 Type *ValType = ResType->Type::getStructElementType(0); 2699 2700 auto Res = getOrCreateVRegs(I); 2701 Register OldValRes = Res[0]; 2702 Register 
SuccessRes = Res[1]; 2703 Register Addr = getOrCreateVReg(*I.getPointerOperand()); 2704 Register Cmp = getOrCreateVReg(*I.getCompareOperand()); 2705 Register NewVal = getOrCreateVReg(*I.getNewValOperand()); 2706 2707 AAMDNodes AAMetadata; 2708 I.getAAMetadata(AAMetadata); 2709 2710 MIRBuilder.buildAtomicCmpXchgWithSuccess( 2711 OldValRes, SuccessRes, Addr, Cmp, NewVal, 2712 *MF->getMachineMemOperand( 2713 MachinePointerInfo(I.getPointerOperand()), Flags, 2714 DL->getTypeStoreSize(ValType), getMemOpAlign(I), AAMetadata, nullptr, 2715 I.getSyncScopeID(), I.getSuccessOrdering(), I.getFailureOrdering())); 2716 return true; 2717 } 2718 2719 bool IRTranslator::translateAtomicRMW(const User &U, 2720 MachineIRBuilder &MIRBuilder) { 2721 const AtomicRMWInst &I = cast<AtomicRMWInst>(U); 2722 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2723 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); 2724 2725 Type *ResType = I.getType(); 2726 2727 Register Res = getOrCreateVReg(I); 2728 Register Addr = getOrCreateVReg(*I.getPointerOperand()); 2729 Register Val = getOrCreateVReg(*I.getValOperand()); 2730 2731 unsigned Opcode = 0; 2732 switch (I.getOperation()) { 2733 default: 2734 return false; 2735 case AtomicRMWInst::Xchg: 2736 Opcode = TargetOpcode::G_ATOMICRMW_XCHG; 2737 break; 2738 case AtomicRMWInst::Add: 2739 Opcode = TargetOpcode::G_ATOMICRMW_ADD; 2740 break; 2741 case AtomicRMWInst::Sub: 2742 Opcode = TargetOpcode::G_ATOMICRMW_SUB; 2743 break; 2744 case AtomicRMWInst::And: 2745 Opcode = TargetOpcode::G_ATOMICRMW_AND; 2746 break; 2747 case AtomicRMWInst::Nand: 2748 Opcode = TargetOpcode::G_ATOMICRMW_NAND; 2749 break; 2750 case AtomicRMWInst::Or: 2751 Opcode = TargetOpcode::G_ATOMICRMW_OR; 2752 break; 2753 case AtomicRMWInst::Xor: 2754 Opcode = TargetOpcode::G_ATOMICRMW_XOR; 2755 break; 2756 case AtomicRMWInst::Max: 2757 Opcode = TargetOpcode::G_ATOMICRMW_MAX; 2758 break; 2759 case AtomicRMWInst::Min: 2760 Opcode = TargetOpcode::G_ATOMICRMW_MIN; 2761 break; 2762 case AtomicRMWInst::UMax: 2763 Opcode = TargetOpcode::G_ATOMICRMW_UMAX; 2764 break; 2765 case AtomicRMWInst::UMin: 2766 Opcode = TargetOpcode::G_ATOMICRMW_UMIN; 2767 break; 2768 case AtomicRMWInst::FAdd: 2769 Opcode = TargetOpcode::G_ATOMICRMW_FADD; 2770 break; 2771 case AtomicRMWInst::FSub: 2772 Opcode = TargetOpcode::G_ATOMICRMW_FSUB; 2773 break; 2774 } 2775 2776 AAMDNodes AAMetadata; 2777 I.getAAMetadata(AAMetadata); 2778 2779 MIRBuilder.buildAtomicRMW( 2780 Opcode, Res, Addr, Val, 2781 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), 2782 Flags, DL->getTypeStoreSize(ResType), 2783 getMemOpAlign(I), AAMetadata, nullptr, 2784 I.getSyncScopeID(), I.getOrdering())); 2785 return true; 2786 } 2787 2788 bool IRTranslator::translateFence(const User &U, 2789 MachineIRBuilder &MIRBuilder) { 2790 const FenceInst &Fence = cast<FenceInst>(U); 2791 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()), 2792 Fence.getSyncScopeID()); 2793 return true; 2794 } 2795 2796 bool IRTranslator::translateFreeze(const User &U, 2797 MachineIRBuilder &MIRBuilder) { 2798 const ArrayRef<Register> DstRegs = getOrCreateVRegs(U); 2799 const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0)); 2800 2801 assert(DstRegs.size() == SrcRegs.size() && 2802 "Freeze with different source and destination type?"); 2803 2804 for (unsigned I = 0; I < DstRegs.size(); ++I) { 2805 MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]); 2806 } 2807 2808 return true; 2809 } 2810 2811 void IRTranslator::finishPendingPhis() { 2812 #ifndef NDEBUG 2813 
DILocationVerifier Verifier; 2814 GISelObserverWrapper WrapperObserver(&Verifier); 2815 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver); 2816 #endif // ifndef NDEBUG 2817 for (auto &Phi : PendingPHIs) { 2818 const PHINode *PI = Phi.first; 2819 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second; 2820 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent(); 2821 EntryBuilder->setDebugLoc(PI->getDebugLoc()); 2822 #ifndef NDEBUG 2823 Verifier.setCurrentInst(PI); 2824 #endif // ifndef NDEBUG 2825 2826 SmallSet<const MachineBasicBlock *, 16> SeenPreds; 2827 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) { 2828 auto IRPred = PI->getIncomingBlock(i); 2829 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i)); 2830 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) { 2831 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred)) 2832 continue; 2833 SeenPreds.insert(Pred); 2834 for (unsigned j = 0; j < ValRegs.size(); ++j) { 2835 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]); 2836 MIB.addUse(ValRegs[j]); 2837 MIB.addMBB(Pred); 2838 } 2839 } 2840 } 2841 } 2842 } 2843 2844 bool IRTranslator::valueIsSplit(const Value &V, 2845 SmallVectorImpl<uint64_t> *Offsets) { 2846 SmallVector<LLT, 4> SplitTys; 2847 if (Offsets && !Offsets->empty()) 2848 Offsets->clear(); 2849 computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets); 2850 return SplitTys.size() > 1; 2851 } 2852 2853 bool IRTranslator::translate(const Instruction &Inst) { 2854 CurBuilder->setDebugLoc(Inst.getDebugLoc()); 2855 // We only emit constants into the entry block from here. To prevent jumpy 2856 // debug behaviour set the line to 0. 2857 if (const DebugLoc &DL = Inst.getDebugLoc()) 2858 EntryBuilder->setDebugLoc(DILocation::get( 2859 Inst.getContext(), 0, 0, DL.getScope(), DL.getInlinedAt())); 2860 else 2861 EntryBuilder->setDebugLoc(DebugLoc()); 2862 2863 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2864 if (TLI.fallBackToDAGISel(Inst)) 2865 return false; 2866 2867 switch (Inst.getOpcode()) { 2868 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 2869 case Instruction::OPCODE: \ 2870 return translate##OPCODE(Inst, *CurBuilder.get()); 2871 #include "llvm/IR/Instruction.def" 2872 default: 2873 return false; 2874 } 2875 } 2876 2877 bool IRTranslator::translate(const Constant &C, Register Reg) { 2878 if (auto CI = dyn_cast<ConstantInt>(&C)) 2879 EntryBuilder->buildConstant(Reg, *CI); 2880 else if (auto CF = dyn_cast<ConstantFP>(&C)) 2881 EntryBuilder->buildFConstant(Reg, *CF); 2882 else if (isa<UndefValue>(C)) 2883 EntryBuilder->buildUndef(Reg); 2884 else if (isa<ConstantPointerNull>(C)) 2885 EntryBuilder->buildConstant(Reg, 0); 2886 else if (auto GV = dyn_cast<GlobalValue>(&C)) 2887 EntryBuilder->buildGlobalValue(Reg, GV); 2888 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) { 2889 if (!CAZ->getType()->isVectorTy()) 2890 return false; 2891 // Return the scalar if it is a <1 x Ty> vector. 2892 if (CAZ->getNumElements() == 1) 2893 return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder.get()); 2894 SmallVector<Register, 4> Ops; 2895 for (unsigned i = 0; i < CAZ->getNumElements(); ++i) { 2896 Constant &Elt = *CAZ->getElementValue(i); 2897 Ops.push_back(getOrCreateVReg(Elt)); 2898 } 2899 EntryBuilder->buildBuildVector(Reg, Ops); 2900 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) { 2901 // Return the scalar if it is a <1 x Ty> vector. 
2902 if (CV->getNumElements() == 1) 2903 return translateCopy(C, *CV->getElementAsConstant(0), 2904 *EntryBuilder.get()); 2905 SmallVector<Register, 4> Ops; 2906 for (unsigned i = 0; i < CV->getNumElements(); ++i) { 2907 Constant &Elt = *CV->getElementAsConstant(i); 2908 Ops.push_back(getOrCreateVReg(Elt)); 2909 } 2910 EntryBuilder->buildBuildVector(Reg, Ops); 2911 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) { 2912 switch(CE->getOpcode()) { 2913 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 2914 case Instruction::OPCODE: \ 2915 return translate##OPCODE(*CE, *EntryBuilder.get()); 2916 #include "llvm/IR/Instruction.def" 2917 default: 2918 return false; 2919 } 2920 } else if (auto CV = dyn_cast<ConstantVector>(&C)) { 2921 if (CV->getNumOperands() == 1) 2922 return translateCopy(C, *CV->getOperand(0), *EntryBuilder.get()); 2923 SmallVector<Register, 4> Ops; 2924 for (unsigned i = 0; i < CV->getNumOperands(); ++i) { 2925 Ops.push_back(getOrCreateVReg(*CV->getOperand(i))); 2926 } 2927 EntryBuilder->buildBuildVector(Reg, Ops); 2928 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) { 2929 EntryBuilder->buildBlockAddress(Reg, BA); 2930 } else 2931 return false; 2932 2933 return true; 2934 } 2935 2936 void IRTranslator::finalizeBasicBlock() { 2937 for (auto &BTB : SL->BitTestCases) { 2938 // Emit header first, if it wasn't already emitted. 2939 if (!BTB.Emitted) 2940 emitBitTestHeader(BTB, BTB.Parent); 2941 2942 BranchProbability UnhandledProb = BTB.Prob; 2943 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) { 2944 UnhandledProb -= BTB.Cases[j].ExtraProb; 2945 // Set the current basic block to the mbb we wish to insert the code into 2946 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB; 2947 // If all cases cover a contiguous range, it is not necessary to jump to 2948 // the default block after the last bit test fails. This is because the 2949 // range check during bit test header creation has guaranteed that every 2950 // case here doesn't go outside the range. In this case, there is no need 2951 // to perform the last bit test, as it will always be true. Instead, make 2952 // the second-to-last bit-test fall through to the target of the last bit 2953 // test, and delete the last bit test. 2954 2955 MachineBasicBlock *NextMBB; 2956 if (BTB.ContiguousRange && j + 2 == ej) { 2957 // Second-to-last bit-test with contiguous range: fall through to the 2958 // target of the final bit test. 2959 NextMBB = BTB.Cases[j + 1].TargetBB; 2960 } else if (j + 1 == ej) { 2961 // For the last bit test, fall through to Default. 2962 NextMBB = BTB.Default; 2963 } else { 2964 // Otherwise, fall through to the next bit test. 2965 NextMBB = BTB.Cases[j + 1].ThisBB; 2966 } 2967 2968 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB); 2969 2970 // FIXME delete this block below? 2971 if (BTB.ContiguousRange && j + 2 == ej) { 2972 // Since we're not going to use the final bit test, remove it. 2973 BTB.Cases.pop_back(); 2974 break; 2975 } 2976 } 2977 // This is "default" BB. We have two jumps to it. From "header" BB and from 2978 // last "case" BB, unless the latter was skipped. 2979 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(), 2980 BTB.Default->getBasicBlock()}; 2981 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent); 2982 if (!BTB.ContiguousRange) { 2983 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB); 2984 } 2985 } 2986 SL->BitTestCases.clear(); 2987 2988 for (auto &JTCase : SL->JTCases) { 2989 // Emit header first, if it wasn't already emitted. 
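// The header block normalises the switch value and performs the range check;
// the indexed branch itself is emitted separately into the jump-table block
// recorded in JTCase.second.MBB.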
2990 if (!JTCase.first.Emitted) 2991 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB); 2992 2993 emitJumpTable(JTCase.second, JTCase.second.MBB); 2994 } 2995 SL->JTCases.clear(); 2996 2997 for (auto &SwCase : SL->SwitchCases) 2998 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder); 2999 SL->SwitchCases.clear(); 3000 } 3001 3002 void IRTranslator::finalizeFunction() { 3003 // Release the memory used by the different maps we 3004 // needed during the translation. 3005 PendingPHIs.clear(); 3006 VMap.reset(); 3007 FrameIndices.clear(); 3008 MachinePreds.clear(); 3009 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it 3010 // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid 3011 // destroying it twice (in ~IRTranslator() and ~LLVMContext()) 3012 EntryBuilder.reset(); 3013 CurBuilder.reset(); 3014 FuncInfo.clear(); 3015 } 3016 3017 /// Returns true if a BasicBlock \p BB within a variadic function contains a 3018 /// variadic musttail call. 3019 static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) { 3020 if (!IsVarArg) 3021 return false; 3022 3023 // Walk the block backwards, because tail calls usually only appear at the end 3024 // of a block. 3025 return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) { 3026 const auto *CI = dyn_cast<CallInst>(&I); 3027 return CI && CI->isMustTailCall(); 3028 }); 3029 } 3030 3031 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { 3032 MF = &CurMF; 3033 const Function &F = MF->getFunction(); 3034 if (F.empty()) 3035 return false; 3036 GISelCSEAnalysisWrapper &Wrapper = 3037 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper(); 3038 // Set the CSEConfig and run the analysis. 3039 GISelCSEInfo *CSEInfo = nullptr; 3040 TPC = &getAnalysis<TargetPassConfig>(); 3041 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences() 3042 ? EnableCSEInIRTranslator 3043 : TPC->isGISelCSEEnabled(); 3044 3045 if (EnableCSE) { 3046 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF); 3047 CSEInfo = &Wrapper.get(TPC->getCSEConfig()); 3048 EntryBuilder->setCSEInfo(CSEInfo); 3049 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF); 3050 CurBuilder->setCSEInfo(CSEInfo); 3051 } else { 3052 EntryBuilder = std::make_unique<MachineIRBuilder>(); 3053 CurBuilder = std::make_unique<MachineIRBuilder>(); 3054 } 3055 CLI = MF->getSubtarget().getCallLowering(); 3056 CurBuilder->setMF(*MF); 3057 EntryBuilder->setMF(*MF); 3058 MRI = &MF->getRegInfo(); 3059 DL = &F.getParent()->getDataLayout(); 3060 ORE = std::make_unique<OptimizationRemarkEmitter>(&F); 3061 const TargetMachine &TM = MF->getTarget(); 3062 TM.resetTargetOptions(F); 3063 EnableOpts = OptLevel != CodeGenOpt::None && !skipFunction(F); 3064 FuncInfo.MF = MF; 3065 if (EnableOpts) 3066 FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI(); 3067 else 3068 FuncInfo.BPI = nullptr; 3069 3070 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF); 3071 3072 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 3073 3074 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo); 3075 SL->init(TLI, TM, *DL); 3076 3077 3078 3079 assert(PendingPHIs.empty() && "stale PHIs"); 3080 3081 if (!DL->isLittleEndian()) { 3082 // Currently we don't properly handle big endian code. 
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or
  // not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic-block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
  SwiftError.setFunction(CurMF);
  SwiftError.createEntriesInEntryBlock(DbgLoc);

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();

    if (!HasMustTailInVarArgFn)
      HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
  }

  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);

  // Make our arguments/constants entry block fall through to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  if (CLI->fallBackToDAGISel(F)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower function: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Lower the actual args into this basic block.
  SmallVector<ArrayRef<Register>, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()).isZero())
      continue; // Don't handle zero sized types.
    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
    VRegArgs.push_back(VRegs);

    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs, FuncInfo)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
    RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder->setMBB(MBB);
      HasTailCall = false;
      for (const Instruction &Inst : *BB) {
        // If we translated a tail call in the last step, then we know
        // everything after the call is either a return, or something that is
        // handled by the call itself. (E.g. a lifetime marker or assume
        // intrinsic.) In this case, we should stop translating the block and
        // move on.
        if (HasTailCall)
          break;
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }

      finalizeBasicBlock();
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }

  finishPendingPhis();

  SwiftError.propagateVRegs();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}