xref: /freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp (revision 62987288060ff68c817b7056815aa9fb8ba8ecd7)
1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the IRTranslator class.
10 //===----------------------------------------------------------------------===//
11 
12 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
13 #include "llvm/ADT/PostOrderIterator.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/ScopeExit.h"
16 #include "llvm/ADT/SmallSet.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/Analysis/AliasAnalysis.h"
19 #include "llvm/Analysis/AssumptionCache.h"
20 #include "llvm/Analysis/BranchProbabilityInfo.h"
21 #include "llvm/Analysis/Loads.h"
22 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/Analysis/VectorUtils.h"
25 #include "llvm/CodeGen/Analysis.h"
26 #include "llvm/CodeGen/GlobalISel/CSEInfo.h"
27 #include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
28 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
29 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
30 #include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
31 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
32 #include "llvm/CodeGen/LowLevelTypeUtils.h"
33 #include "llvm/CodeGen/MachineBasicBlock.h"
34 #include "llvm/CodeGen/MachineFrameInfo.h"
35 #include "llvm/CodeGen/MachineFunction.h"
36 #include "llvm/CodeGen/MachineInstrBuilder.h"
37 #include "llvm/CodeGen/MachineMemOperand.h"
38 #include "llvm/CodeGen/MachineModuleInfo.h"
39 #include "llvm/CodeGen/MachineOperand.h"
40 #include "llvm/CodeGen/MachineRegisterInfo.h"
41 #include "llvm/CodeGen/RuntimeLibcallUtil.h"
42 #include "llvm/CodeGen/StackProtector.h"
43 #include "llvm/CodeGen/SwitchLoweringUtils.h"
44 #include "llvm/CodeGen/TargetFrameLowering.h"
45 #include "llvm/CodeGen/TargetInstrInfo.h"
46 #include "llvm/CodeGen/TargetLowering.h"
47 #include "llvm/CodeGen/TargetOpcodes.h"
48 #include "llvm/CodeGen/TargetPassConfig.h"
49 #include "llvm/CodeGen/TargetRegisterInfo.h"
50 #include "llvm/CodeGen/TargetSubtargetInfo.h"
51 #include "llvm/CodeGenTypes/LowLevelType.h"
52 #include "llvm/IR/BasicBlock.h"
53 #include "llvm/IR/CFG.h"
54 #include "llvm/IR/Constant.h"
55 #include "llvm/IR/Constants.h"
56 #include "llvm/IR/DataLayout.h"
57 #include "llvm/IR/DerivedTypes.h"
58 #include "llvm/IR/DiagnosticInfo.h"
59 #include "llvm/IR/Function.h"
60 #include "llvm/IR/GetElementPtrTypeIterator.h"
61 #include "llvm/IR/InlineAsm.h"
62 #include "llvm/IR/InstrTypes.h"
63 #include "llvm/IR/Instructions.h"
64 #include "llvm/IR/IntrinsicInst.h"
65 #include "llvm/IR/Intrinsics.h"
66 #include "llvm/IR/IntrinsicsAMDGPU.h"
67 #include "llvm/IR/LLVMContext.h"
68 #include "llvm/IR/Metadata.h"
69 #include "llvm/IR/PatternMatch.h"
70 #include "llvm/IR/Statepoint.h"
71 #include "llvm/IR/Type.h"
72 #include "llvm/IR/User.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/InitializePasses.h"
75 #include "llvm/MC/MCContext.h"
76 #include "llvm/Pass.h"
77 #include "llvm/Support/Casting.h"
78 #include "llvm/Support/CodeGen.h"
79 #include "llvm/Support/Debug.h"
80 #include "llvm/Support/ErrorHandling.h"
81 #include "llvm/Support/MathExtras.h"
82 #include "llvm/Support/raw_ostream.h"
83 #include "llvm/Target/TargetIntrinsicInfo.h"
84 #include "llvm/Target/TargetMachine.h"
85 #include "llvm/Transforms/Utils/Local.h"
86 #include "llvm/Transforms/Utils/MemoryOpRemark.h"
87 #include <algorithm>
88 #include <cassert>
89 #include <cstdint>
90 #include <iterator>
91 #include <optional>
92 #include <string>
93 #include <utility>
94 #include <vector>
95 
96 #define DEBUG_TYPE "irtranslator"
97 
98 using namespace llvm;
99 
100 static cl::opt<bool>
101     EnableCSEInIRTranslator("enable-cse-in-irtranslator",
102                             cl::desc("Should enable CSE in irtranslator"),
103                             cl::Optional, cl::init(false));
104 char IRTranslator::ID = 0;
105 
106 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
107                 false, false)
108 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
109 INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
110 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
111 INITIALIZE_PASS_DEPENDENCY(StackProtector)
112 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
113 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
114                 false, false)
115 
116 static void reportTranslationError(MachineFunction &MF,
117                                    const TargetPassConfig &TPC,
118                                    OptimizationRemarkEmitter &ORE,
119                                    OptimizationRemarkMissed &R) {
120   MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
121 
122   // Print the function name explicitly if we don't have a debug location (which
123   // makes the diagnostic less useful) or if we're going to emit a raw error.
124   if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
125     R << (" (in function: " + MF.getName() + ")").str();
126 
127   if (TPC.isGlobalISelAbortEnabled())
128     report_fatal_error(Twine(R.getMsg()));
129   else
130     ORE.emit(R);
131 }
132 
133 IRTranslator::IRTranslator(CodeGenOptLevel optlevel)
134     : MachineFunctionPass(ID), OptLevel(optlevel) {}
135 
136 #ifndef NDEBUG
137 namespace {
138 /// Verify that every instruction created has the same DILocation as the
139 /// instruction being translated.
140 class DILocationVerifier : public GISelChangeObserver {
141   const Instruction *CurrInst = nullptr;
142 
143 public:
144   DILocationVerifier() = default;
145   ~DILocationVerifier() = default;
146 
147   const Instruction *getCurrentInst() const { return CurrInst; }
148   void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
149 
150   void erasingInstr(MachineInstr &MI) override {}
151   void changingInstr(MachineInstr &MI) override {}
152   void changedInstr(MachineInstr &MI) override {}
153 
154   void createdInstr(MachineInstr &MI) override {
155     assert(getCurrentInst() && "Inserted instruction without a current MI");
156 
157     // Only print the check message if we're actually checking it.
158 #ifndef NDEBUG
159     LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
160                       << " was copied to " << MI);
161 #endif
162     // We allow insts in the entry block to have no debug loc because
163     // they could have originated from constants, and we don't want a jumpy
164     // debug experience.
165     assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
166             (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
167             (MI.isDebugInstr())) &&
168            "Line info was not transferred to all instructions");
169   }
170 };
171 } // namespace
172 #endif // ifndef NDEBUG
173 
174 
175 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
176   AU.addRequired<StackProtector>();
177   AU.addRequired<TargetPassConfig>();
178   AU.addRequired<GISelCSEAnalysisWrapperPass>();
179   AU.addRequired<AssumptionCacheTracker>();
180   if (OptLevel != CodeGenOptLevel::None) {
181     AU.addRequired<BranchProbabilityInfoWrapperPass>();
182     AU.addRequired<AAResultsWrapperPass>();
183   }
184   AU.addRequired<TargetLibraryInfoWrapperPass>();
185   AU.addPreserved<TargetLibraryInfoWrapperPass>();
186   getSelectionDAGFallbackAnalysisUsage(AU);
187   MachineFunctionPass::getAnalysisUsage(AU);
188 }
189 
190 IRTranslator::ValueToVRegInfo::VRegListT &
191 IRTranslator::allocateVRegs(const Value &Val) {
192   auto VRegsIt = VMap.findVRegs(Val);
193   if (VRegsIt != VMap.vregs_end())
194     return *VRegsIt->second;
195   auto *Regs = VMap.getVRegs(Val);
196   auto *Offsets = VMap.getOffsets(Val);
197   SmallVector<LLT, 4> SplitTys;
198   computeValueLLTs(*DL, *Val.getType(), SplitTys,
199                    Offsets->empty() ? Offsets : nullptr);
200   for (unsigned i = 0; i < SplitTys.size(); ++i)
201     Regs->push_back(0);
202   return *Regs;
203 }
204 
205 ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
206   auto VRegsIt = VMap.findVRegs(Val);
207   if (VRegsIt != VMap.vregs_end())
208     return *VRegsIt->second;
209 
210   if (Val.getType()->isVoidTy())
211     return *VMap.getVRegs(Val);
212 
213   // Create entry for this type.
214   auto *VRegs = VMap.getVRegs(Val);
215   auto *Offsets = VMap.getOffsets(Val);
216 
217   if (!Val.getType()->isTokenTy())
218     assert(Val.getType()->isSized() &&
219            "Don't know how to create an empty vreg");
220 
221   SmallVector<LLT, 4> SplitTys;
222   computeValueLLTs(*DL, *Val.getType(), SplitTys,
223                    Offsets->empty() ? Offsets : nullptr);
224 
225   if (!isa<Constant>(Val)) {
226     for (auto Ty : SplitTys)
227       VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
228     return *VRegs;
229   }
230 
231   if (Val.getType()->isAggregateType()) {
232     // UndefValue, ConstantAggregateZero
233     auto &C = cast<Constant>(Val);
234     unsigned Idx = 0;
235     while (auto Elt = C.getAggregateElement(Idx++)) {
236       auto EltRegs = getOrCreateVRegs(*Elt);
237       llvm::copy(EltRegs, std::back_inserter(*VRegs));
238     }
239   } else {
240     assert(SplitTys.size() == 1 && "unexpectedly split LLT");
241     VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
242     bool Success = translate(cast<Constant>(Val), VRegs->front());
243     if (!Success) {
244       OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
245                                  MF->getFunction().getSubprogram(),
246                                  &MF->getFunction().getEntryBlock());
247       R << "unable to translate constant: " << ore::NV("Type", Val.getType());
248       reportTranslationError(*MF, *TPC, *ORE, R);
249       return *VRegs;
250     }
251   }
252 
253   return *VRegs;
254 }
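// Illustrative sketch (hypothetical IR value, not part of this file): given
//   %pair = insertvalue {i64, float} undef, i64 %x, 0
// getOrCreateVRegs(%pair) returns two generic virtual registers, one s64 and
// one s32, because computeValueLLTs splits the aggregate into its leaf types;
// the recorded offsets let later translation (e.g. of extractvalue) map a
// member index back to the right vreg in this list.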
255 
256 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
257   auto MapEntry = FrameIndices.find(&AI);
258   if (MapEntry != FrameIndices.end())
259     return MapEntry->second;
260 
261   uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
262   uint64_t Size =
263       ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
264 
265   // Always allocate at least one byte.
266   Size = std::max<uint64_t>(Size, 1u);
267 
268   int &FI = FrameIndices[&AI];
269   FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
270   return FI;
271 }
272 
273 Align IRTranslator::getMemOpAlign(const Instruction &I) {
274   if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
275     return SI->getAlign();
276   if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
277     return LI->getAlign();
278   if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
279     return AI->getAlign();
280   if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
281     return AI->getAlign();
282 
283   OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
284   R << "unable to translate memop: " << ore::NV("Opcode", &I);
285   reportTranslationError(*MF, *TPC, *ORE, R);
286   return Align(1);
287 }
288 
289 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
290   MachineBasicBlock *&MBB = BBToMBB[&BB];
291   assert(MBB && "BasicBlock was not encountered before");
292   return *MBB;
293 }
294 
295 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
296   assert(NewPred && "new predecessor must be a real MachineBasicBlock");
297   MachinePreds[Edge].push_back(NewPred);
298 }
299 
300 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
301                                      MachineIRBuilder &MIRBuilder) {
302   // Get or create a virtual register for each value.
303   // Unless the value is a Constant => loadimm cst?
304   // or inline constant each time?
305   // Creation of a virtual register needs to have a size.
306   Register Op0 = getOrCreateVReg(*U.getOperand(0));
307   Register Op1 = getOrCreateVReg(*U.getOperand(1));
308   Register Res = getOrCreateVReg(U);
309   uint32_t Flags = 0;
310   if (isa<Instruction>(U)) {
311     const Instruction &I = cast<Instruction>(U);
312     Flags = MachineInstr::copyFlagsFromInstruction(I);
313   }
314 
315   MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
316   return true;
317 }
318 
319 bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
320                                     MachineIRBuilder &MIRBuilder) {
321   Register Op0 = getOrCreateVReg(*U.getOperand(0));
322   Register Res = getOrCreateVReg(U);
323   uint32_t Flags = 0;
324   if (isa<Instruction>(U)) {
325     const Instruction &I = cast<Instruction>(U);
326     Flags = MachineInstr::copyFlagsFromInstruction(I);
327   }
328   MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
329   return true;
330 }
331 
332 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
333   return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
334 }
335 
336 bool IRTranslator::translateCompare(const User &U,
337                                     MachineIRBuilder &MIRBuilder) {
338   auto *CI = cast<CmpInst>(&U);
339   Register Op0 = getOrCreateVReg(*U.getOperand(0));
340   Register Op1 = getOrCreateVReg(*U.getOperand(1));
341   Register Res = getOrCreateVReg(U);
342   CmpInst::Predicate Pred = CI->getPredicate();
343   if (CmpInst::isIntPredicate(Pred))
344     MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
345   else if (Pred == CmpInst::FCMP_FALSE)
346     MIRBuilder.buildCopy(
347         Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
348   else if (Pred == CmpInst::FCMP_TRUE)
349     MIRBuilder.buildCopy(
350         Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
351   else {
352     uint32_t Flags = 0;
353     if (CI)
354       Flags = MachineInstr::copyFlagsFromInstruction(*CI);
355     MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);
356   }
357 
358   return true;
359 }
360 
361 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
362   const ReturnInst &RI = cast<ReturnInst>(U);
363   const Value *Ret = RI.getReturnValue();
364   if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
365     Ret = nullptr;
366 
367   ArrayRef<Register> VRegs;
368   if (Ret)
369     VRegs = getOrCreateVRegs(*Ret);
370 
371   Register SwiftErrorVReg = 0;
372   if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
373     SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
374         &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
375   }
376 
377   // The target may mess with the insertion point, but
378   // this is not important as a return is the last instruction
379   // of the block anyway.
380   return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
381 }
382 
383 void IRTranslator::emitBranchForMergedCondition(
384     const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
385     MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
386     BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
387   // If the leaf of the tree is a comparison, merge the condition into
388   // the caseblock.
389   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
390     CmpInst::Predicate Condition;
391     if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
392       Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
393     } else {
394       const FCmpInst *FC = cast<FCmpInst>(Cond);
395       Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
396     }
397 
398     SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
399                            BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
400                            CurBuilder->getDebugLoc(), TProb, FProb);
401     SL->SwitchCases.push_back(CB);
402     return;
403   }
404 
405   // Create a CaseBlock record representing this branch.
406   CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
407   SwitchCG::CaseBlock CB(
408       Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
409       nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
410   SL->SwitchCases.push_back(CB);
411 }
412 
413 static bool isValInBlock(const Value *V, const BasicBlock *BB) {
414   if (const Instruction *I = dyn_cast<Instruction>(V))
415     return I->getParent() == BB;
416   return true;
417 }
418 
419 void IRTranslator::findMergedConditions(
420     const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
421     MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
422     Instruction::BinaryOps Opc, BranchProbability TProb,
423     BranchProbability FProb, bool InvertCond) {
424   using namespace PatternMatch;
425   assert((Opc == Instruction::And || Opc == Instruction::Or) &&
426          "Expected Opc to be AND/OR");
427   // Skip over a NOT that isn't part of the tree and remember to invert the op
428   // and operands at the next level.
429   Value *NotCond;
430   if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
431       isValInBlock(NotCond, CurBB->getBasicBlock())) {
432     findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
433                          !InvertCond);
434     return;
435   }
436 
437   const Instruction *BOp = dyn_cast<Instruction>(Cond);
438   const Value *BOpOp0, *BOpOp1;
439   // Compute the effective opcode for Cond, taking into account whether it needs
440   // to be inverted, e.g.
441   //   and (not (or A, B)), C
442   // gets lowered as
443   //   and (and (not A, not B), C)
444   Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
445   if (BOp) {
446     BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
447                ? Instruction::And
448                : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
449                       ? Instruction::Or
450                       : (Instruction::BinaryOps)0);
451     if (InvertCond) {
452       if (BOpc == Instruction::And)
453         BOpc = Instruction::Or;
454       else if (BOpc == Instruction::Or)
455         BOpc = Instruction::And;
456     }
457   }
458 
459   // If this node is not part of the or/and tree, emit it as a branch.
460   // Note that all nodes in the tree should have same opcode.
461   bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
462   if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
463       !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
464       !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
465     emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
466                                  InvertCond);
467     return;
468   }
469 
470   //  Create TmpBB after CurBB.
471   MachineFunction::iterator BBI(CurBB);
472   MachineBasicBlock *TmpBB =
473       MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
474   CurBB->getParent()->insert(++BBI, TmpBB);
475 
476   if (Opc == Instruction::Or) {
477     // Codegen X | Y as:
478     // BB1:
479     //   jmp_if_X TBB
480     //   jmp TmpBB
481     // TmpBB:
482     //   jmp_if_Y TBB
483     //   jmp FBB
484     //
485 
486     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
487     // The requirement is that
488     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
489     //     = TrueProb for original BB.
490     // Assuming the original probabilities are A and B, one choice is to set
491     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
492     // A/(1+B) and 2B/(1+B). This choice assumes that
493     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
494     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
495     // TmpBB, but the math is more complicated.
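    // Worked example with illustrative numbers (not taken from the source):
    // suppose TProb = A = 3/8 and FProb = B = 5/8. BB1 gets A/2 = 3/16 (true)
    // and A/2 + B = 13/16 (false); after normalization TmpBB gets
    // A/(1+B) = 3/13 and 2B/(1+B) = 10/13. Checking the requirement above:
    //   3/16 + (13/16 * 3/13) = 3/16 + 3/16 = 3/8 = A.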
496 
497     auto NewTrueProb = TProb / 2;
498     auto NewFalseProb = TProb / 2 + FProb;
499     // Emit the LHS condition.
500     findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
501                          NewFalseProb, InvertCond);
502 
503     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
504     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
505     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
506     // Emit the RHS condition into TmpBB.
507     findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
508                          Probs[1], InvertCond);
509   } else {
510     assert(Opc == Instruction::And && "Unknown merge op!");
511     // Codegen X & Y as:
512     // BB1:
513     //   jmp_if_X TmpBB
514     //   jmp FBB
515     // TmpBB:
516     //   jmp_if_Y TBB
517     //   jmp FBB
518     //
519     //  This requires creation of TmpBB after CurBB.
520 
521     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
522     // The requirement is that
523     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
524     //     = FalseProb for original BB.
525     // Assuming the original probabilities are A and B, one choice is to set
526     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
527     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
528     // TrueProb for BB1 * FalseProb for TmpBB.
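    // Worked example with illustrative numbers (not taken from the source):
    // with TProb = A = 3/8 and FProb = B = 5/8, BB1 gets A + B/2 = 11/16
    // (true) and B/2 = 5/16 (false); after normalization TmpBB gets
    // 2A/(1+A) = 6/11 and B/(1+A) = 5/11. Checking the requirement above:
    //   5/16 + (11/16 * 5/11) = 5/16 + 5/16 = 5/8 = B.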
529 
530     auto NewTrueProb = TProb + FProb / 2;
531     auto NewFalseProb = FProb / 2;
532     // Emit the LHS condition.
533     findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
534                          NewFalseProb, InvertCond);
535 
536     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
537     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
538     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
539     // Emit the RHS condition into TmpBB.
540     findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
541                          Probs[1], InvertCond);
542   }
543 }
544 
545 bool IRTranslator::shouldEmitAsBranches(
546     const std::vector<SwitchCG::CaseBlock> &Cases) {
547   // For multiple cases, it's better to emit as branches.
548   if (Cases.size() != 2)
549     return true;
550 
551   // If this is two comparisons of the same values or'd or and'd together, they
552   // will get folded into a single comparison, so don't emit two blocks.
553   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
554        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
555       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
556        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
557     return false;
558   }
559 
560   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
561   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
562   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
563       Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
564       isa<Constant>(Cases[0].CmpRHS) &&
565       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
566     if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
567         Cases[0].TrueBB == Cases[1].ThisBB)
568       return false;
569     if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
570         Cases[0].FalseBB == Cases[1].ThisBB)
571       return false;
572   }
573 
574   return true;
575 }
576 
577 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
578   const BranchInst &BrInst = cast<BranchInst>(U);
579   auto &CurMBB = MIRBuilder.getMBB();
580   auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));
581 
582   if (BrInst.isUnconditional()) {
583     // If the unconditional target is the layout successor, fallthrough.
584     if (OptLevel == CodeGenOptLevel::None ||
585         !CurMBB.isLayoutSuccessor(Succ0MBB))
586       MIRBuilder.buildBr(*Succ0MBB);
587 
588     // Link successors.
589     for (const BasicBlock *Succ : successors(&BrInst))
590       CurMBB.addSuccessor(&getMBB(*Succ));
591     return true;
592   }
593 
594   // If this condition is one of the special cases we handle, do special stuff
595   // now.
596   const Value *CondVal = BrInst.getCondition();
597   MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));
598 
599   // If this is a series of conditions that are or'd or and'd together, emit
600   // this as a sequence of branches instead of setcc's with and/or operations.
601   // As long as jumps are not expensive (exceptions for multi-use logic ops,
602   // unpredictable branches, and vector extracts because those jumps are likely
603   // expensive for any target), this should improve performance.
604   // For example, instead of something like:
605   //     cmp A, B
606   //     C = seteq
607   //     cmp D, E
608   //     F = setle
609   //     or C, F
610   //     jnz foo
611   // Emit:
612   //     cmp A, B
613   //     je foo
614   //     cmp D, E
615   //     jle foo
616   using namespace PatternMatch;
617   const Instruction *CondI = dyn_cast<Instruction>(CondVal);
618   if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
619       !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
620     Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
621     Value *Vec;
622     const Value *BOp0, *BOp1;
623     if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
624       Opcode = Instruction::And;
625     else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
626       Opcode = Instruction::Or;
627 
628     if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
629                     match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
630       findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
631                            getEdgeProbability(&CurMBB, Succ0MBB),
632                            getEdgeProbability(&CurMBB, Succ1MBB),
633                            /*InvertCond=*/false);
634       assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");
635 
636       // Allow some cases to be rejected.
637       if (shouldEmitAsBranches(SL->SwitchCases)) {
638         // Emit the branch for this block.
639         emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
640         SL->SwitchCases.erase(SL->SwitchCases.begin());
641         return true;
642       }
643 
644       // Okay, we decided not to do this, remove any inserted MBB's and clear
645       // SwitchCases.
646       for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
647         MF->erase(SL->SwitchCases[I].ThisBB);
648 
649       SL->SwitchCases.clear();
650     }
651   }
652 
653   // Create a CaseBlock record representing this branch.
654   SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
655                          ConstantInt::getTrue(MF->getFunction().getContext()),
656                          nullptr, Succ0MBB, Succ1MBB, &CurMBB,
657                          CurBuilder->getDebugLoc());
658 
659   // Use emitSwitchCase to actually insert the fast branch sequence for this
660   // cond branch.
661   emitSwitchCase(CB, &CurMBB, *CurBuilder);
662   return true;
663 }
664 
665 void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
666                                         MachineBasicBlock *Dst,
667                                         BranchProbability Prob) {
668   if (!FuncInfo.BPI) {
669     Src->addSuccessorWithoutProb(Dst);
670     return;
671   }
672   if (Prob.isUnknown())
673     Prob = getEdgeProbability(Src, Dst);
674   Src->addSuccessor(Dst, Prob);
675 }
676 
677 BranchProbability
678 IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
679                                  const MachineBasicBlock *Dst) const {
680   const BasicBlock *SrcBB = Src->getBasicBlock();
681   const BasicBlock *DstBB = Dst->getBasicBlock();
682   if (!FuncInfo.BPI) {
683     // If BPI is not available, set the default probability as 1 / N, where N is
684     // the number of successors.
685     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
686     return BranchProbability(1, SuccSize);
687   }
688   return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
689 }
690 
691 bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
692   using namespace SwitchCG;
693   // Extract cases from the switch.
694   const SwitchInst &SI = cast<SwitchInst>(U);
695   BranchProbabilityInfo *BPI = FuncInfo.BPI;
696   CaseClusterVector Clusters;
697   Clusters.reserve(SI.getNumCases());
698   for (const auto &I : SI.cases()) {
699     MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
700     assert(Succ && "Could not find successor mbb in mapping");
701     const ConstantInt *CaseVal = I.getCaseValue();
702     BranchProbability Prob =
703         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
704             : BranchProbability(1, SI.getNumCases() + 1);
705     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
706   }
707 
708   MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
709 
710   // Cluster adjacent cases with the same destination. We do this at all
711   // optimization levels because it's cheap to do and will make codegen faster
712   // if there are many clusters.
713   sortAndRangeify(Clusters);
714 
715   MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
716 
717   // If there is only the default destination, jump there directly.
718   if (Clusters.empty()) {
719     SwitchMBB->addSuccessor(DefaultMBB);
720     if (DefaultMBB != SwitchMBB->getNextNode())
721       MIB.buildBr(*DefaultMBB);
722     return true;
723   }
724 
725   SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
726   SL->findBitTestClusters(Clusters, &SI);
727 
728   LLVM_DEBUG({
729     dbgs() << "Case clusters: ";
730     for (const CaseCluster &C : Clusters) {
731       if (C.Kind == CC_JumpTable)
732         dbgs() << "JT:";
733       if (C.Kind == CC_BitTests)
734         dbgs() << "BT:";
735 
736       C.Low->getValue().print(dbgs(), true);
737       if (C.Low != C.High) {
738         dbgs() << '-';
739         C.High->getValue().print(dbgs(), true);
740       }
741       dbgs() << ' ';
742     }
743     dbgs() << '\n';
744   });
745 
746   assert(!Clusters.empty());
747   SwitchWorkList WorkList;
748   CaseClusterIt First = Clusters.begin();
749   CaseClusterIt Last = Clusters.end() - 1;
750   auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
751   WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
752 
753   while (!WorkList.empty()) {
754     SwitchWorkListItem W = WorkList.pop_back_val();
755 
756     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
757     // For optimized builds, lower large range as a balanced binary tree.
758     if (NumClusters > 3 &&
759         MF->getTarget().getOptLevel() != CodeGenOptLevel::None &&
760         !DefaultMBB->getParent()->getFunction().hasMinSize()) {
761       splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);
762       continue;
763     }
764 
765     if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
766       return false;
767   }
768   return true;
769 }
770 
771 void IRTranslator::splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
772                                  const SwitchCG::SwitchWorkListItem &W,
773                                  Value *Cond, MachineBasicBlock *SwitchMBB,
774                                  MachineIRBuilder &MIB) {
775   using namespace SwitchCG;
776   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
777          "Clusters not sorted?");
778   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
779 
780   auto [LastLeft, FirstRight, LeftProb, RightProb] =
781       SL->computeSplitWorkItemInfo(W);
782 
783   // Use the first element on the right as pivot since we will make less-than
784   // comparisons against it.
785   CaseClusterIt PivotCluster = FirstRight;
786   assert(PivotCluster > W.FirstCluster);
787   assert(PivotCluster <= W.LastCluster);
788 
789   CaseClusterIt FirstLeft = W.FirstCluster;
790   CaseClusterIt LastRight = W.LastCluster;
791 
792   const ConstantInt *Pivot = PivotCluster->Low;
793 
794   // New blocks will be inserted immediately after the current one.
795   MachineFunction::iterator BBI(W.MBB);
796   ++BBI;
797 
798   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
799   // we can branch to its destination directly if it's squeezed exactly in
800   // between the known lower bound and Pivot - 1.
801   MachineBasicBlock *LeftMBB;
802   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
803       FirstLeft->Low == W.GE &&
804       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
805     LeftMBB = FirstLeft->MBB;
806   } else {
807     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
808     FuncInfo.MF->insert(BBI, LeftMBB);
809     WorkList.push_back(
810         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
811   }
812 
813   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
814   // single cluster, RHS.Low == Pivot, and we can branch to its destination
815   // directly if RHS.High equals the current upper bound.
816   MachineBasicBlock *RightMBB;
817   if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
818       (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
819     RightMBB = FirstRight->MBB;
820   } else {
821     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
822     FuncInfo.MF->insert(BBI, RightMBB);
823     WorkList.push_back(
824         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
825   }
826 
827   // Create the CaseBlock record that will be used to lower the branch.
828   CaseBlock CB(ICmpInst::Predicate::ICMP_SLT, false, Cond, Pivot, nullptr,
829                LeftMBB, RightMBB, W.MBB, MIB.getDebugLoc(), LeftProb,
830                RightProb);
831 
832   if (W.MBB == SwitchMBB)
833     emitSwitchCase(CB, SwitchMBB, MIB);
834   else
835     SL->SwitchCases.push_back(CB);
836 }
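// Illustrative sketch (hypothetical clusters, not from this file): for case
// clusters [0..2]->A, [5]->B, [10..11]->C, [15]->D with the pivot chosen at
// [10..11], the CaseBlock above performs a signed "Cond < 10" test. The left
// work item {[0..2], [5]} keeps the old lower bound and gets Pivot as its new
// upper bound, while the right work item {[10..11], [15]} gets Pivot as its
// new lower bound. Each half is lowered recursively, so a large switch range
// becomes a balanced tree of such comparisons.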
837 
838 void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
839                                  MachineBasicBlock *MBB) {
840   // Emit the code for the jump table
841   assert(JT.Reg != -1U && "Should lower JT Header first!");
842   MachineIRBuilder MIB(*MBB->getParent());
843   MIB.setMBB(*MBB);
844   MIB.setDebugLoc(CurBuilder->getDebugLoc());
845 
846   Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
847   const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
848 
849   auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
850   MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
851 }
852 
853 bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
854                                        SwitchCG::JumpTableHeader &JTH,
855                                        MachineBasicBlock *HeaderBB) {
856   MachineIRBuilder MIB(*HeaderBB->getParent());
857   MIB.setMBB(*HeaderBB);
858   MIB.setDebugLoc(CurBuilder->getDebugLoc());
859 
860   const Value &SValue = *JTH.SValue;
861   // Subtract the lowest switch case value from the value being switched on.
862   const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
863   Register SwitchOpReg = getOrCreateVReg(SValue);
864   auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
865   auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
866 
867   // This value may be smaller or larger than the target's pointer type, and
868   // therefore require extension or truncation.
869   auto *PtrIRTy = PointerType::getUnqual(SValue.getContext());
870   const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
871   Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
872 
873   JT.Reg = Sub.getReg(0);
874 
875   if (JTH.FallthroughUnreachable) {
876     if (JT.MBB != HeaderBB->getNextNode())
877       MIB.buildBr(*JT.MBB);
878     return true;
879   }
880 
881   // Emit the range check for the jump table, and branch to the default block
882   // for the switch statement if the value being switched on exceeds the
883   // largest case in the switch.
884   auto Cst = getOrCreateVReg(
885       *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
886   Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
887   auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
888 
889   auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
890 
891   // Avoid emitting unnecessary branches to the next block.
892   if (JT.MBB != HeaderBB->getNextNode())
893     BrCond = MIB.buildBr(*JT.MBB);
894   return true;
895 }
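// Roughly, this header plus emitJumpTable above produce generic MIR of the
// following shape (register names and widths are illustrative only):
//   %sub:_(s32) = G_SUB %switchval, %first         ; bias by the lowest case
//   %idx:_(s64) = G_ZEXT %sub                      ; match pointer-sized index
//   %rng:_(s64) = G_CONSTANT i64 <Last - First>
//   %cmp:_(s1)  = G_ICMP intpred(ugt), %idx, %rng  ; range check
//   G_BRCOND %cmp, %bb.default
//   ... and, in the jump table block:
//   %jt:_(p0)   = G_JUMP_TABLE %jump-table.0
//   G_BRJT %jt(p0), %jump-table.0, %idx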
896 
897 void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
898                                   MachineBasicBlock *SwitchBB,
899                                   MachineIRBuilder &MIB) {
900   Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
901   Register Cond;
902   DebugLoc OldDbgLoc = MIB.getDebugLoc();
903   MIB.setDebugLoc(CB.DbgLoc);
904   MIB.setMBB(*CB.ThisBB);
905 
906   if (CB.PredInfo.NoCmp) {
907     // Branch or fall through to TrueBB.
908     addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
909     addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
910                       CB.ThisBB);
911     CB.ThisBB->normalizeSuccProbs();
912     if (CB.TrueBB != CB.ThisBB->getNextNode())
913       MIB.buildBr(*CB.TrueBB);
914     MIB.setDebugLoc(OldDbgLoc);
915     return;
916   }
917 
918   const LLT i1Ty = LLT::scalar(1);
919   // Build the compare.
920   if (!CB.CmpMHS) {
921     const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
922     // For conditional branch lowering, we might try to do something silly like
923     // emit a G_ICMP to compare an existing G_ICMP i1 result with true. If so,
924     // just re-use the existing condition vreg.
925     if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
926         CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
927       Cond = CondLHS;
928     } else {
929       Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
930       if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
931         Cond =
932             MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
933       else
934         Cond =
935             MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
936     }
937   } else {
938     assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
939            "Can only handle SLE ranges");
940 
941     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
942     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
943 
944     Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
945     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
946       Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
947       Cond =
948           MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
949     } else {
950       const LLT CmpTy = MRI->getType(CmpOpReg);
951       auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
952       auto Diff = MIB.buildConstant(CmpTy, High - Low);
953       Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
954     }
955   }
956 
957   // Update successor info
958   addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
959 
960   addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
961                     CB.ThisBB);
962 
963   // TrueBB and FalseBB are always different unless the incoming IR is
964   // degenerate. This only happens when running llc on weird IR.
965   if (CB.TrueBB != CB.FalseBB)
966     addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
967   CB.ThisBB->normalizeSuccProbs();
968 
969   addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
970                     CB.ThisBB);
971 
972   MIB.buildBrCond(Cond, *CB.TrueBB);
973   MIB.buildBr(*CB.FalseBB);
974   MIB.setDebugLoc(OldDbgLoc);
975 }
976 
977 bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
978                                           MachineBasicBlock *SwitchMBB,
979                                           MachineBasicBlock *CurMBB,
980                                           MachineBasicBlock *DefaultMBB,
981                                           MachineIRBuilder &MIB,
982                                           MachineFunction::iterator BBI,
983                                           BranchProbability UnhandledProbs,
984                                           SwitchCG::CaseClusterIt I,
985                                           MachineBasicBlock *Fallthrough,
986                                           bool FallthroughUnreachable) {
987   using namespace SwitchCG;
988   MachineFunction *CurMF = SwitchMBB->getParent();
989   // FIXME: Optimize away range check based on pivot comparisons.
990   JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
991   SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
992   BranchProbability DefaultProb = W.DefaultProb;
993 
994   // The jump block hasn't been inserted yet; insert it here.
995   MachineBasicBlock *JumpMBB = JT->MBB;
996   CurMF->insert(BBI, JumpMBB);
997 
998   // Since the jump table block is separate from the switch block, we need
999   // to keep track of it as a machine predecessor to the default block,
1000   // otherwise we lose the phi edges.
1001   addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
1002                     CurMBB);
1003   addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
1004                     JumpMBB);
1005 
1006   auto JumpProb = I->Prob;
1007   auto FallthroughProb = UnhandledProbs;
1008 
1009   // If the default statement is a target of the jump table, we evenly
1010   // distribute the default probability to successors of CurMBB. Also
1011   // update the probability on the edge from JumpMBB to Fallthrough.
1012   for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
1013                                         SE = JumpMBB->succ_end();
1014        SI != SE; ++SI) {
1015     if (*SI == DefaultMBB) {
1016       JumpProb += DefaultProb / 2;
1017       FallthroughProb -= DefaultProb / 2;
1018       JumpMBB->setSuccProbability(SI, DefaultProb / 2);
1019       JumpMBB->normalizeSuccProbs();
1020     } else {
1021       // Also record edges from the jump table block to its successors.
1022       addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
1023                         JumpMBB);
1024     }
1025   }
1026 
1027   if (FallthroughUnreachable)
1028     JTH->FallthroughUnreachable = true;
1029 
1030   if (!JTH->FallthroughUnreachable)
1031     addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
1032   addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
1033   CurMBB->normalizeSuccProbs();
1034 
1035   // The jump table header will be inserted in our current block, do the
1036   // range check, and fall through to our fallthrough block.
1037   JTH->HeaderBB = CurMBB;
1038   JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
1039 
1040   // If we're in the right place, emit the jump table header right now.
1041   if (CurMBB == SwitchMBB) {
1042     if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
1043       return false;
1044     JTH->Emitted = true;
1045   }
1046   return true;
1047 }
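// Illustrative numbers (not from the source): if DefaultProb = 1/4 and the
// default block is also a jump table target, the loop above credits half of
// it (1/8) to the CurMBB -> JumpMBB edge, removes the same 1/8 from the
// CurMBB -> Fallthrough edge, and sets the JumpMBB -> DefaultMBB edge itself
// to 1/8, so the default's weight is split between the two ways it can be
// reached.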
1048 bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
1049                                             Value *Cond,
1050                                             MachineBasicBlock *Fallthrough,
1051                                             bool FallthroughUnreachable,
1052                                             BranchProbability UnhandledProbs,
1053                                             MachineBasicBlock *CurMBB,
1054                                             MachineIRBuilder &MIB,
1055                                             MachineBasicBlock *SwitchMBB) {
1056   using namespace SwitchCG;
1057   const Value *RHS, *LHS, *MHS;
1058   CmpInst::Predicate Pred;
1059   if (I->Low == I->High) {
1060     // Check Cond == I->Low.
1061     Pred = CmpInst::ICMP_EQ;
1062     LHS = Cond;
1063     RHS = I->Low;
1064     MHS = nullptr;
1065   } else {
1066     // Check I->Low <= Cond <= I->High.
1067     Pred = CmpInst::ICMP_SLE;
1068     LHS = I->Low;
1069     MHS = Cond;
1070     RHS = I->High;
1071   }
1072 
1073   // If Fallthrough is unreachable, fold away the comparison.
1074   // The false probability is the sum of all unhandled cases.
1075   CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
1076                CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
1077 
1078   emitSwitchCase(CB, SwitchMBB, MIB);
1079   return true;
1080 }
1081 
1082 void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
1083                                      MachineBasicBlock *SwitchBB) {
1084   MachineIRBuilder &MIB = *CurBuilder;
1085   MIB.setMBB(*SwitchBB);
1086 
1087   // Subtract the minimum value.
1088   Register SwitchOpReg = getOrCreateVReg(*B.SValue);
1089 
1090   LLT SwitchOpTy = MRI->getType(SwitchOpReg);
1091   Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
1092   auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1093 
1094   Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
1095   const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1096 
1097   LLT MaskTy = SwitchOpTy;
1098   if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
1099       !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))
1100     MaskTy = LLT::scalar(PtrTy.getSizeInBits());
1101   else {
1102     // Ensure that the type will fit the mask value.
1103     for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
1104       if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
1105         // Switch table case ranges are encoded into a series of masks.
1106         // Just use pointer type, it's guaranteed to fit.
1107         MaskTy = LLT::scalar(PtrTy.getSizeInBits());
1108         break;
1109       }
1110     }
1111   }
1112   Register SubReg = RangeSub.getReg(0);
1113   if (SwitchOpTy != MaskTy)
1114     SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);
1115 
1116   B.RegVT = getMVTForLLT(MaskTy);
1117   B.Reg = SubReg;
1118 
1119   MachineBasicBlock *MBB = B.Cases[0].ThisBB;
1120 
1121   if (!B.FallthroughUnreachable)
1122     addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
1123   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
1124 
1125   SwitchBB->normalizeSuccProbs();
1126 
1127   if (!B.FallthroughUnreachable) {
1128     // Conditional branch to the default block.
1129     auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
1130     auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
1131                                   RangeSub, RangeCst);
1132     MIB.buildBrCond(RangeCmp, *B.Default);
1133   }
1134 
1135   // Avoid emitting unnecessary branches to the next block.
1136   if (MBB != SwitchBB->getNextNode())
1137     MIB.buildBr(*MBB);
1138 }
1139 
1140 void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
1141                                    MachineBasicBlock *NextMBB,
1142                                    BranchProbability BranchProbToNext,
1143                                    Register Reg, SwitchCG::BitTestCase &B,
1144                                    MachineBasicBlock *SwitchBB) {
1145   MachineIRBuilder &MIB = *CurBuilder;
1146   MIB.setMBB(*SwitchBB);
1147 
1148   LLT SwitchTy = getLLTForMVT(BB.RegVT);
1149   Register Cmp;
1150   unsigned PopCount = llvm::popcount(B.Mask);
1151   if (PopCount == 1) {
1152     // Testing for a single bit; just compare the shift count with what it
1153     // would need to be to shift a 1 bit in that position.
1154     auto MaskTrailingZeros =
1155         MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
1156     Cmp =
1157         MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
1158             .getReg(0);
1159   } else if (PopCount == BB.Range) {
1160     // There is only one zero bit in the range, test for it directly.
1161     auto MaskTrailingOnes =
1162         MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
1163     Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
1164               .getReg(0);
1165   } else {
1166     // Make desired shift.
1167     auto CstOne = MIB.buildConstant(SwitchTy, 1);
1168     auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
1169 
1170     // Emit bit tests and jumps.
1171     auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
1172     auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
1173     auto CstZero = MIB.buildConstant(SwitchTy, 0);
1174     Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
1175               .getReg(0);
1176   }
1177 
1178   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
1179   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
1180   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
1181   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
1182   // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
1183   // one as they are relative probabilities (and thus work more like weights),
1184   // and hence we need to normalize them so that they sum to one.
1185   SwitchBB->normalizeSuccProbs();
1186 
1187   // Record the fact that the IR edge from the header to the bit test target
1188   // will go through our new block. Needed for PHIs to have nodes added.
1189   addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
1190                     SwitchBB);
1191 
1192   MIB.buildBrCond(Cmp, *B.TargetBB);
1193 
1194   // Avoid emitting unnecessary branches to the next block.
1195   if (NextMBB != SwitchBB->getNextNode())
1196     MIB.buildBr(*NextMBB);
1197 }
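// Worked example with illustrative values (not from this file): for cases
// {4, 6, 9} sharing one destination and B.First = 4, the header leaves
// Reg = value - 4 and the case mask is 0b100101 (bits 0, 2 and 5). For
// value == 6 the general path computes (1 << 2) & 0b100101 = 0b100, which is
// non-zero, so the G_BRCOND transfers control to the shared target block.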
1198 
1199 bool IRTranslator::lowerBitTestWorkItem(
1200     SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
1201     MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
1202     MachineIRBuilder &MIB, MachineFunction::iterator BBI,
1203     BranchProbability DefaultProb, BranchProbability UnhandledProbs,
1204     SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
1205     bool FallthroughUnreachable) {
1206   using namespace SwitchCG;
1207   MachineFunction *CurMF = SwitchMBB->getParent();
1208   // FIXME: Optimize away range check based on pivot comparisons.
1209   BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
1210   // The bit test blocks haven't been inserted yet; insert them here.
1211   for (BitTestCase &BTC : BTB->Cases)
1212     CurMF->insert(BBI, BTC.ThisBB);
1213 
1214   // Fill in fields of the BitTestBlock.
1215   BTB->Parent = CurMBB;
1216   BTB->Default = Fallthrough;
1217 
1218   BTB->DefaultProb = UnhandledProbs;
1219   // If the cases in bit test don't form a contiguous range, we evenly
1220   // distribute the probability on the edge to Fallthrough to two
1221   // successors of CurMBB.
1222   if (!BTB->ContiguousRange) {
1223     BTB->Prob += DefaultProb / 2;
1224     BTB->DefaultProb -= DefaultProb / 2;
1225   }
1226 
1227   if (FallthroughUnreachable)
1228     BTB->FallthroughUnreachable = true;
1229 
1230   // If we're in the right place, emit the bit test header right now.
1231   if (CurMBB == SwitchMBB) {
1232     emitBitTestHeader(*BTB, SwitchMBB);
1233     BTB->Emitted = true;
1234   }
1235   return true;
1236 }
1237 
1238 bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
1239                                        Value *Cond,
1240                                        MachineBasicBlock *SwitchMBB,
1241                                        MachineBasicBlock *DefaultMBB,
1242                                        MachineIRBuilder &MIB) {
1243   using namespace SwitchCG;
1244   MachineFunction *CurMF = FuncInfo.MF;
1245   MachineBasicBlock *NextMBB = nullptr;
1246   MachineFunction::iterator BBI(W.MBB);
1247   if (++BBI != FuncInfo.MF->end())
1248     NextMBB = &*BBI;
1249 
1250   if (EnableOpts) {
1251     // Here, we order cases by probability so the most likely case will be
1252     // checked first. However, two clusters can have the same probability in
1253     // which case their relative ordering is non-deterministic. So we use Low
1254     // as a tie-breaker as clusters are guaranteed to never overlap.
1255     llvm::sort(W.FirstCluster, W.LastCluster + 1,
1256                [](const CaseCluster &a, const CaseCluster &b) {
1257                  return a.Prob != b.Prob
1258                             ? a.Prob > b.Prob
1259                             : a.Low->getValue().slt(b.Low->getValue());
1260                });
1261 
1262     // Rearrange the case blocks so that the last one falls through if possible
1263     // without changing the order of probabilities.
1264     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
1265       --I;
1266       if (I->Prob > W.LastCluster->Prob)
1267         break;
1268       if (I->Kind == CC_Range && I->MBB == NextMBB) {
1269         std::swap(*I, *W.LastCluster);
1270         break;
1271       }
1272     }
1273   }
1274 
1275   // Compute total probability.
1276   BranchProbability DefaultProb = W.DefaultProb;
1277   BranchProbability UnhandledProbs = DefaultProb;
1278   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
1279     UnhandledProbs += I->Prob;
1280 
1281   MachineBasicBlock *CurMBB = W.MBB;
1282   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
1283     bool FallthroughUnreachable = false;
1284     MachineBasicBlock *Fallthrough;
1285     if (I == W.LastCluster) {
1286       // For the last cluster, fall through to the default destination.
1287       Fallthrough = DefaultMBB;
1288       FallthroughUnreachable = isa<UnreachableInst>(
1289           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
1290     } else {
1291       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
1292       CurMF->insert(BBI, Fallthrough);
1293     }
1294     UnhandledProbs -= I->Prob;
1295 
1296     switch (I->Kind) {
1297     case CC_BitTests: {
1298       if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1299                                 DefaultProb, UnhandledProbs, I, Fallthrough,
1300                                 FallthroughUnreachable)) {
1301         LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
1302         return false;
1303       }
1304       break;
1305     }
1306 
1307     case CC_JumpTable: {
1308       if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1309                                   UnhandledProbs, I, Fallthrough,
1310                                   FallthroughUnreachable)) {
1311         LLVM_DEBUG(dbgs() << "Failed to lower jump table");
1312         return false;
1313       }
1314       break;
1315     }
1316     case CC_Range: {
1317       if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
1318                                     FallthroughUnreachable, UnhandledProbs,
1319                                     CurMBB, MIB, SwitchMBB)) {
1320         LLVM_DEBUG(dbgs() << "Failed to lower switch range");
1321         return false;
1322       }
1323       break;
1324     }
1325     }
1326     CurMBB = Fallthrough;
1327   }
1328 
1329   return true;
1330 }
1331 
1332 bool IRTranslator::translateIndirectBr(const User &U,
1333                                        MachineIRBuilder &MIRBuilder) {
1334   const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
1335 
1336   const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
1337   MIRBuilder.buildBrIndirect(Tgt);
1338 
1339   // Link successors.
1340   SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
1341   MachineBasicBlock &CurBB = MIRBuilder.getMBB();
1342   for (const BasicBlock *Succ : successors(&BrInst)) {
1343     // It's legal for indirectbr instructions to have duplicate blocks in the
1344     // destination list. We don't allow this in MIR. Skip anything that's
1345     // already a successor.
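    // E.g., `indirectbr ptr %addr, [label %bb, label %bb]` names %bb twice,
    // but CurBB gets a single successor edge to the corresponding MBB.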
1346     if (!AddedSuccessors.insert(Succ).second)
1347       continue;
1348     CurBB.addSuccessor(&getMBB(*Succ));
1349   }
1350 
1351   return true;
1352 }
1353 
1354 static bool isSwiftError(const Value *V) {
1355   if (auto Arg = dyn_cast<Argument>(V))
1356     return Arg->hasSwiftErrorAttr();
1357   if (auto AI = dyn_cast<AllocaInst>(V))
1358     return AI->isSwiftError();
1359   return false;
1360 }
1361 
1362 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
1363   const LoadInst &LI = cast<LoadInst>(U);
1364   TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());
1365   if (StoreSize.isZero())
1366     return true;
1367 
1368   ArrayRef<Register> Regs = getOrCreateVRegs(LI);
1369   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
1370   Register Base = getOrCreateVReg(*LI.getPointerOperand());
1371   AAMDNodes AAInfo = LI.getAAMetadata();
1372 
1373   const Value *Ptr = LI.getPointerOperand();
1374   Type *OffsetIRTy = DL->getIndexType(Ptr->getType());
1375   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1376 
1377   if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
1378     assert(Regs.size() == 1 && "swifterror should be single pointer");
1379     Register VReg =
1380         SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);
1381     MIRBuilder.buildCopy(Regs[0], VReg);
1382     return true;
1383   }
1384 
1385   MachineMemOperand::Flags Flags =
1386       TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
1387   if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
1388     if (AA->pointsToConstantMemory(
1389             MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
1390       Flags |= MachineMemOperand::MOInvariant;
1391     }
1392   }
1393 
1394   const MDNode *Ranges =
1395       Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
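  // Emit one G_LOAD per leaf register of the (possibly aggregate) value,
  // offsetting the base pointer by each piece's byte offset. E.g., loading a
  // {i32, i32} value produces two G_LOADs at byte offsets 0 and 4 (assuming a
  // typical data layout).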
1396   for (unsigned i = 0; i < Regs.size(); ++i) {
1397     Register Addr;
1398     MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1399 
1400     MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
1401     Align BaseAlign = getMemOpAlign(LI);
1402     auto MMO = MF->getMachineMemOperand(
1403         Ptr, Flags, MRI->getType(Regs[i]),
1404         commonAlignment(BaseAlign, Offsets[i] / 8), AAInfo, Ranges,
1405         LI.getSyncScopeID(), LI.getOrdering());
1406     MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
1407   }
1408 
1409   return true;
1410 }
1411 
1412 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
1413   const StoreInst &SI = cast<StoreInst>(U);
1414   if (DL->getTypeStoreSize(SI.getValueOperand()->getType()).isZero())
1415     return true;
1416 
1417   ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
1418   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
1419   Register Base = getOrCreateVReg(*SI.getPointerOperand());
1420 
1421   Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());
1422   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1423 
1424   if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
1425     assert(Vals.size() == 1 && "swifterror should be single pointer");
1426 
1427     Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
1428                                                     SI.getPointerOperand());
1429     MIRBuilder.buildCopy(VReg, Vals[0]);
1430     return true;
1431   }
1432 
1433   MachineMemOperand::Flags Flags = TLI->getStoreMemOperandFlags(SI, *DL);
1434 
1435   for (unsigned i = 0; i < Vals.size(); ++i) {
1436     Register Addr;
1437     MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1438 
1439     MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
1440     Align BaseAlign = getMemOpAlign(SI);
1441     auto MMO = MF->getMachineMemOperand(
1442         Ptr, Flags, MRI->getType(Vals[i]),
1443         commonAlignment(BaseAlign, Offsets[i] / 8), SI.getAAMetadata(), nullptr,
1444         SI.getSyncScopeID(), SI.getOrdering());
1445     MIRBuilder.buildStore(Vals[i], Addr, *MMO);
1446   }
1447   return true;
1448 }
1449 
1450 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
1451   const Value *Src = U.getOperand(0);
1452   Type *Int32Ty = Type::getInt32Ty(U.getContext());
1453 
1454   // getIndexedOffsetInType is designed for GEPs, so the first index is the
1455   // usual array element rather than looking into the actual aggregate.
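  // E.g., for `extractvalue {i32, i64} %agg, 1` the indices below end up as
  // {0, 1}; with a typical 64-bit data layout the field starts at byte 8, so
  // this returns 64 (the offset in bits).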
1456   SmallVector<Value *, 1> Indices;
1457   Indices.push_back(ConstantInt::get(Int32Ty, 0));
1458 
1459   if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
1460     for (auto Idx : EVI->indices())
1461       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1462   } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
1463     for (auto Idx : IVI->indices())
1464       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1465   } else {
1466     for (unsigned i = 1; i < U.getNumOperands(); ++i)
1467       Indices.push_back(U.getOperand(i));
1468   }
1469 
1470   return 8 * static_cast<uint64_t>(
1471                  DL.getIndexedOffsetInType(Src->getType(), Indices));
1472 }
1473 
1474 bool IRTranslator::translateExtractValue(const User &U,
1475                                          MachineIRBuilder &MIRBuilder) {
1476   const Value *Src = U.getOperand(0);
1477   uint64_t Offset = getOffsetFromIndices(U, *DL);
1478   ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1479   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
1480   unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
1481   auto &DstRegs = allocateVRegs(U);
1482 
1483   for (unsigned i = 0; i < DstRegs.size(); ++i)
1484     DstRegs[i] = SrcRegs[Idx++];
1485 
1486   return true;
1487 }
1488 
1489 bool IRTranslator::translateInsertValue(const User &U,
1490                                         MachineIRBuilder &MIRBuilder) {
1491   const Value *Src = U.getOperand(0);
1492   uint64_t Offset = getOffsetFromIndices(U, *DL);
1493   auto &DstRegs = allocateVRegs(U);
1494   ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
1495   ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1496   ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
1497   auto *InsertedIt = InsertedRegs.begin();
1498 
1499   for (unsigned i = 0; i < DstRegs.size(); ++i) {
1500     if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
1501       DstRegs[i] = *InsertedIt++;
1502     else
1503       DstRegs[i] = SrcRegs[i];
1504   }
1505 
1506   return true;
1507 }
1508 
1509 bool IRTranslator::translateSelect(const User &U,
1510                                    MachineIRBuilder &MIRBuilder) {
1511   Register Tst = getOrCreateVReg(*U.getOperand(0));
1512   ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
1513   ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
1514   ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
1515 
1516   uint32_t Flags = 0;
1517   if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
1518     Flags = MachineInstr::copyFlagsFromInstruction(*SI);
1519 
1520   for (unsigned i = 0; i < ResRegs.size(); ++i) {
1521     MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1522   }
1523 
1524   return true;
1525 }
1526 
1527 bool IRTranslator::translateCopy(const User &U, const Value &V,
1528                                  MachineIRBuilder &MIRBuilder) {
1529   Register Src = getOrCreateVReg(V);
1530   auto &Regs = *VMap.getVRegs(U);
1531   if (Regs.empty()) {
1532     Regs.push_back(Src);
1533     VMap.getOffsets(U)->push_back(0);
1534   } else {
1535     // If we already assigned a vreg for this instruction, we can't change that.
1536     // Emit a copy to satisfy the users we already emitted.
1537     MIRBuilder.buildCopy(Regs[0], Src);
1538   }
1539   return true;
1540 }
1541 
1542 bool IRTranslator::translateBitCast(const User &U,
1543                                     MachineIRBuilder &MIRBuilder) {
1544   // If we're bitcasting to the source type, we can reuse the source vreg.
1545   if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
1546       getLLTForType(*U.getType(), *DL)) {
1547     // If the source is a ConstantInt then it was probably created by
1548     // ConstantHoisting and we should leave it alone.
1549     if (isa<ConstantInt>(U.getOperand(0)))
1550       return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
1551                            MIRBuilder);
1552     return translateCopy(U, *U.getOperand(0), MIRBuilder);
1553   }
1554 
1555   return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1556 }
1557 
1558 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1559                                  MachineIRBuilder &MIRBuilder) {
1560   if (U.getType()->getScalarType()->isBFloatTy() ||
1561       U.getOperand(0)->getType()->getScalarType()->isBFloatTy())
1562     return false;
1563 
1564   uint32_t Flags = 0;
1565   if (const Instruction *I = dyn_cast<Instruction>(&U))
1566     Flags = MachineInstr::copyFlagsFromInstruction(*I);
1567 
1568   Register Op = getOrCreateVReg(*U.getOperand(0));
1569   Register Res = getOrCreateVReg(U);
1570   MIRBuilder.buildInstr(Opcode, {Res}, {Op}, Flags);
1571   return true;
1572 }
1573 
1574 bool IRTranslator::translateGetElementPtr(const User &U,
1575                                           MachineIRBuilder &MIRBuilder) {
1576   Value &Op0 = *U.getOperand(0);
1577   Register BaseReg = getOrCreateVReg(Op0);
1578   Type *PtrIRTy = Op0.getType();
1579   LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1580   Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
1581   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1582 
1583   uint32_t Flags = 0;
1584   if (const Instruction *I = dyn_cast<Instruction>(&U))
1585     Flags = MachineInstr::copyFlagsFromInstruction(*I);
1586 
1587   // Normalize Vector GEP - all scalar operands should be converted to the
1588   // splat vector.
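  // E.g., `getelementptr i32, ptr %base, <2 x i64> %idxs` has a scalar base
  // but vector indices; the base (and any scalar index that isn't folded into
  // the constant offset) is splatted to a vector with VectorWidth lanes.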
1589   unsigned VectorWidth = 0;
1590 
1591   // True if we should use a splat vector; using VectorWidth alone is not
1592   // sufficient.
1593   bool WantSplatVector = false;
1594   if (auto *VT = dyn_cast<VectorType>(U.getType())) {
1595     VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
1596     // We don't produce 1 x N vectors; those are treated as scalars.
1597     WantSplatVector = VectorWidth > 1;
1598   }
1599 
1600   // We might need to splat the base pointer into a vector if the offsets
1601   // are vectors.
1602   if (WantSplatVector && !PtrTy.isVector()) {
1603     BaseReg = MIRBuilder
1604                   .buildSplatBuildVector(LLT::fixed_vector(VectorWidth, PtrTy),
1605                                          BaseReg)
1606                   .getReg(0);
1607     PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
1608     PtrTy = getLLTForType(*PtrIRTy, *DL);
1609     OffsetIRTy = DL->getIndexType(PtrIRTy);
1610     OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1611   }
1612 
1613   int64_t Offset = 0;
1614   for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1615        GTI != E; ++GTI) {
1616     const Value *Idx = GTI.getOperand();
1617     if (StructType *StTy = GTI.getStructTypeOrNull()) {
1618       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1619       Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1620       continue;
1621     } else {
1622       uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
1623 
1624       // If this is a scalar constant or a splat vector of constants,
1625       // handle it quickly.
1626       if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1627         if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
1628           Offset += ElementSize * *Val;
1629           continue;
1630         }
1631       }
1632 
1633       if (Offset != 0) {
1634         auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1635         BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
1636                       .getReg(0);
1637         Offset = 0;
1638       }
1639 
1640       Register IdxReg = getOrCreateVReg(*Idx);
1641       LLT IdxTy = MRI->getType(IdxReg);
1642       if (IdxTy != OffsetTy) {
1643         if (!IdxTy.isVector() && WantSplatVector) {
1644           IdxReg = MIRBuilder
1645                        .buildSplatBuildVector(OffsetTy.changeElementType(IdxTy),
1646                                               IdxReg)
1647                        .getReg(0);
1648         }
1649 
1650         IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1651       }
1652 
1653       // N = N + Idx * ElementSize;
1654       // Avoid doing it for ElementSize of 1.
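      // E.g., indexing into an array of i32 multiplies the index by 4 before
      // the G_PTR_ADD, while indexing into an array of i8 uses it directly.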
1655       Register GepOffsetReg;
1656       if (ElementSize != 1) {
1657         auto ElementSizeMIB = MIRBuilder.buildConstant(
1658             getLLTForType(*OffsetIRTy, *DL), ElementSize);
1659         GepOffsetReg =
1660             MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
1661       } else
1662         GepOffsetReg = IdxReg;
1663 
1664       BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
1665     }
1666   }
1667 
1668   if (Offset != 0) {
1669     auto OffsetMIB =
1670         MIRBuilder.buildConstant(OffsetTy, Offset);
1671 
1672     if (int64_t(Offset) >= 0 && cast<GEPOperator>(U).isInBounds())
1673       Flags |= MachineInstr::MIFlag::NoUWrap;
1674 
1675     MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
1676                            Flags);
1677     return true;
1678   }
1679 
1680   MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1681   return true;
1682 }
1683 
1684 bool IRTranslator::translateMemFunc(const CallInst &CI,
1685                                     MachineIRBuilder &MIRBuilder,
1686                                     unsigned Opcode) {
1687   const Value *SrcPtr = CI.getArgOperand(1);
1688   // If the source is undef, then just emit a nop.
1689   if (isa<UndefValue>(SrcPtr))
1690     return true;
1691 
1692   SmallVector<Register, 3> SrcRegs;
1693 
1694   unsigned MinPtrSize = UINT_MAX;
1695   for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
1696     Register SrcReg = getOrCreateVReg(**AI);
1697     LLT SrcTy = MRI->getType(SrcReg);
1698     if (SrcTy.isPointer())
1699       MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
1700     SrcRegs.push_back(SrcReg);
1701   }
1702 
1703   LLT SizeTy = LLT::scalar(MinPtrSize);
1704 
1705   // The size operand should be the minimum of the pointer sizes.
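  // E.g., copying between an address space with 64-bit pointers and one with
  // 32-bit pointers gives the resulting G_MEMCPY a 32-bit size operand.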
1706   Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
1707   if (MRI->getType(SizeOpReg) != SizeTy)
1708     SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);
1709 
1710   auto ICall = MIRBuilder.buildInstr(Opcode);
1711   for (Register SrcReg : SrcRegs)
1712     ICall.addUse(SrcReg);
1713 
1714   Align DstAlign;
1715   Align SrcAlign;
1716   unsigned IsVol =
1717       cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();
1718 
1719   ConstantInt *CopySize = nullptr;
1720 
1721   if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1722     DstAlign = MCI->getDestAlign().valueOrOne();
1723     SrcAlign = MCI->getSourceAlign().valueOrOne();
1724     CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1725   } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
1726     DstAlign = MCI->getDestAlign().valueOrOne();
1727     SrcAlign = MCI->getSourceAlign().valueOrOne();
1728     CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1729   } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1730     DstAlign = MMI->getDestAlign().valueOrOne();
1731     SrcAlign = MMI->getSourceAlign().valueOrOne();
1732     CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
1733   } else {
1734     auto *MSI = cast<MemSetInst>(&CI);
1735     DstAlign = MSI->getDestAlign().valueOrOne();
1736   }
1737 
1738   if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1739     // We need to propagate the tail call flag from the IR inst as an argument.
1740     // Otherwise, we have to pessimize and assume later that we cannot tail call
1741     // any memory intrinsics.
1742     ICall.addImm(CI.isTailCall() ? 1 : 0);
1743   }
1744 
1745   // Create mem operands to store the alignment and volatile info.
1746   MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad;
1747   MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore;
1748   if (IsVol) {
1749     LoadFlags |= MachineMemOperand::MOVolatile;
1750     StoreFlags |= MachineMemOperand::MOVolatile;
1751   }
1752 
1753   AAMDNodes AAInfo = CI.getAAMetadata();
1754   if (AA && CopySize &&
1755       AA->pointsToConstantMemory(MemoryLocation(
1756           SrcPtr, LocationSize::precise(CopySize->getZExtValue()), AAInfo))) {
1757     LoadFlags |= MachineMemOperand::MOInvariant;
1758 
1759     // FIXME: pointsToConstantMemory probably does not imply dereferenceable,
1760     // but the previous usage implied it did. Probably should check
1761     // isDereferenceableAndAlignedPointer.
1762     LoadFlags |= MachineMemOperand::MODereferenceable;
1763   }
1764 
1765   ICall.addMemOperand(
1766       MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
1767                                StoreFlags, 1, DstAlign, AAInfo));
1768   if (Opcode != TargetOpcode::G_MEMSET)
1769     ICall.addMemOperand(MF->getMachineMemOperand(
1770         MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));
1771 
1772   return true;
1773 }
1774 
1775 bool IRTranslator::translateTrap(const CallInst &CI,
1776                                  MachineIRBuilder &MIRBuilder,
1777                                  unsigned Opcode) {
1778   StringRef TrapFuncName =
1779       CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
1780   if (TrapFuncName.empty()) {
1781     if (Opcode == TargetOpcode::G_UBSANTRAP) {
1782       uint64_t Code = cast<ConstantInt>(CI.getOperand(0))->getZExtValue();
1783       MIRBuilder.buildInstr(Opcode, {}, ArrayRef<llvm::SrcOp>{Code});
1784     } else {
1785       MIRBuilder.buildInstr(Opcode);
1786     }
1787     return true;
1788   }
1789 
1790   CallLowering::CallLoweringInfo Info;
1791   if (Opcode == TargetOpcode::G_UBSANTRAP)
1792     Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
1793                              CI.getArgOperand(0)->getType(), 0});
1794 
1795   Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
1796   Info.CB = &CI;
1797   Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
1798   return CLI->lowerCall(MIRBuilder, Info);
1799 }
1800 
1801 bool IRTranslator::translateVectorInterleave2Intrinsic(
1802     const CallInst &CI, MachineIRBuilder &MIRBuilder) {
1803   assert(CI.getIntrinsicID() == Intrinsic::vector_interleave2 &&
1804          "This function can only be called on the interleave2 intrinsic!");
1805   // Canonicalize interleave2 to G_SHUFFLE_VECTOR (similar to SelectionDAG).
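  // E.g., interleaving two <4 x i32> operands uses the shuffle mask
  // <0, 4, 1, 5, 2, 6, 3, 7>, alternating lanes from the two sources.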
1806   Register Op0 = getOrCreateVReg(*CI.getOperand(0));
1807   Register Op1 = getOrCreateVReg(*CI.getOperand(1));
1808   Register Res = getOrCreateVReg(CI);
1809 
1810   LLT OpTy = MRI->getType(Op0);
1811   MIRBuilder.buildShuffleVector(Res, Op0, Op1,
1812                                 createInterleaveMask(OpTy.getNumElements(), 2));
1813 
1814   return true;
1815 }
1816 
1817 bool IRTranslator::translateVectorDeinterleave2Intrinsic(
1818     const CallInst &CI, MachineIRBuilder &MIRBuilder) {
1819   assert(CI.getIntrinsicID() == Intrinsic::vector_deinterleave2 &&
1820          "This function can only be called on the deinterleave2 intrinsic!");
1821   // Canonicalize deinterleave2 to shuffles that extract sub-vectors (similar to
1822   // SelectionDAG).
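  // E.g., deinterleaving an <8 x i32> operand yields two <4 x i32> results via
  // the even-lane mask <0, 2, 4, 6> and the odd-lane mask <1, 3, 5, 7>.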
1823   Register Op = getOrCreateVReg(*CI.getOperand(0));
1824   auto Undef = MIRBuilder.buildUndef(MRI->getType(Op));
1825   ArrayRef<Register> Res = getOrCreateVRegs(CI);
1826 
1827   LLT ResTy = MRI->getType(Res[0]);
1828   MIRBuilder.buildShuffleVector(Res[0], Op, Undef,
1829                                 createStrideMask(0, 2, ResTy.getNumElements()));
1830   MIRBuilder.buildShuffleVector(Res[1], Op, Undef,
1831                                 createStrideMask(1, 2, ResTy.getNumElements()));
1832 
1833   return true;
1834 }
1835 
1836 void IRTranslator::getStackGuard(Register DstReg,
1837                                  MachineIRBuilder &MIRBuilder) {
1838   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1839   MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
1840   auto MIB =
1841       MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1842 
1843   Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
1844   if (!Global)
1845     return;
1846 
1847   unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
1848   LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
1849 
1850   MachinePointerInfo MPInfo(Global);
1851   auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1852                MachineMemOperand::MODereferenceable;
1853   MachineMemOperand *MemRef = MF->getMachineMemOperand(
1854       MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
1855   MIB.setMemRefs({MemRef});
1856 }
1857 
1858 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1859                                               MachineIRBuilder &MIRBuilder) {
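  // E.g., llvm.uadd.with.overflow.i32 becomes a G_UADDO with two results: the
  // 32-bit sum in ResRegs[0] and the 1-bit carry-out in ResRegs[1].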
1860   ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
1861   MIRBuilder.buildInstr(
1862       Op, {ResRegs[0], ResRegs[1]},
1863       {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
1864 
1865   return true;
1866 }
1867 
1868 bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
1869                                                 MachineIRBuilder &MIRBuilder) {
1870   Register Dst = getOrCreateVReg(CI);
1871   Register Src0 = getOrCreateVReg(*CI.getOperand(0));
1872   Register Src1 = getOrCreateVReg(*CI.getOperand(1));
1873   uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
1874   MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });
1875   return true;
1876 }
1877 
1878 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1879   switch (ID) {
1880     default:
1881       break;
1882     case Intrinsic::acos:
1883       return TargetOpcode::G_FACOS;
1884     case Intrinsic::asin:
1885       return TargetOpcode::G_FASIN;
1886     case Intrinsic::atan:
1887       return TargetOpcode::G_FATAN;
1888     case Intrinsic::bswap:
1889       return TargetOpcode::G_BSWAP;
1890     case Intrinsic::bitreverse:
1891       return TargetOpcode::G_BITREVERSE;
1892     case Intrinsic::fshl:
1893       return TargetOpcode::G_FSHL;
1894     case Intrinsic::fshr:
1895       return TargetOpcode::G_FSHR;
1896     case Intrinsic::ceil:
1897       return TargetOpcode::G_FCEIL;
1898     case Intrinsic::cos:
1899       return TargetOpcode::G_FCOS;
1900     case Intrinsic::cosh:
1901       return TargetOpcode::G_FCOSH;
1902     case Intrinsic::ctpop:
1903       return TargetOpcode::G_CTPOP;
1904     case Intrinsic::exp:
1905       return TargetOpcode::G_FEXP;
1906     case Intrinsic::exp2:
1907       return TargetOpcode::G_FEXP2;
1908     case Intrinsic::exp10:
1909       return TargetOpcode::G_FEXP10;
1910     case Intrinsic::fabs:
1911       return TargetOpcode::G_FABS;
1912     case Intrinsic::copysign:
1913       return TargetOpcode::G_FCOPYSIGN;
1914     case Intrinsic::minnum:
1915       return TargetOpcode::G_FMINNUM;
1916     case Intrinsic::maxnum:
1917       return TargetOpcode::G_FMAXNUM;
1918     case Intrinsic::minimum:
1919       return TargetOpcode::G_FMINIMUM;
1920     case Intrinsic::maximum:
1921       return TargetOpcode::G_FMAXIMUM;
1922     case Intrinsic::canonicalize:
1923       return TargetOpcode::G_FCANONICALIZE;
1924     case Intrinsic::floor:
1925       return TargetOpcode::G_FFLOOR;
1926     case Intrinsic::fma:
1927       return TargetOpcode::G_FMA;
1928     case Intrinsic::log:
1929       return TargetOpcode::G_FLOG;
1930     case Intrinsic::log2:
1931       return TargetOpcode::G_FLOG2;
1932     case Intrinsic::log10:
1933       return TargetOpcode::G_FLOG10;
1934     case Intrinsic::ldexp:
1935       return TargetOpcode::G_FLDEXP;
1936     case Intrinsic::nearbyint:
1937       return TargetOpcode::G_FNEARBYINT;
1938     case Intrinsic::pow:
1939       return TargetOpcode::G_FPOW;
1940     case Intrinsic::powi:
1941       return TargetOpcode::G_FPOWI;
1942     case Intrinsic::rint:
1943       return TargetOpcode::G_FRINT;
1944     case Intrinsic::round:
1945       return TargetOpcode::G_INTRINSIC_ROUND;
1946     case Intrinsic::roundeven:
1947       return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1948     case Intrinsic::sin:
1949       return TargetOpcode::G_FSIN;
1950     case Intrinsic::sinh:
1951       return TargetOpcode::G_FSINH;
1952     case Intrinsic::sqrt:
1953       return TargetOpcode::G_FSQRT;
1954     case Intrinsic::tan:
1955       return TargetOpcode::G_FTAN;
1956     case Intrinsic::tanh:
1957       return TargetOpcode::G_FTANH;
1958     case Intrinsic::trunc:
1959       return TargetOpcode::G_INTRINSIC_TRUNC;
1960     case Intrinsic::readcyclecounter:
1961       return TargetOpcode::G_READCYCLECOUNTER;
1962     case Intrinsic::readsteadycounter:
1963       return TargetOpcode::G_READSTEADYCOUNTER;
1964     case Intrinsic::ptrmask:
1965       return TargetOpcode::G_PTRMASK;
1966     case Intrinsic::lrint:
1967       return TargetOpcode::G_INTRINSIC_LRINT;
1968     case Intrinsic::llrint:
1969       return TargetOpcode::G_INTRINSIC_LLRINT;
1970     // FADD/FMUL require checking the FMF, so are handled elsewhere.
1971     case Intrinsic::vector_reduce_fmin:
1972       return TargetOpcode::G_VECREDUCE_FMIN;
1973     case Intrinsic::vector_reduce_fmax:
1974       return TargetOpcode::G_VECREDUCE_FMAX;
1975     case Intrinsic::vector_reduce_fminimum:
1976       return TargetOpcode::G_VECREDUCE_FMINIMUM;
1977     case Intrinsic::vector_reduce_fmaximum:
1978       return TargetOpcode::G_VECREDUCE_FMAXIMUM;
1979     case Intrinsic::vector_reduce_add:
1980       return TargetOpcode::G_VECREDUCE_ADD;
1981     case Intrinsic::vector_reduce_mul:
1982       return TargetOpcode::G_VECREDUCE_MUL;
1983     case Intrinsic::vector_reduce_and:
1984       return TargetOpcode::G_VECREDUCE_AND;
1985     case Intrinsic::vector_reduce_or:
1986       return TargetOpcode::G_VECREDUCE_OR;
1987     case Intrinsic::vector_reduce_xor:
1988       return TargetOpcode::G_VECREDUCE_XOR;
1989     case Intrinsic::vector_reduce_smax:
1990       return TargetOpcode::G_VECREDUCE_SMAX;
1991     case Intrinsic::vector_reduce_smin:
1992       return TargetOpcode::G_VECREDUCE_SMIN;
1993     case Intrinsic::vector_reduce_umax:
1994       return TargetOpcode::G_VECREDUCE_UMAX;
1995     case Intrinsic::vector_reduce_umin:
1996       return TargetOpcode::G_VECREDUCE_UMIN;
1997     case Intrinsic::experimental_vector_compress:
1998       return TargetOpcode::G_VECTOR_COMPRESS;
1999     case Intrinsic::lround:
2000       return TargetOpcode::G_LROUND;
2001     case Intrinsic::llround:
2002       return TargetOpcode::G_LLROUND;
2003     case Intrinsic::get_fpenv:
2004       return TargetOpcode::G_GET_FPENV;
2005     case Intrinsic::get_fpmode:
2006       return TargetOpcode::G_GET_FPMODE;
2007   }
2008   return Intrinsic::not_intrinsic;
2009 }
2010 
2011 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
2012                                             Intrinsic::ID ID,
2013                                             MachineIRBuilder &MIRBuilder) {
2014 
2015   unsigned Op = getSimpleIntrinsicOpcode(ID);
2016 
2017   // Is this a simple intrinsic?
2018   if (Op == Intrinsic::not_intrinsic)
2019     return false;
2020 
2021   // Yes. Let's translate it.
2022   SmallVector<llvm::SrcOp, 4> VRegs;
2023   for (const auto &Arg : CI.args())
2024     VRegs.push_back(getOrCreateVReg(*Arg));
2025 
2026   MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
2027                         MachineInstr::copyFlagsFromInstruction(CI));
2028   return true;
2029 }
2030 
2031 // TODO: Include ConstrainedOps.def when all strict instructions are defined.
2032 static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
2033   switch (ID) {
2034   case Intrinsic::experimental_constrained_fadd:
2035     return TargetOpcode::G_STRICT_FADD;
2036   case Intrinsic::experimental_constrained_fsub:
2037     return TargetOpcode::G_STRICT_FSUB;
2038   case Intrinsic::experimental_constrained_fmul:
2039     return TargetOpcode::G_STRICT_FMUL;
2040   case Intrinsic::experimental_constrained_fdiv:
2041     return TargetOpcode::G_STRICT_FDIV;
2042   case Intrinsic::experimental_constrained_frem:
2043     return TargetOpcode::G_STRICT_FREM;
2044   case Intrinsic::experimental_constrained_fma:
2045     return TargetOpcode::G_STRICT_FMA;
2046   case Intrinsic::experimental_constrained_sqrt:
2047     return TargetOpcode::G_STRICT_FSQRT;
2048   case Intrinsic::experimental_constrained_ldexp:
2049     return TargetOpcode::G_STRICT_FLDEXP;
2050   default:
2051     return 0;
2052   }
2053 }
2054 
2055 bool IRTranslator::translateConstrainedFPIntrinsic(
2056   const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
2057   fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
2058 
2059   unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
2060   if (!Opcode)
2061     return false;
2062 
2063   uint32_t Flags = MachineInstr::copyFlagsFromInstruction(FPI);
2064   if (EB == fp::ExceptionBehavior::ebIgnore)
2065     Flags |= MachineInstr::NoFPExcept;
2066 
2067   SmallVector<llvm::SrcOp, 4> VRegs;
2068   for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
2069     VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(I)));
2070 
2071   MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
2072   return true;
2073 }
2074 
2075 std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
2076   auto VRegs = getOrCreateVRegs(Arg);
2077   if (VRegs.size() != 1)
2078     return std::nullopt;
2079 
2080   // Arguments are lowered as a copy of a livein physical register.
2081   auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
2082   if (!VRegDef || !VRegDef->isCopy())
2083     return std::nullopt;
2084   return VRegDef->getOperand(1).getReg().asMCReg();
2085 }
2086 
2087 bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
2088                                                  const DILocalVariable *Var,
2089                                                  const DIExpression *Expr,
2090                                                  const DebugLoc &DL,
2091                                                  MachineIRBuilder &MIRBuilder) {
2092   auto *Arg = dyn_cast<Argument>(Val);
2093   if (!Arg)
2094     return false;
2095 
2096   if (!Expr->isEntryValue())
2097     return false;
2098 
2099   std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
2100   if (!PhysReg) {
2101     LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
2102                       << ": expression is entry_value but "
2103                       << "couldn't find a physical register\n");
2104     LLVM_DEBUG(dbgs() << *Var << "\n");
2105     return true;
2106   }
2107 
2108   if (isDeclare) {
2109     // Append an op deref to account for the fact that this is a dbg_declare.
2110     Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
2111     MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);
2112   } else {
2113     MIRBuilder.buildDirectDbgValue(*PhysReg, Var, Expr);
2114   }
2115 
2116   return true;
2117 }
2118 
2119 static unsigned getConvOpcode(Intrinsic::ID ID) {
2120   switch (ID) {
2121   default:
2122     llvm_unreachable("Unexpected intrinsic");
2123   case Intrinsic::experimental_convergence_anchor:
2124     return TargetOpcode::CONVERGENCECTRL_ANCHOR;
2125   case Intrinsic::experimental_convergence_entry:
2126     return TargetOpcode::CONVERGENCECTRL_ENTRY;
2127   case Intrinsic::experimental_convergence_loop:
2128     return TargetOpcode::CONVERGENCECTRL_LOOP;
2129   }
2130 }
2131 
2132 bool IRTranslator::translateConvergenceControlIntrinsic(
2133     const CallInst &CI, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder) {
2134   MachineInstrBuilder MIB = MIRBuilder.buildInstr(getConvOpcode(ID));
2135   Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
2136   MIB.addDef(OutputReg);
2137 
2138   if (ID == Intrinsic::experimental_convergence_loop) {
2139     auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl);
2140     assert(Bundle && "Expected a convergence control token.");
2141     Register InputReg =
2142         getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
2143     MIB.addUse(InputReg);
2144   }
2145 
2146   return true;
2147 }
2148 
2149 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
2150                                            MachineIRBuilder &MIRBuilder) {
2151   if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
2152     if (ORE->enabled()) {
2153       if (MemoryOpRemark::canHandle(MI, *LibInfo)) {
2154         MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
2155         R.visit(MI);
2156       }
2157     }
2158   }
2159 
2160   // If this is a simple intrinsic (that is, we just need to add a def of
2161   // a vreg, and uses for each arg operand), then translate it.
2162   if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
2163     return true;
2164 
2165   switch (ID) {
2166   default:
2167     break;
2168   case Intrinsic::lifetime_start:
2169   case Intrinsic::lifetime_end: {
2170     // No stack colouring in O0, discard region information.
2171     if (MF->getTarget().getOptLevel() == CodeGenOptLevel::None)
2172       return true;
2173 
2174     unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
2175                                                   : TargetOpcode::LIFETIME_END;
2176 
2177     // Get the underlying objects for the location passed on the lifetime
2178     // marker.
2179     SmallVector<const Value *, 4> Allocas;
2180     getUnderlyingObjects(CI.getArgOperand(1), Allocas);
2181 
2182     // Iterate over each underlying object, creating lifetime markers for each
2183     // static alloca. Quit if we find a non-static alloca.
2184     for (const Value *V : Allocas) {
2185       const AllocaInst *AI = dyn_cast<AllocaInst>(V);
2186       if (!AI)
2187         continue;
2188 
2189       if (!AI->isStaticAlloca())
2190         return true;
2191 
2192       MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
2193     }
2194     return true;
2195   }
2196   case Intrinsic::dbg_declare: {
2197     const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
2198     assert(DI.getVariable() && "Missing variable");
2199     translateDbgDeclareRecord(DI.getAddress(), DI.hasArgList(), DI.getVariable(),
2200                        DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
2201     return true;
2202   }
2203   case Intrinsic::dbg_label: {
2204     const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
2205     assert(DI.getLabel() && "Missing label");
2206 
2207     assert(DI.getLabel()->isValidLocationForIntrinsic(
2208                MIRBuilder.getDebugLoc()) &&
2209            "Expected inlined-at fields to agree");
2210 
2211     MIRBuilder.buildDbgLabel(DI.getLabel());
2212     return true;
2213   }
2214   case Intrinsic::vaend:
2215     // No target I know of cares about va_end. Certainly no in-tree target
2216     // does. Simplest intrinsic ever!
2217     return true;
2218   case Intrinsic::vastart: {
2219     Value *Ptr = CI.getArgOperand(0);
2220     unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
2221     Align Alignment = getKnownAlignment(Ptr, *DL);
2222 
2223     MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
2224         .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
2225                                                 MachineMemOperand::MOStore,
2226                                                 ListSize, Alignment));
2227     return true;
2228   }
2229   case Intrinsic::dbg_assign:
2230     // A dbg.assign is a dbg.value with more information about stack locations,
2231     // typically produced during optimisation of variables with leaked
2232     // addresses. We can treat it like a normal dbg_value intrinsic here; to
2233     // benefit from the full analysis of stack/SSA locations, GlobalISel would
2234     // need to register for and use the AssignmentTrackingAnalysis pass.
2235     [[fallthrough]];
2236   case Intrinsic::dbg_value: {
2237     // This form of DBG_VALUE is target-independent.
2238     const DbgValueInst &DI = cast<DbgValueInst>(CI);
2239     translateDbgValueRecord(DI.getValue(), DI.hasArgList(), DI.getVariable(),
2240                        DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
2241     return true;
2242   }
2243   case Intrinsic::uadd_with_overflow:
2244     return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
2245   case Intrinsic::sadd_with_overflow:
2246     return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
2247   case Intrinsic::usub_with_overflow:
2248     return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
2249   case Intrinsic::ssub_with_overflow:
2250     return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
2251   case Intrinsic::umul_with_overflow:
2252     return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
2253   case Intrinsic::smul_with_overflow:
2254     return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
2255   case Intrinsic::uadd_sat:
2256     return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
2257   case Intrinsic::sadd_sat:
2258     return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
2259   case Intrinsic::usub_sat:
2260     return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
2261   case Intrinsic::ssub_sat:
2262     return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
2263   case Intrinsic::ushl_sat:
2264     return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
2265   case Intrinsic::sshl_sat:
2266     return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
2267   case Intrinsic::umin:
2268     return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
2269   case Intrinsic::umax:
2270     return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
2271   case Intrinsic::smin:
2272     return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
2273   case Intrinsic::smax:
2274     return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
2275   case Intrinsic::abs:
2276     // TODO: Preserve "int min is poison" arg in GMIR?
2277     return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
2278   case Intrinsic::smul_fix:
2279     return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2280   case Intrinsic::umul_fix:
2281     return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2282   case Intrinsic::smul_fix_sat:
2283     return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2284   case Intrinsic::umul_fix_sat:
2285     return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2286   case Intrinsic::sdiv_fix:
2287     return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2288   case Intrinsic::udiv_fix:
2289     return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2290   case Intrinsic::sdiv_fix_sat:
2291     return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2292   case Intrinsic::udiv_fix_sat:
2293     return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2294   case Intrinsic::fmuladd: {
2295     const TargetMachine &TM = MF->getTarget();
2296     Register Dst = getOrCreateVReg(CI);
2297     Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
2298     Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
2299     Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
2300     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
2301         TLI->isFMAFasterThanFMulAndFAdd(*MF,
2302                                         TLI->getValueType(*DL, CI.getType()))) {
2303       // TODO: Revisit this to see if we should move this part of the
2304       // lowering to the combiner.
2305       MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
2306                           MachineInstr::copyFlagsFromInstruction(CI));
2307     } else {
2308       LLT Ty = getLLTForType(*CI.getType(), *DL);
2309       auto FMul = MIRBuilder.buildFMul(
2310           Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
2311       MIRBuilder.buildFAdd(Dst, FMul, Op2,
2312                            MachineInstr::copyFlagsFromInstruction(CI));
2313     }
2314     return true;
2315   }
2316   case Intrinsic::convert_from_fp16:
2317     // FIXME: This intrinsic should probably be removed from the IR.
2318     MIRBuilder.buildFPExt(getOrCreateVReg(CI),
2319                           getOrCreateVReg(*CI.getArgOperand(0)),
2320                           MachineInstr::copyFlagsFromInstruction(CI));
2321     return true;
2322   case Intrinsic::convert_to_fp16:
2323     // FIXME: This intrinsic should probably be removed from the IR.
2324     MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
2325                             getOrCreateVReg(*CI.getArgOperand(0)),
2326                             MachineInstr::copyFlagsFromInstruction(CI));
2327     return true;
2328   case Intrinsic::frexp: {
2329     ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
2330     MIRBuilder.buildFFrexp(VRegs[0], VRegs[1],
2331                            getOrCreateVReg(*CI.getArgOperand(0)),
2332                            MachineInstr::copyFlagsFromInstruction(CI));
2333     return true;
2334   }
2335   case Intrinsic::memcpy_inline:
2336     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2337   case Intrinsic::memcpy:
2338     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2339   case Intrinsic::memmove:
2340     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2341   case Intrinsic::memset:
2342     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2343   case Intrinsic::eh_typeid_for: {
2344     GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
2345     Register Reg = getOrCreateVReg(CI);
2346     unsigned TypeID = MF->getTypeIDFor(GV);
2347     MIRBuilder.buildConstant(Reg, TypeID);
2348     return true;
2349   }
2350   case Intrinsic::objectsize:
2351     llvm_unreachable("llvm.objectsize.* should have been lowered already");
2352 
2353   case Intrinsic::is_constant:
2354     llvm_unreachable("llvm.is.constant.* should have been lowered already");
2355 
2356   case Intrinsic::stackguard:
2357     getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2358     return true;
2359   case Intrinsic::stackprotector: {
2360     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2361     Register GuardVal;
2362     if (TLI->useLoadStackGuardNode()) {
2363       GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2364       getStackGuard(GuardVal, MIRBuilder);
2365     } else
2366       GuardVal = getOrCreateVReg(*CI.getArgOperand(0)); // The guard's value.
2367 
2368     AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
2369     int FI = getOrCreateFrameIndex(*Slot);
2370     MF->getFrameInfo().setStackProtectorIndex(FI);
2371 
2372     MIRBuilder.buildStore(
2373         GuardVal, getOrCreateVReg(*Slot),
2374         *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
2375                                   MachineMemOperand::MOStore |
2376                                       MachineMemOperand::MOVolatile,
2377                                   PtrTy, Align(8)));
2378     return true;
2379   }
2380   case Intrinsic::stacksave: {
2381     MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
2382     return true;
2383   }
2384   case Intrinsic::stackrestore: {
2385     MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},
2386                           {getOrCreateVReg(*CI.getArgOperand(0))});
2387     return true;
2388   }
2389   case Intrinsic::cttz:
2390   case Intrinsic::ctlz: {
2391     ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
2392     bool isTrailing = ID == Intrinsic::cttz;
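    // The second argument is the "is zero poison" flag: when it is false the
    // plain G_CTTZ/G_CTLZ is used; when true, the *_ZERO_UNDEF variant is.
    // E.g., llvm.ctlz(%x, i1 true) becomes G_CTLZ_ZERO_UNDEF.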
2393     unsigned Opcode = isTrailing
2394                           ? Cst->isZero() ? TargetOpcode::G_CTTZ
2395                                           : TargetOpcode::G_CTTZ_ZERO_UNDEF
2396                           : Cst->isZero() ? TargetOpcode::G_CTLZ
2397                                           : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2398     MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
2399                           {getOrCreateVReg(*CI.getArgOperand(0))});
2400     return true;
2401   }
2402   case Intrinsic::invariant_start: {
2403     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2404     Register Undef = MRI->createGenericVirtualRegister(PtrTy);
2405     MIRBuilder.buildUndef(Undef);
2406     return true;
2407   }
2408   case Intrinsic::invariant_end:
2409     return true;
2410   case Intrinsic::expect:
2411   case Intrinsic::annotation:
2412   case Intrinsic::ptr_annotation:
2413   case Intrinsic::launder_invariant_group:
2414   case Intrinsic::strip_invariant_group: {
2415     // Drop the intrinsic, but forward the value.
2416     MIRBuilder.buildCopy(getOrCreateVReg(CI),
2417                          getOrCreateVReg(*CI.getArgOperand(0)));
2418     return true;
2419   }
2420   case Intrinsic::assume:
2421   case Intrinsic::experimental_noalias_scope_decl:
2422   case Intrinsic::var_annotation:
2423   case Intrinsic::sideeffect:
2424     // Discard annotate attributes, assumptions, and artificial side-effects.
2425     return true;
2426   case Intrinsic::read_volatile_register:
2427   case Intrinsic::read_register: {
2428     Value *Arg = CI.getArgOperand(0);
2429     MIRBuilder
2430         .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2431         .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
2432     return true;
2433   }
2434   case Intrinsic::write_register: {
2435     Value *Arg = CI.getArgOperand(0);
2436     MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
2437       .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
2438       .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
2439     return true;
2440   }
2441   case Intrinsic::localescape: {
2442     MachineBasicBlock &EntryMBB = MF->front();
2443     StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());
2444 
2445     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
2446     // is the same on all targets.
2447     for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
2448       Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
2449       if (isa<ConstantPointerNull>(Arg))
2450         continue; // Skip null pointers. They represent a hole in index space.
2451 
2452       int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
2453       MCSymbol *FrameAllocSym =
2454           MF->getContext().getOrCreateFrameAllocSymbol(EscapedName, Idx);
2455 
2456       // This should be inserted at the start of the entry block.
2457       auto LocalEscape =
2458           MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
2459               .addSym(FrameAllocSym)
2460               .addFrameIndex(FI);
2461 
2462       EntryMBB.insert(EntryMBB.begin(), LocalEscape);
2463     }
2464 
2465     return true;
2466   }
2467   case Intrinsic::vector_reduce_fadd:
2468   case Intrinsic::vector_reduce_fmul: {
2469     // Need to check for the reassoc flag to decide whether we want a
2470     // sequential reduction opcode or not.
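    // E.g., a reassoc llvm.vector.reduce.fadd(%start, %vec) is split into a
    // G_VECREDUCE_FADD of %vec followed by a G_FADD with %start; without
    // reassoc it stays a single, strictly-ordered G_VECREDUCE_SEQ_FADD.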
2471     Register Dst = getOrCreateVReg(CI);
2472     Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
2473     Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
2474     unsigned Opc = 0;
2475     if (!CI.hasAllowReassoc()) {
2476       // The sequential ordering case.
2477       Opc = ID == Intrinsic::vector_reduce_fadd
2478                 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2479                 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2480       MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
2481                             MachineInstr::copyFlagsFromInstruction(CI));
2482       return true;
2483     }
2484     // We split the operation into a separate G_FADD/G_FMUL + the reduce,
2485     // since the associativity doesn't matter.
2486     unsigned ScalarOpc;
2487     if (ID == Intrinsic::vector_reduce_fadd) {
2488       Opc = TargetOpcode::G_VECREDUCE_FADD;
2489       ScalarOpc = TargetOpcode::G_FADD;
2490     } else {
2491       Opc = TargetOpcode::G_VECREDUCE_FMUL;
2492       ScalarOpc = TargetOpcode::G_FMUL;
2493     }
2494     LLT DstTy = MRI->getType(Dst);
2495     auto Rdx = MIRBuilder.buildInstr(
2496         Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
2497     MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2498                           MachineInstr::copyFlagsFromInstruction(CI));
2499 
2500     return true;
2501   }
2502   case Intrinsic::trap:
2503     return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
2504   case Intrinsic::debugtrap:
2505     return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
2506   case Intrinsic::ubsantrap:
2507     return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
2508   case Intrinsic::allow_runtime_check:
2509   case Intrinsic::allow_ubsan_check:
2510     MIRBuilder.buildCopy(getOrCreateVReg(CI),
2511                          getOrCreateVReg(*ConstantInt::getTrue(CI.getType())));
2512     return true;
2513   case Intrinsic::amdgcn_cs_chain:
2514     return translateCallBase(CI, MIRBuilder);
2515   case Intrinsic::fptrunc_round: {
2516     uint32_t Flags = MachineInstr::copyFlagsFromInstruction(CI);
2517 
2518     // Convert the metadata argument to a constant integer
2519     Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(1))->getMetadata();
2520     std::optional<RoundingMode> RoundMode =
2521         convertStrToRoundingMode(cast<MDString>(MD)->getString());
2522 
2523     // Add the Rounding mode as an integer
2524     MIRBuilder
2525         .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
2526                     {getOrCreateVReg(CI)},
2527                     {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
2528         .addImm((int)*RoundMode);
2529 
2530     return true;
2531   }
2532   case Intrinsic::is_fpclass: {
2533     Value *FpValue = CI.getOperand(0);
2534     ConstantInt *TestMaskValue = cast<ConstantInt>(CI.getOperand(1));
2535 
2536     MIRBuilder
2537         .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
2538                     {getOrCreateVReg(*FpValue)})
2539         .addImm(TestMaskValue->getZExtValue());
2540 
2541     return true;
2542   }
2543   case Intrinsic::set_fpenv: {
2544     Value *FPEnv = CI.getOperand(0);
2545     MIRBuilder.buildSetFPEnv(getOrCreateVReg(*FPEnv));
2546     return true;
2547   }
2548   case Intrinsic::reset_fpenv:
2549     MIRBuilder.buildResetFPEnv();
2550     return true;
2551   case Intrinsic::set_fpmode: {
2552     Value *FPState = CI.getOperand(0);
2553     MIRBuilder.buildSetFPMode(getOrCreateVReg(*FPState));
2554     return true;
2555   }
2556   case Intrinsic::reset_fpmode:
2557     MIRBuilder.buildResetFPMode();
2558     return true;
2559   case Intrinsic::vscale: {
2560     MIRBuilder.buildVScale(getOrCreateVReg(CI), 1);
2561     return true;
2562   }
2563   case Intrinsic::scmp:
2564     MIRBuilder.buildSCmp(getOrCreateVReg(CI),
2565                          getOrCreateVReg(*CI.getOperand(0)),
2566                          getOrCreateVReg(*CI.getOperand(1)));
2567     return true;
2568   case Intrinsic::ucmp:
2569     MIRBuilder.buildUCmp(getOrCreateVReg(CI),
2570                          getOrCreateVReg(*CI.getOperand(0)),
2571                          getOrCreateVReg(*CI.getOperand(1)));
2572     return true;
2573   case Intrinsic::prefetch: {
2574     Value *Addr = CI.getOperand(0);
2575     unsigned RW = cast<ConstantInt>(CI.getOperand(1))->getZExtValue();
2576     unsigned Locality = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
2577     unsigned CacheType = cast<ConstantInt>(CI.getOperand(3))->getZExtValue();
2578 
2579     auto Flags = RW ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
2580     auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
2581                                           LLT(), Align());
2582 
2583     MIRBuilder.buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
2584                              MMO);
2585 
2586     return true;
2587   }
2588 
2589   case Intrinsic::vector_interleave2:
2590   case Intrinsic::vector_deinterleave2: {
2591     // Both intrinsics have at least one operand.
2592     Value *Op0 = CI.getOperand(0);
2593     LLT ResTy = getLLTForType(*Op0->getType(), MIRBuilder.getDataLayout());
2594     if (!ResTy.isFixedVector())
2595       return false;
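    // Only fixed-length vectors are handled here; for scalable vectors this
    // returns false and the intrinsic is not treated as a known intrinsic.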
2596 
2597     if (CI.getIntrinsicID() == Intrinsic::vector_interleave2)
2598       return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
2599 
2600     return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
2601   }
2602 
2603 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)  \
2604   case Intrinsic::INTRINSIC:
2605 #include "llvm/IR/ConstrainedOps.def"
2606     return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
2607                                            MIRBuilder);
2608   case Intrinsic::experimental_convergence_anchor:
2609   case Intrinsic::experimental_convergence_entry:
2610   case Intrinsic::experimental_convergence_loop:
2611     return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
2612   }
2613   return false;
2614 }
2615 
2616 bool IRTranslator::translateInlineAsm(const CallBase &CB,
2617                                       MachineIRBuilder &MIRBuilder) {
2618 
2619   const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2620 
2621   if (!ALI) {
2622     LLVM_DEBUG(
2623         dbgs() << "Inline asm lowering is not supported for this target yet\n");
2624     return false;
2625   }
2626 
2627   return ALI->lowerInlineAsm(
2628       MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
2629 }
2630 
2631 bool IRTranslator::translateCallBase(const CallBase &CB,
2632                                      MachineIRBuilder &MIRBuilder) {
2633   ArrayRef<Register> Res = getOrCreateVRegs(CB);
2634 
2635   SmallVector<ArrayRef<Register>, 8> Args;
2636   Register SwiftInVReg = 0;
2637   Register SwiftErrorVReg = 0;
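  // swifterror values are threaded through virtual registers rather than
  // memory: each use at a call site copies from the current swifterror vreg,
  // and a new def is recorded for the value live after the call.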
2638   for (const auto &Arg : CB.args()) {
2639     if (CLI->supportSwiftError() && isSwiftError(Arg)) {
2640       assert(SwiftInVReg == 0 && "Expected only one swift error argument");
2641       LLT Ty = getLLTForType(*Arg->getType(), *DL);
2642       SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2643       MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2644                                             &CB, &MIRBuilder.getMBB(), Arg));
2645       Args.emplace_back(ArrayRef(SwiftInVReg));
2646       SwiftErrorVReg =
2647           SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
2648       continue;
2649     }
2650     Args.push_back(getOrCreateVRegs(*Arg));
2651   }
2652 
2653   if (auto *CI = dyn_cast<CallInst>(&CB)) {
2654     if (ORE->enabled()) {
2655       if (MemoryOpRemark::canHandle(CI, *LibInfo)) {
2656         MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
2657         R.visit(CI);
2658       }
2659     }
2660   }
2661 
2662   std::optional<CallLowering::PtrAuthInfo> PAI;
2663   if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_ptrauth)) {
2664     // Functions should never be ptrauth-called directly.
2665     assert(!CB.getCalledFunction() && "invalid direct ptrauth call");
2666 
2667     const Value *Key = Bundle->Inputs[0];
2668     const Value *Discriminator = Bundle->Inputs[1];
2669 
2670     // Look through ptrauth constants to try to eliminate the matching bundle
2671     // and turn this into a direct call with no ptrauth.
2672     // CallLowering will use the raw pointer if it doesn't find the PAI.
2673     const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CB.getCalledOperand());
2674     if (!CalleeCPA || !isa<Function>(CalleeCPA->getPointer()) ||
2675         !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, *DL)) {
2676       // If we can't make it direct, package the bundle into PAI.
2677       Register DiscReg = getOrCreateVReg(*Discriminator);
2678       PAI = CallLowering::PtrAuthInfo{cast<ConstantInt>(Key)->getZExtValue(),
2679                                       DiscReg};
2680     }
2681   }
2682 
2683   Register ConvergenceCtrlToken = 0;
2684   if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
2685     const auto &Token = *Bundle->Inputs[0].get();
2686     ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
2687   }
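  // If a convergencectrl bundle is present, its token is forwarded to call
  // lowering so the target can attach it to the lowered call.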
2688 
2689   // We don't set HasCalls on MFI here yet because call lowering may decide to
2690   // optimize into tail calls. Instead, we defer that to selection where a final
2691   // scan is done to check if any instructions are calls.
2692   bool Success = CLI->lowerCall(
2693       MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,
2694       [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
2695 
2696   // Check if we just inserted a tail call.
2697   if (Success) {
2698     assert(!HasTailCall && "Can't tail call return twice from block?");
2699     const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2700     HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
2701   }
2702 
2703   return Success;
2704 }
2705 
2706 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
2707   const CallInst &CI = cast<CallInst>(U);
2708   auto TII = MF->getTarget().getIntrinsicInfo();
2709   const Function *F = CI.getCalledFunction();
2710 
2711   // FIXME: support Windows dllimport function calls and calls through
2712   // weak symbols.
2713   if (F && (F->hasDLLImportStorageClass() ||
2714             (MF->getTarget().getTargetTriple().isOSWindows() &&
2715              F->hasExternalWeakLinkage())))
2716     return false;
2717 
2718   // FIXME: support control flow guard targets.
2719   if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2720     return false;
2721 
2722   // FIXME: support statepoints and related.
2723   if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))
2724     return false;
2725 
2726   if (CI.isInlineAsm())
2727     return translateInlineAsm(CI, MIRBuilder);
2728 
2729   diagnoseDontCall(CI);
2730 
2731   Intrinsic::ID ID = Intrinsic::not_intrinsic;
2732   if (F && F->isIntrinsic()) {
2733     ID = F->getIntrinsicID();
2734     if (TII && ID == Intrinsic::not_intrinsic)
2735       ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
2736   }
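  // Target-specific intrinsics (looked up via TargetIntrinsicInfo) are also
  // mapped to an ID here; anything that is not an intrinsic is lowered as an
  // ordinary call below.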
2737 
2738   if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
2739     return translateCallBase(CI, MIRBuilder);
2740 
2741   assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
2742 
2743   if (translateKnownIntrinsic(CI, ID, MIRBuilder))
2744     return true;
2745 
2746   ArrayRef<Register> ResultRegs;
2747   if (!CI.getType()->isVoidTy())
2748     ResultRegs = getOrCreateVRegs(CI);
2749 
2750   // Ignore the callsite attributes. Backend code is most likely not expecting
2751   // an intrinsic to sometimes have side effects and sometimes not.
2752   MachineInstrBuilder MIB = MIRBuilder.buildIntrinsic(ID, ResultRegs);
2753   if (isa<FPMathOperator>(CI))
2754     MIB->copyIRFlags(CI);
2755 
2756   for (const auto &Arg : enumerate(CI.args())) {
2757     // If this is required to be an immediate, don't materialize it in a
2758     // register.
2759     if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
2760       if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
2761         // imm arguments are more convenient than cimm (and realistically
2762         // probably sufficient), so use them.
2763         assert(CI->getBitWidth() <= 64 &&
2764                "large intrinsic immediates not handled");
2765         MIB.addImm(CI->getSExtValue());
2766       } else {
2767         MIB.addFPImm(cast<ConstantFP>(Arg.value()));
2768       }
2769     } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
2770       auto *MD = MDVal->getMetadata();
2771       auto *MDN = dyn_cast<MDNode>(MD);
2772       if (!MDN) {
2773         if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
2774           MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
2775         else // This was probably an MDString.
2776           return false;
2777       }
2778       MIB.addMetadata(MDN);
2779     } else {
2780       ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
2781       if (VRegs.size() > 1)
2782         return false;
2783       MIB.addUse(VRegs[0]);
2784     }
2785   }
2786 
2787   // Add a MachineMemOperand if it is a target mem intrinsic.
2788   TargetLowering::IntrinsicInfo Info;
2789   // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
2790   if (TLI->getTgtMemIntrinsic(Info, CI, *MF, ID)) {
2791     Align Alignment = Info.align.value_or(
2792         DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
2793     LLT MemTy = Info.memVT.isSimple()
2794                     ? getLLTForMVT(Info.memVT.getSimpleVT())
2795                     : LLT::scalar(Info.memVT.getStoreSizeInBits());
2796 
2797     // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
2798     //       didn't yield anything useful.
2799     MachinePointerInfo MPI;
2800     if (Info.ptrVal)
2801       MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
2802     else if (Info.fallbackAddressSpace)
2803       MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
2804     MIB.addMemOperand(
2805         MF->getMachineMemOperand(MPI, Info.flags, MemTy, Alignment, CI.getAAMetadata()));
2806   }
2807 
2808   if (CI.isConvergent()) {
2809     if (auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl)) {
2810       auto *Token = Bundle->Inputs[0].get();
2811       Register TokenReg = getOrCreateVReg(*Token);
2812       MIB.addUse(TokenReg, RegState::Implicit);
2813     }
2814   }
2815 
2816   return true;
2817 }
2818 
2819 bool IRTranslator::findUnwindDestinations(
2820     const BasicBlock *EHPadBB,
2821     BranchProbability Prob,
2822     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2823         &UnwindDests) {
2824   EHPersonality Personality = classifyEHPersonality(
2825       EHPadBB->getParent()->getFunction().getPersonalityFn());
2826   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2827   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2828   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2829   bool IsSEH = isAsynchronousEHPersonality(Personality);
2830 
2831   if (IsWasmCXX) {
2832     // Ignore this for now.
2833     return false;
2834   }
2835 
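  // Walk the chain of EH pads starting at the invoke's unwind destination:
  // landing pads and cleanup pads terminate the walk, while a catchswitch
  // contributes each of its handlers and then continues to its own unwind
  // destination.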
2836   while (EHPadBB) {
2837     const Instruction *Pad = EHPadBB->getFirstNonPHI();
2838     BasicBlock *NewEHPadBB = nullptr;
2839     if (isa<LandingPadInst>(Pad)) {
2840       // Stop on landingpads. They are not funclets.
2841       UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2842       break;
2843     }
2844     if (isa<CleanupPadInst>(Pad)) {
2845       // Stop on cleanup pads. Cleanups are always funclet entries for all known
2846       // personalities.
2847       UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2848       UnwindDests.back().first->setIsEHScopeEntry();
2849       UnwindDests.back().first->setIsEHFuncletEntry();
2850       break;
2851     }
2852     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2853       // Add the catchpad handlers to the possible destinations.
2854       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2855         UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2856         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2857         // For MSVC++ and the CLR, catch blocks are funclets and need prologues.
2858           UnwindDests.back().first->setIsEHFuncletEntry();
2859         if (!IsSEH)
2860           UnwindDests.back().first->setIsEHScopeEntry();
2861       }
2862       NewEHPadBB = CatchSwitch->getUnwindDest();
2863     } else {
2864       continue;
2865     }
2866 
2867     BranchProbabilityInfo *BPI = FuncInfo.BPI;
2868     if (BPI && NewEHPadBB)
2869       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2870     EHPadBB = NewEHPadBB;
2871   }
2872   return true;
2873 }
2874 
2875 bool IRTranslator::translateInvoke(const User &U,
2876                                    MachineIRBuilder &MIRBuilder) {
2877   const InvokeInst &I = cast<InvokeInst>(U);
2878   MCContext &Context = MF->getContext();
2879 
2880   const BasicBlock *ReturnBB = I.getSuccessor(0);
2881   const BasicBlock *EHPadBB = I.getSuccessor(1);
2882 
2883   const Function *Fn = I.getCalledFunction();
2884 
2885   // FIXME: support invoking patchpoint and statepoint intrinsics.
2886   if (Fn && Fn->isIntrinsic())
2887     return false;
2888 
2889   // FIXME: support whatever these are.
2890   if (I.hasDeoptState())
2891     return false;
2892 
2893   // FIXME: support control flow guard targets.
2894   if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2895     return false;
2896 
2897   // FIXME: support Windows exception handling.
2898   if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
2899     return false;
2900 
2901   // FIXME: support Windows dllimport function calls and calls through
2902   // weak symbols.
2903   if (Fn && (Fn->hasDLLImportStorageClass() ||
2904             (MF->getTarget().getTargetTriple().isOSWindows() &&
2905              Fn->hasExternalWeakLinkage())))
2906     return false;
2907 
2908   bool LowerInlineAsm = I.isInlineAsm();
2909   bool NeedEHLabel = true;
2910 
2911   // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
2912   // the region covered by the try.
2913   MCSymbol *BeginSymbol = nullptr;
2914   if (NeedEHLabel) {
2915     MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
2916     BeginSymbol = Context.createTempSymbol();
2917     MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
2918   }
2919 
2920   if (LowerInlineAsm) {
2921     if (!translateInlineAsm(I, MIRBuilder))
2922       return false;
2923   } else if (!translateCallBase(I, MIRBuilder))
2924     return false;
2925 
2926   MCSymbol *EndSymbol = nullptr;
2927   if (NeedEHLabel) {
2928     EndSymbol = Context.createTempSymbol();
2929     MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
2930   }
2931 
2932   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2933   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2934   MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
2935   BranchProbability EHPadBBProb =
2936       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2937           : BranchProbability::getZero();
2938 
2939   if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
2940     return false;
2941 
2942   MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
2943                     &ReturnMBB = getMBB(*ReturnBB);
2944   // Update successor info.
2945   addSuccessorWithProb(InvokeMBB, &ReturnMBB);
2946   for (auto &UnwindDest : UnwindDests) {
2947     UnwindDest.first->setIsEHPad();
2948     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2949   }
2950   InvokeMBB->normalizeSuccProbs();
2951 
2952   if (NeedEHLabel) {
2953     assert(BeginSymbol && "Expected a begin symbol!");
2954     assert(EndSymbol && "Expected an end symbol!");
2955     MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
2956   }
2957 
2958   MIRBuilder.buildBr(ReturnMBB);
2959   return true;
2960 }
2961 
2962 bool IRTranslator::translateCallBr(const User &U,
2963                                    MachineIRBuilder &MIRBuilder) {
2964   // FIXME: Implement this.
2965   return false;
2966 }
2967 
2968 bool IRTranslator::translateLandingPad(const User &U,
2969                                        MachineIRBuilder &MIRBuilder) {
2970   const LandingPadInst &LP = cast<LandingPadInst>(U);
2971 
2972   MachineBasicBlock &MBB = MIRBuilder.getMBB();
2973 
2974   MBB.setIsEHPad();
2975 
2976   // If there aren't registers to copy the values into (e.g., during SjLj
2977   // exceptions), then don't bother.
2978   const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
2979   if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
2980       TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
2981     return true;
2982 
2983   // If the landingpad's return type is token type, we don't create
2984   // instructions for its exception pointer and selector value. Extracting the
2985   // exception pointer or selector value from token-type landingpads is not
2986   // currently supported.
2987   if (LP.getType()->isTokenTy())
2988     return true;
2989 
2990   // Add a label to mark the beginning of the landing pad.  Deletion of the
2991   // landing pad can thus be detected via the MachineModuleInfo.
2992   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
2993     .addSym(MF->addLandingPad(&MBB));
2994 
2995   // If the unwinder does not preserve all registers, ensure that the
2996   // function marks the clobbered registers as used.
2997   const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
2998   if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
2999     MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
3000 
3001   LLT Ty = getLLTForType(*LP.getType(), *DL);
3002   Register Undef = MRI->createGenericVirtualRegister(Ty);
3003   MIRBuilder.buildUndef(Undef);
3004 
3005   SmallVector<LLT, 2> Tys;
3006   for (Type *Ty : cast<StructType>(LP.getType())->elements())
3007     Tys.push_back(getLLTForType(*Ty, *DL));
3008   assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
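  // A landingpad yields { exception pointer, selector }; each component is
  // copied out of its target-defined physical register below.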
3009 
3010   // Mark exception register as live in.
3011   Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
3012   if (!ExceptionReg)
3013     return false;
3014 
3015   MBB.addLiveIn(ExceptionReg);
3016   ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
3017   MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
3018 
3019   Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
3020   if (!SelectorReg)
3021     return false;
3022 
3023   MBB.addLiveIn(SelectorReg);
3024   Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
3025   MIRBuilder.buildCopy(PtrVReg, SelectorReg);
3026   MIRBuilder.buildCast(ResRegs[1], PtrVReg);
3027 
3028   return true;
3029 }
3030 
3031 bool IRTranslator::translateAlloca(const User &U,
3032                                    MachineIRBuilder &MIRBuilder) {
3033   auto &AI = cast<AllocaInst>(U);
3034 
3035   if (AI.isSwiftError())
3036     return true;
3037 
3038   if (AI.isStaticAlloca()) {
3039     Register Res = getOrCreateVReg(AI);
3040     int FI = getOrCreateFrameIndex(AI);
3041     MIRBuilder.buildFrameIndex(Res, FI);
3042     return true;
3043   }
3044 
3045   // FIXME: support stack probing for Windows.
3046   if (MF->getTarget().getTargetTriple().isOSWindows())
3047     return false;
3048 
3049   // Now we're in the harder dynamic case.
3050   Register NumElts = getOrCreateVReg(*AI.getArraySize());
3051   Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
3052   LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
3053   if (MRI->getType(NumElts) != IntPtrTy) {
3054     Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
3055     MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
3056     NumElts = ExtElts;
3057   }
3058 
3059   Type *Ty = AI.getAllocatedType();
3060 
3061   Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
3062   Register TySize =
3063       getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
3064   MIRBuilder.buildMul(AllocSize, NumElts, TySize);
3065 
3066   // Round the size of the allocation up to the stack alignment size
3067   // by adding SA-1 to the size. This doesn't overflow because we're computing
3068   // an address inside an alloca.
3069   Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
3070   auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
3071   auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
3072                                       MachineInstr::NoUWrap);
3073   auto AlignCst =
3074       MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
3075   auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
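  // For example, with a 16-byte stack alignment a requested size of 20 bytes
  // becomes (20 + 15) & ~15 == 32.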
3076 
3077   Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
3078   if (Alignment <= StackAlign)
3079     Alignment = Align(1);
3080   MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
3081 
3082   MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
3083   assert(MF->getFrameInfo().hasVarSizedObjects());
3084   return true;
3085 }
3086 
3087 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
3088   // FIXME: We may need more info about the type. Because of how LLT works,
3089   // we're completely discarding the i64/double distinction here (amongst
3090   // others). Fortunately the ABIs I know of where that matters don't use va_arg
3091   // anyway, but that's not guaranteed.
3092   MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
3093                         {getOrCreateVReg(*U.getOperand(0)),
3094                          DL->getABITypeAlign(U.getType()).value()});
3095   return true;
3096 }
3097 
3098 bool IRTranslator::translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
3099   if (!MF->getTarget().Options.TrapUnreachable)
3100     return true;
3101 
3102   auto &UI = cast<UnreachableInst>(U);
3103 
3104   // We may be able to ignore unreachable behind a noreturn call.
3105   if (const CallInst *Call = dyn_cast_or_null<CallInst>(UI.getPrevNode());
3106       Call && Call->doesNotReturn()) {
3107     if (MF->getTarget().Options.NoTrapAfterNoreturn)
3108       return true;
3109     // Do not emit an additional trap instruction.
3110     if (Call->isNonContinuableTrap())
3111       return true;
3112   }
3113 
3114   MIRBuilder.buildTrap();
3115   return true;
3116 }
3117 
3118 bool IRTranslator::translateInsertElement(const User &U,
3119                                           MachineIRBuilder &MIRBuilder) {
3120   // If it is a <1 x Ty> vector, use the scalar directly, since <1 x Ty> is
3121   // not a legal vector type in LLT.
3122   if (auto *FVT = dyn_cast<FixedVectorType>(U.getType());
3123       FVT && FVT->getNumElements() == 1)
3124     return translateCopy(U, *U.getOperand(1), MIRBuilder);
3125 
3126   Register Res = getOrCreateVReg(U);
3127   Register Val = getOrCreateVReg(*U.getOperand(0));
3128   Register Elt = getOrCreateVReg(*U.getOperand(1));
3129   unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
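  // Constant indices are rewritten to the target's preferred vector-index
  // width up front; any remaining width mismatch is fixed up with a
  // zext/trunc below.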
3130   Register Idx;
3131   if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(2))) {
3132     if (CI->getBitWidth() != PreferredVecIdxWidth) {
3133       APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3134       auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
3135       Idx = getOrCreateVReg(*NewIdxCI);
3136     }
3137   }
3138   if (!Idx)
3139     Idx = getOrCreateVReg(*U.getOperand(2));
3140   if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3141     const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3142     Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
3143   }
3144   MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
3145   return true;
3146 }
3147 
3148 bool IRTranslator::translateExtractElement(const User &U,
3149                                            MachineIRBuilder &MIRBuilder) {
3150   // If it is a <1 x Ty> vector, use the scalar directly, since <1 x Ty> is
3151   // not a legal vector type in LLT.
3152   if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
3153     return translateCopy(U, *U.getOperand(0), MIRBuilder);
3154 
3155   Register Res = getOrCreateVReg(U);
3156   Register Val = getOrCreateVReg(*U.getOperand(0));
3157   unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
3158   Register Idx;
3159   if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
3160     if (CI->getBitWidth() != PreferredVecIdxWidth) {
3161       APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3162       auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
3163       Idx = getOrCreateVReg(*NewIdxCI);
3164     }
3165   }
3166   if (!Idx)
3167     Idx = getOrCreateVReg(*U.getOperand(1));
3168   if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3169     const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3170     Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
3171   }
3172   MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
3173   return true;
3174 }
3175 
3176 bool IRTranslator::translateShuffleVector(const User &U,
3177                                           MachineIRBuilder &MIRBuilder) {
3178   // A ShuffleVector that operates on scalable vectors is a splat vector,
3179   // where the splatted value is the 0th element of the first operand,
3180   // since the index mask operand is the zeroinitializer (undef and
3181   // poison are treated as zeroinitializer here).
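  // Illustrative example (not from the source):
  //   shufflevector <vscale x 4 x i32> %v, <vscale x 4 x i32> poison,
  //                 <vscale x 4 x i32> zeroinitializer
  // is lowered to a splat of element 0 of %v.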
3182   if (U.getOperand(0)->getType()->isScalableTy()) {
3183     Value *Op0 = U.getOperand(0);
3184     auto SplatVal = MIRBuilder.buildExtractVectorElementConstant(
3185         LLT::scalar(Op0->getType()->getScalarSizeInBits()),
3186         getOrCreateVReg(*Op0), 0);
3187     MIRBuilder.buildSplatVector(getOrCreateVReg(U), SplatVal);
3188     return true;
3189   }
3190 
3191   ArrayRef<int> Mask;
3192   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
3193     Mask = SVI->getShuffleMask();
3194   else
3195     Mask = cast<ConstantExpr>(U).getShuffleMask();
3196   ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
3197   MIRBuilder
3198       .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
3199                   {getOrCreateVReg(*U.getOperand(0)),
3200                    getOrCreateVReg(*U.getOperand(1))})
3201       .addShuffleMask(MaskAlloc);
3202   return true;
3203 }
3204 
3205 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
3206   const PHINode &PI = cast<PHINode>(U);
3207 
3208   SmallVector<MachineInstr *, 4> Insts;
3209   for (auto Reg : getOrCreateVRegs(PI)) {
3210     auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
3211     Insts.push_back(MIB.getInstr());
3212   }
3213 
3214   PendingPHIs.emplace_back(&PI, std::move(Insts));
3215   return true;
3216 }
3217 
3218 bool IRTranslator::translateAtomicCmpXchg(const User &U,
3219                                           MachineIRBuilder &MIRBuilder) {
3220   const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
3221 
3222   auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
3223 
3224   auto Res = getOrCreateVRegs(I);
3225   Register OldValRes = Res[0];
3226   Register SuccessRes = Res[1];
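  // The IR cmpxchg produces { old value, success flag }; these map onto the
  // two results of the G_ATOMIC_CMPXCHG_WITH_SUCCESS built below.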
3227   Register Addr = getOrCreateVReg(*I.getPointerOperand());
3228   Register Cmp = getOrCreateVReg(*I.getCompareOperand());
3229   Register NewVal = getOrCreateVReg(*I.getNewValOperand());
3230 
3231   MIRBuilder.buildAtomicCmpXchgWithSuccess(
3232       OldValRes, SuccessRes, Addr, Cmp, NewVal,
3233       *MF->getMachineMemOperand(
3234           MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
3235           getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3236           I.getSuccessOrdering(), I.getFailureOrdering()));
3237   return true;
3238 }
3239 
3240 bool IRTranslator::translateAtomicRMW(const User &U,
3241                                       MachineIRBuilder &MIRBuilder) {
3242   const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
3243   auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
3244 
3245   Register Res = getOrCreateVReg(I);
3246   Register Addr = getOrCreateVReg(*I.getPointerOperand());
3247   Register Val = getOrCreateVReg(*I.getValOperand());
3248 
3249   unsigned Opcode = 0;
3250   switch (I.getOperation()) {
3251   default:
3252     return false;
3253   case AtomicRMWInst::Xchg:
3254     Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
3255     break;
3256   case AtomicRMWInst::Add:
3257     Opcode = TargetOpcode::G_ATOMICRMW_ADD;
3258     break;
3259   case AtomicRMWInst::Sub:
3260     Opcode = TargetOpcode::G_ATOMICRMW_SUB;
3261     break;
3262   case AtomicRMWInst::And:
3263     Opcode = TargetOpcode::G_ATOMICRMW_AND;
3264     break;
3265   case AtomicRMWInst::Nand:
3266     Opcode = TargetOpcode::G_ATOMICRMW_NAND;
3267     break;
3268   case AtomicRMWInst::Or:
3269     Opcode = TargetOpcode::G_ATOMICRMW_OR;
3270     break;
3271   case AtomicRMWInst::Xor:
3272     Opcode = TargetOpcode::G_ATOMICRMW_XOR;
3273     break;
3274   case AtomicRMWInst::Max:
3275     Opcode = TargetOpcode::G_ATOMICRMW_MAX;
3276     break;
3277   case AtomicRMWInst::Min:
3278     Opcode = TargetOpcode::G_ATOMICRMW_MIN;
3279     break;
3280   case AtomicRMWInst::UMax:
3281     Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
3282     break;
3283   case AtomicRMWInst::UMin:
3284     Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
3285     break;
3286   case AtomicRMWInst::FAdd:
3287     Opcode = TargetOpcode::G_ATOMICRMW_FADD;
3288     break;
3289   case AtomicRMWInst::FSub:
3290     Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
3291     break;
3292   case AtomicRMWInst::FMax:
3293     Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
3294     break;
3295   case AtomicRMWInst::FMin:
3296     Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
3297     break;
3298   case AtomicRMWInst::UIncWrap:
3299     Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
3300     break;
3301   case AtomicRMWInst::UDecWrap:
3302     Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
3303     break;
3304   }
3305 
3306   MIRBuilder.buildAtomicRMW(
3307       Opcode, Res, Addr, Val,
3308       *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
3309                                 Flags, MRI->getType(Val), getMemOpAlign(I),
3310                                 I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3311                                 I.getOrdering()));
3312   return true;
3313 }
3314 
3315 bool IRTranslator::translateFence(const User &U,
3316                                   MachineIRBuilder &MIRBuilder) {
3317   const FenceInst &Fence = cast<FenceInst>(U);
3318   MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
3319                         Fence.getSyncScopeID());
3320   return true;
3321 }
3322 
3323 bool IRTranslator::translateFreeze(const User &U,
3324                                    MachineIRBuilder &MIRBuilder) {
3325   const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
3326   const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));
3327 
3328   assert(DstRegs.size() == SrcRegs.size() &&
3329          "Freeze with different source and destination type?");
3330 
3331   for (unsigned I = 0; I < DstRegs.size(); ++I) {
3332     MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
3333   }
3334 
3335   return true;
3336 }
3337 
3338 void IRTranslator::finishPendingPhis() {
3339 #ifndef NDEBUG
3340   DILocationVerifier Verifier;
3341   GISelObserverWrapper WrapperObserver(&Verifier);
3342   RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
3343 #endif // ifndef NDEBUG
3344   for (auto &Phi : PendingPHIs) {
3345     const PHINode *PI = Phi.first;
3346     if (PI->getType()->isEmptyTy())
3347       continue;
3348     ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
3349     MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
3350     EntryBuilder->setDebugLoc(PI->getDebugLoc());
3351 #ifndef NDEBUG
3352     Verifier.setCurrentInst(PI);
3353 #endif // ifndef NDEBUG
3354 
3355     SmallSet<const MachineBasicBlock *, 16> SeenPreds;
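    // The same machine block can be a predecessor along several IR edges
    // (e.g. after switch lowering), so record each one and add its incoming
    // values to the PHIs only once.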
3356     for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
3357       auto IRPred = PI->getIncomingBlock(i);
3358       ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
3359       for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
3360         if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
3361           continue;
3362         SeenPreds.insert(Pred);
3363         for (unsigned j = 0; j < ValRegs.size(); ++j) {
3364           MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
3365           MIB.addUse(ValRegs[j]);
3366           MIB.addMBB(Pred);
3367         }
3368       }
3369     }
3370   }
3371 }
3372 
3373 void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,
3374                                      const DILocalVariable *Variable,
3375                                      const DIExpression *Expression,
3376                                      const DebugLoc &DL,
3377                                      MachineIRBuilder &MIRBuilder) {
3378   assert(Variable->isValidLocationForIntrinsic(DL) &&
3379          "Expected inlined-at fields to agree");
3380   // Act as if we're handling a debug intrinsic.
3381   MIRBuilder.setDebugLoc(DL);
3382 
3383   if (!V || HasArgList) {
3384     // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
3385     // terminate any prior location.
3386     MIRBuilder.buildIndirectDbgValue(0, Variable, Expression);
3387     return;
3388   }
3389 
3390   if (const auto *CI = dyn_cast<Constant>(V)) {
3391     MIRBuilder.buildConstDbgValue(*CI, Variable, Expression);
3392     return;
3393   }
3394 
3395   if (auto *AI = dyn_cast<AllocaInst>(V);
3396       AI && AI->isStaticAlloca() && Expression->startsWithDeref()) {
3397     // If the value is an alloca and the expression starts with a
3398     // dereference, track a stack slot instead of a register, as registers
3399     // may be clobbered.
3400     auto ExprOperands = Expression->getElements();
3401     auto *ExprDerefRemoved =
3402         DIExpression::get(AI->getContext(), ExprOperands.drop_front());
3403     MIRBuilder.buildFIDbgValue(getOrCreateFrameIndex(*AI), Variable,
3404                                ExprDerefRemoved);
3405     return;
3406   }
3407   if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,
3408                                     MIRBuilder))
3409     return;
3410   for (Register Reg : getOrCreateVRegs(*V)) {
3411     // FIXME: This does not handle register-indirect values at offset 0. The
3412     // direct/indirect thing shouldn't really be handled by something as
3413     // implicit as reg+noreg vs reg+imm in the first place, but it seems
3414     // pretty baked in right now.
3415     MIRBuilder.buildDirectDbgValue(Reg, Variable, Expression);
3416   }
3417   return;
3418 }
3419 
3420 void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
3421                                      const DILocalVariable *Variable,
3422                                      const DIExpression *Expression,
3423                                      const DebugLoc &DL,
3424                                      MachineIRBuilder &MIRBuilder) {
3425   if (!Address || isa<UndefValue>(Address)) {
3426     LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");
3427     return;
3428   }
3429 
3430   assert(Variable->isValidLocationForIntrinsic(DL) &&
3431          "Expected inlined-at fields to agree");
3432   auto AI = dyn_cast<AllocaInst>(Address);
3433   if (AI && AI->isStaticAlloca()) {
3434     // Static allocas are tracked at the MF level, no need for DBG_VALUE
3435     // instructions (in fact, they get ignored if they *do* exist).
3436     MF->setVariableDbgInfo(Variable, Expression,
3437                            getOrCreateFrameIndex(*AI), DL);
3438     return;
3439   }
3440 
3441   if (translateIfEntryValueArgument(true, Address, Variable,
3442                                     Expression, DL,
3443                                     MIRBuilder))
3444     return;
3445 
3446   // A dbg.declare describes the address of a source variable, so lower it
3447   // into an indirect DBG_VALUE.
3448   MIRBuilder.setDebugLoc(DL);
3449   MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
3450                                    Variable, Expression);
3451   return;
3452 }
3453 
3454 void IRTranslator::translateDbgInfo(const Instruction &Inst,
3455                                       MachineIRBuilder &MIRBuilder) {
3456   for (DbgRecord &DR : Inst.getDbgRecordRange()) {
3457     if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
3458       MIRBuilder.setDebugLoc(DLR->getDebugLoc());
3459       assert(DLR->getLabel() && "Missing label");
3460       assert(DLR->getLabel()->isValidLocationForIntrinsic(
3461                  MIRBuilder.getDebugLoc()) &&
3462              "Expected inlined-at fields to agree");
3463       MIRBuilder.buildDbgLabel(DLR->getLabel());
3464       continue;
3465     }
3466     DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
3467     const DILocalVariable *Variable = DVR.getVariable();
3468     const DIExpression *Expression = DVR.getExpression();
3469     Value *V = DVR.getVariableLocationOp(0);
3470     if (DVR.isDbgDeclare())
3471       translateDbgDeclareRecord(V, DVR.hasArgList(), Variable, Expression,
3472                                 DVR.getDebugLoc(), MIRBuilder);
3473     else
3474       translateDbgValueRecord(V, DVR.hasArgList(), Variable, Expression,
3475                               DVR.getDebugLoc(), MIRBuilder);
3476   }
3477 }
3478 
3479 bool IRTranslator::translate(const Instruction &Inst) {
3480   CurBuilder->setDebugLoc(Inst.getDebugLoc());
3481   CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
3482   CurBuilder->setMMRAMetadata(Inst.getMetadata(LLVMContext::MD_mmra));
3483 
3484   if (TLI->fallBackToDAGISel(Inst))
3485     return false;
3486 
3487   switch (Inst.getOpcode()) {
3488 #define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
3489   case Instruction::OPCODE:                                                    \
3490     return translate##OPCODE(Inst, *CurBuilder.get());
3491 #include "llvm/IR/Instruction.def"
3492   default:
3493     return false;
3494   }
3495 }
3496 
3497 bool IRTranslator::translate(const Constant &C, Register Reg) {
3498   // We only emit constants into the entry block from here. To prevent jumpy
3499   // debug behaviour, drop the debug location.
3500   if (auto CurrInstDL = CurBuilder->getDL())
3501     EntryBuilder->setDebugLoc(DebugLoc());
3502 
3503   if (auto CI = dyn_cast<ConstantInt>(&C))
3504     EntryBuilder->buildConstant(Reg, *CI);
3505   else if (auto CF = dyn_cast<ConstantFP>(&C))
3506     EntryBuilder->buildFConstant(Reg, *CF);
3507   else if (isa<UndefValue>(C))
3508     EntryBuilder->buildUndef(Reg);
3509   else if (isa<ConstantPointerNull>(C))
3510     EntryBuilder->buildConstant(Reg, 0);
3511   else if (auto GV = dyn_cast<GlobalValue>(&C))
3512     EntryBuilder->buildGlobalValue(Reg, GV);
3513   else if (auto CPA = dyn_cast<ConstantPtrAuth>(&C)) {
3514     Register Addr = getOrCreateVReg(*CPA->getPointer());
3515     Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
3516     EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);
3517   } else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
3518     if (!isa<FixedVectorType>(CAZ->getType()))
3519       return false;
3520     // Return the scalar if it is a <1 x Ty> vector.
3521     unsigned NumElts = CAZ->getElementCount().getFixedValue();
3522     if (NumElts == 1)
3523       return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder);
3524     SmallVector<Register, 4> Ops;
3525     for (unsigned I = 0; I < NumElts; ++I) {
3526       Constant &Elt = *CAZ->getElementValue(I);
3527       Ops.push_back(getOrCreateVReg(Elt));
3528     }
3529     EntryBuilder->buildBuildVector(Reg, Ops);
3530   } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
3531     // Return the scalar if it is a <1 x Ty> vector.
3532     if (CV->getNumElements() == 1)
3533       return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
3534     SmallVector<Register, 4> Ops;
3535     for (unsigned i = 0; i < CV->getNumElements(); ++i) {
3536       Constant &Elt = *CV->getElementAsConstant(i);
3537       Ops.push_back(getOrCreateVReg(Elt));
3538     }
3539     EntryBuilder->buildBuildVector(Reg, Ops);
3540   } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
3541     switch(CE->getOpcode()) {
3542 #define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
3543   case Instruction::OPCODE:                                                    \
3544     return translate##OPCODE(*CE, *EntryBuilder.get());
3545 #include "llvm/IR/Instruction.def"
3546     default:
3547       return false;
3548     }
3549   } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
3550     if (CV->getNumOperands() == 1)
3551       return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
3552     SmallVector<Register, 4> Ops;
3553     for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
3554       Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3555     }
3556     EntryBuilder->buildBuildVector(Reg, Ops);
3557   } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
3558     EntryBuilder->buildBlockAddress(Reg, BA);
3559   } else
3560     return false;
3561 
3562   return true;
3563 }
3564 
3565 bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
3566                                       MachineBasicBlock &MBB) {
3567   for (auto &BTB : SL->BitTestCases) {
3568     // Emit header first, if it wasn't already emitted.
3569     if (!BTB.Emitted)
3570       emitBitTestHeader(BTB, BTB.Parent);
3571 
3572     BranchProbability UnhandledProb = BTB.Prob;
3573     for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3574       UnhandledProb -= BTB.Cases[j].ExtraProb;
3575       // Set the current basic block to the mbb we wish to insert the code into
3576       MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3577       // If all cases cover a contiguous range, it is not necessary to jump to
3578       // the default block after the last bit test fails. This is because the
3579       // range check during bit test header creation has guaranteed that every
3580       // case here doesn't go outside the range. In this case, there is no need
3581       // to perform the last bit test, as it will always be true. Instead, make
3582       // the second-to-last bit-test fall through to the target of the last bit
3583       // test, and delete the last bit test.
3584 
3585       MachineBasicBlock *NextMBB;
3586       if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3587         // Second-to-last bit-test with contiguous range: fall through to the
3588         // target of the final bit test.
3589         NextMBB = BTB.Cases[j + 1].TargetBB;
3590       } else if (j + 1 == ej) {
3591         // For the last bit test, fall through to Default.
3592         NextMBB = BTB.Default;
3593       } else {
3594         // Otherwise, fall through to the next bit test.
3595         NextMBB = BTB.Cases[j + 1].ThisBB;
3596       }
3597 
3598       emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
3599 
3600       if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3601         // We need to record the replacement phi edge here that normally
3602         // happens in emitBitTestCase before we delete the case, otherwise the
3603         // phi edge will be lost.
3604         addMachineCFGPred({BTB.Parent->getBasicBlock(),
3605                            BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3606                           MBB);
3607         // Since we're not going to use the final bit test, remove it.
3608         BTB.Cases.pop_back();
3609         break;
3610       }
3611     }
3612     // This is the "default" BB. We have two jumps to it: from the "header" BB
3613     // and from the last "case" BB, unless the latter was skipped.
3614     CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3615                                    BTB.Default->getBasicBlock()};
3616     addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3617     if (!BTB.ContiguousRange) {
3618       addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3619     }
3620   }
3621   SL->BitTestCases.clear();
3622 
3623   for (auto &JTCase : SL->JTCases) {
3624     // Emit header first, if it wasn't already emitted.
3625     if (!JTCase.first.Emitted)
3626       emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3627 
3628     emitJumpTable(JTCase.second, JTCase.second.MBB);
3629   }
3630   SL->JTCases.clear();
3631 
3632   for (auto &SwCase : SL->SwitchCases)
3633     emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3634   SL->SwitchCases.clear();
3635 
3636   // Check if we need to generate stack-protector guard checks.
3637   StackProtector &SP = getAnalysis<StackProtector>();
3638   if (SP.shouldEmitSDCheck(BB)) {
3639     bool FunctionBasedInstrumentation =
3640         TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
3641     SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
3642   }
3643   // Handle stack protector.
3644   if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3645     LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
3646     return false;
3647   } else if (SPDescriptor.shouldEmitStackProtector()) {
3648     MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3649     MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3650 
3651     // Find the split point to split the parent mbb. At the same time copy all
3652     // physical registers used in the tail of parent mbb into virtual registers
3653     // before the split point and back into physical registers after the split
3654     // point. This prevents us needing to deal with Live-ins and many other
3655     // register allocation issues caused by us splitting the parent mbb. The
3656     // register allocator will clean up said virtual copies later on.
3657     MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
3658         ParentMBB, *MF->getSubtarget().getInstrInfo());
3659 
3660     // Splice the terminator of ParentMBB into SuccessMBB.
3661     SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
3662                        ParentMBB->end());
3663 
3664     // Add compare/jump on neq/jump to the parent BB.
3665     if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3666       return false;
3667 
3668     // CodeGen Failure MBB if we have not codegened it yet.
3669     MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3670     if (FailureMBB->empty()) {
3671       if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3672         return false;
3673     }
3674 
3675     // Clear the Per-BB State.
3676     SPDescriptor.resetPerBBState();
3677   }
3678   return true;
3679 }
3680 
3681 bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
3682                                           MachineBasicBlock *ParentBB) {
3683   CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
3684   // First create the loads to the guard/stack slot for the comparison.
3685   Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
3686   const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
3687   LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));
3688 
3689   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3690   int FI = MFI.getStackProtectorIndex();
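  // The parent-block check compares the value currently stored in the stack
  // guard slot (loaded below) against the canonical guard value, and branches
  // to the failure block on mismatch.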
3691 
3692   Register Guard;
3693   Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
3694   const Module &M = *ParentBB->getParent()->getFunction().getParent();
3695   Align Align = DL->getPrefTypeAlign(PointerType::getUnqual(M.getContext()));
3696 
3697   // Generate code to load the content of the guard slot.
3698   Register GuardVal =
3699       CurBuilder
3700           ->buildLoad(PtrMemTy, StackSlotPtr,
3701                       MachinePointerInfo::getFixedStack(*MF, FI), Align,
3702                       MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
3703           .getReg(0);
3704 
3705   if (TLI->useStackGuardXorFP()) {
3706     LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
3707     return false;
3708   }
3709 
3710   // Retrieve guard check function, nullptr if instrumentation is inlined.
3711   if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
3712     // This path is currently untestable on GlobalISel, since the only platform
3713     // that needs this seems to be Windows, and we fall back on that currently.
3714     // The code still lives here in case that changes.
3715     // Silence warning about unused variable until the code below that uses
3716     // 'GuardCheckFn' is enabled.
3717     (void)GuardCheckFn;
3718     return false;
3719 #if 0
3720     // The target provides a guard check function to validate the guard value.
3721     // Generate a call to that function with the content of the guard slot as
3722     // argument.
3723     FunctionType *FnTy = GuardCheckFn->getFunctionType();
3724     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3725     ISD::ArgFlagsTy Flags;
3726     if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
3727       Flags.setInReg();
3728     CallLowering::ArgInfo GuardArgInfo(
3729         {GuardVal, FnTy->getParamType(0), {Flags}});
3730 
3731     CallLowering::CallLoweringInfo Info;
3732     Info.OrigArgs.push_back(GuardArgInfo);
3733     Info.CallConv = GuardCheckFn->getCallingConv();
3734     Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
3735     Info.OrigRet = {Register(), FnTy->getReturnType()};
3736     if (!CLI->lowerCall(MIRBuilder, Info)) {
3737       LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
3738       return false;
3739     }
3740     return true;
3741 #endif
3742   }
3743 
3744   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
3745   // Otherwise, emit a volatile load to retrieve the stack guard value.
3746   if (TLI->useLoadStackGuardNode()) {
3747     Guard =
3748         MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
3749     getStackGuard(Guard, *CurBuilder);
3750   } else {
3751     // TODO: test using android subtarget when we support @llvm.thread.pointer.
3752     const Value *IRGuard = TLI->getSDagStackGuard(M);
3753     Register GuardPtr = getOrCreateVReg(*IRGuard);
3754 
3755     Guard = CurBuilder
3756                 ->buildLoad(PtrMemTy, GuardPtr,
3757                             MachinePointerInfo::getFixedStack(*MF, FI), Align,
3758                             MachineMemOperand::MOLoad |
3759                                 MachineMemOperand::MOVolatile)
3760                 .getReg(0);
3761   }
3762 
3763   // Perform the comparison.
3764   auto Cmp =
3765       CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
3766   // If the guard/stackslot do not equal, branch to failure MBB.
3767   CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
3768   // Otherwise branch to success MBB.
3769   CurBuilder->buildBr(*SPD.getSuccessMBB());
3770   return true;
3771 }
3772 
3773 bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
3774                                            MachineBasicBlock *FailureBB) {
3775   CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
3776 
3777   const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
3778   const char *Name = TLI->getLibcallName(Libcall);
3779 
3780   CallLowering::CallLoweringInfo Info;
3781   Info.CallConv = TLI->getLibcallCallingConv(Libcall);
3782   Info.Callee = MachineOperand::CreateES(Name);
3783   Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
3784                   0};
3785   if (!CLI->lowerCall(*CurBuilder, Info)) {
3786     LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
3787     return false;
3788   }
3789 
3790   // On PS4/PS5, the "return address" must still be within the calling
3791   // function, even if it's at the very end, so emit an explicit TRAP here.
3792   // WebAssembly needs an unreachable instruction after a non-returning call,
3793   // because the function return type can be different from __stack_chk_fail's
3794   // return type (void).
3795   const TargetMachine &TM = MF->getTarget();
3796   if (TM.getTargetTriple().isPS() || TM.getTargetTriple().isWasm()) {
3797     LLVM_DEBUG(dbgs() << "Unhandled trap emission for stack protector fail\n");
3798     return false;
3799   }
3800   return true;
3801 }
3802 
3803 void IRTranslator::finalizeFunction() {
3804   // Release the memory used by the different maps we
3805   // needed during the translation.
3806   PendingPHIs.clear();
3807   VMap.reset();
3808   FrameIndices.clear();
3809   MachinePreds.clear();
3810   // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
3811   // to avoid accessing free'd memory (in runOnMachineFunction) and to avoid
3812   // destroying it twice (in ~IRTranslator() and ~LLVMContext())
3813   EntryBuilder.reset();
3814   CurBuilder.reset();
3815   FuncInfo.clear();
3816   SPDescriptor.resetPerFunctionState();
3817 }
3818 
3819 /// Returns true if a BasicBlock \p BB within a variadic function contains a
3820 /// variadic musttail call.
3821 static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
3822   if (!IsVarArg)
3823     return false;
3824 
3825   // Walk the block backwards, because tail calls usually only appear at the end
3826   // of a block.
3827   return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
3828     const auto *CI = dyn_cast<CallInst>(&I);
3829     return CI && CI->isMustTailCall();
3830   });
3831 }
3832 
3833 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
3834   MF = &CurMF;
3835   const Function &F = MF->getFunction();
3836   GISelCSEAnalysisWrapper &Wrapper =
3837       getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
3838   // Set the CSEConfig and run the analysis.
3839   GISelCSEInfo *CSEInfo = nullptr;
3840   TPC = &getAnalysis<TargetPassConfig>();
3841   bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
3842                        ? EnableCSEInIRTranslator
3843                        : TPC->isGISelCSEEnabled();
3844   TLI = MF->getSubtarget().getTargetLowering();
3845 
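  // With CSE enabled, both builders are CSEMIRBuilders backed by the same
  // GISelCSEInfo, so identical instructions emitted during translation are
  // deduplicated rather than created twice.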
3846   if (EnableCSE) {
3847     EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3848     CSEInfo = &Wrapper.get(TPC->getCSEConfig());
3849     EntryBuilder->setCSEInfo(CSEInfo);
3850     CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3851     CurBuilder->setCSEInfo(CSEInfo);
3852   } else {
3853     EntryBuilder = std::make_unique<MachineIRBuilder>();
3854     CurBuilder = std::make_unique<MachineIRBuilder>();
3855   }
3856   CLI = MF->getSubtarget().getCallLowering();
3857   CurBuilder->setMF(*MF);
3858   EntryBuilder->setMF(*MF);
3859   MRI = &MF->getRegInfo();
3860   DL = &F.getDataLayout();
3861   ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
3862   const TargetMachine &TM = MF->getTarget();
3863   TM.resetTargetOptions(F);
3864   EnableOpts = OptLevel != CodeGenOptLevel::None && !skipFunction(F);
3865   FuncInfo.MF = MF;
3866   if (EnableOpts) {
3867     AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3868     FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
3869   } else {
3870     AA = nullptr;
3871     FuncInfo.BPI = nullptr;
3872   }
3873 
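  // These analyses are needed regardless of the optimization level.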
3874   AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
3875       MF->getFunction());
3876   LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
3877   FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
3878 
3879   SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
3880   SL->init(*TLI, TM, *DL);
3881 
3882   assert(PendingPHIs.empty() && "stale PHIs");
3883 
3884   // Targets which want to use big endian can enable it using
3885   // enableBigEndian()
3886   if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
3887     // Currently we don't properly handle big endian code.
3888     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3889                                F.getSubprogram(), &F.getEntryBlock());
3890     R << "unable to translate in big endian mode";
3891     reportTranslationError(*MF, *TPC, *ORE, R);
3892     return false;
3893   }
3894 
3895   // Release the per-function state when we return, whether we succeeded or not.
3896   auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
3897 
3898   // Set up a separate basic block for the arguments and constants.
3899   MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
3900   MF->push_back(EntryBB);
3901   EntryBuilder->setMBB(*EntryBB);
3902 
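  // Initialize swifterror tracking for this function; vreg entries for the
  // entry block are created using the debug location of its first non-PHI
  // instruction.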
3903   DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
3904   SwiftError.setFunction(CurMF);
3905   SwiftError.createEntriesInEntryBlock(DbgLoc);
3906 
3907   bool IsVarArg = F.isVarArg();
3908   bool HasMustTailInVarArgFn = false;
3909 
3910   // Create all blocks, in IR order, to preserve the layout.
3911   for (const BasicBlock &BB: F) {
3912     auto *&MBB = BBToMBB[&BB];
3913 
3914     MBB = MF->CreateMachineBasicBlock(&BB);
3915     MF->push_back(MBB);
3916 
3917     if (BB.hasAddressTaken())
3918       MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));
3919 
3920     if (!HasMustTailInVarArgFn)
3921       HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
3922   }
3923 
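  // Remember whether any block of this variadic function contains a musttail
  // call; targets consult this when forwarding variadic arguments.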
3924   MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
3925 
3926   // Make our arguments/constants entry block fall through to the IR entry block.
3927   EntryBB->addSuccessor(&getMBB(F.front()));
3928 
3929   if (CLI->fallBackToDAGISel(*MF)) {
3930     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3931                                F.getSubprogram(), &F.getEntryBlock());
3932     R << "unable to lower function: " << ore::NV("Prototype", F.getType());
3933     reportTranslationError(*MF, *TPC, *ORE, R);
3934     return false;
3935   }
3936 
3937   // Lower the actual args into this basic block.
3938   SmallVector<ArrayRef<Register>, 8> VRegArgs;
3939   for (const Argument &Arg: F.args()) {
3940     if (DL->getTypeStoreSize(Arg.getType()).isZero())
3941       continue; // Don't handle zero sized types.
3942     ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
3943     VRegArgs.push_back(VRegs);
3944 
3945     if (Arg.hasSwiftErrorAttr()) {
3946       assert(VRegs.size() == 1 && "Too many vregs for Swift error");
3947       SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
3948     }
3949   }
3950 
3951   if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
3952     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3953                                F.getSubprogram(), &F.getEntryBlock());
3954     R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
3955     reportTranslationError(*MF, *TPC, *ORE, R);
3956     return false;
3957   }
3958 
3959   // Need to visit defs before uses when translating instructions.
3960   GISelObserverWrapper WrapperObserver;
3961   if (EnableCSE && CSEInfo)
3962     WrapperObserver.addObserver(CSEInfo);
3963   {
3964     ReversePostOrderTraversal<const Function *> RPOT(&F);
3965 #ifndef NDEBUG
3966     DILocationVerifier Verifier;
3967     WrapperObserver.addObserver(&Verifier);
3968 #endif // ifndef NDEBUG
3969     RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
3970     RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
3971     for (const BasicBlock *BB : RPOT) {
3972       MachineBasicBlock &MBB = getMBB(*BB);
3973       // Set the insertion point of all the following translations to
3974       // the end of this basic block.
3975       CurBuilder->setMBB(MBB);
3976       HasTailCall = false;
3977       for (const Instruction &Inst : *BB) {
3978         // If we translated a tail call in the last step, then we know
3979         // everything after the call is either a return, or something that is
3980         // handled by the call itself. (E.g. a lifetime marker or assume
3981         // intrinsic.) In this case, we should stop translating the block and
3982         // move on.
3983         if (HasTailCall)
3984           break;
3985 #ifndef NDEBUG
3986         Verifier.setCurrentInst(&Inst);
3987 #endif // ifndef NDEBUG
3988 
3989         // Translate any debug-info attached to the instruction.
3990         translateDbgInfo(Inst, *CurBuilder);
3991 
3992         if (translate(Inst))
3993           continue;
3994 
3995         OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3996                                    Inst.getDebugLoc(), BB);
3997         R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
3998 
3999         if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
4000           std::string InstStrStorage;
4001           raw_string_ostream InstStr(InstStrStorage);
4002           InstStr << Inst;
4003 
4004           R << ": '" << InstStrStorage << "'";
4005         }
4006 
4007         reportTranslationError(*MF, *TPC, *ORE, R);
4008         return false;
4009       }
4010 
4011       if (!finalizeBasicBlock(*BB, MBB)) {
4012         OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4013                                    BB->getTerminator()->getDebugLoc(), BB);
4014         R << "unable to translate basic block";
4015         reportTranslationError(*MF, *TPC, *ORE, R);
4016         return false;
4017       }
4018     }
4019 #ifndef NDEBUG
4020     WrapperObserver.removeObserver(&Verifier);
4021 #endif
4022   }
4023 
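  // Every basic block has now been translated, so all values a PHI can
  // reference have vregs; fill in the operands of the PHIs deferred earlier.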
4024   finishPendingPhis();
4025 
4026   SwiftError.propagateVRegs();
4027 
4028   // Merge the argument lowering and constants block with its single
4029   // successor, the LLVM-IR entry block.  We want the basic block to
4030   // be maximal.
4031   assert(EntryBB->succ_size() == 1 &&
4032          "Custom BB used for lowering should have only one successor");
4033   // Get the successor of the current entry block.
4034   MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
4035   assert(NewEntryBB.pred_size() == 1 &&
4036          "LLVM-IR entry block has a predecessor!?");
4037   // Move all the instructions from the current entry block to the
4038   // new entry block.
4039   NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
4040                     EntryBB->end());
4041 
4042   // Update the live-in information for the new entry block.
4043   for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
4044     NewEntryBB.addLiveIn(LiveIn);
4045   NewEntryBB.sortUniqueLiveIns();
4046 
4047   // Get rid of the now empty basic block.
4048   EntryBB->removeSuccessor(&NewEntryBB);
4049   MF->remove(EntryBB);
4050   MF->deleteMachineBasicBlock(EntryBB);
4051 
4052   assert(&MF->front() == &NewEntryBB &&
4053          "New entry wasn't next in the list of basic block!");
4054 
4055   // Initialize stack protector information.
4056   StackProtector &SP = getAnalysis<StackProtector>();
4057   SP.copyToMachineFrameInfo(MF->getFrameInfo());
4058 
4059   return false;
4060 }
4061