1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the IRTranslator class.
10 //===----------------------------------------------------------------------===//
11
12 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
13 #include "llvm/ADT/PostOrderIterator.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/ScopeExit.h"
16 #include "llvm/ADT/SmallSet.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/Analysis/AliasAnalysis.h"
19 #include "llvm/Analysis/AssumptionCache.h"
20 #include "llvm/Analysis/BranchProbabilityInfo.h"
21 #include "llvm/Analysis/Loads.h"
22 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/Analysis/VectorUtils.h"
25 #include "llvm/CodeGen/Analysis.h"
26 #include "llvm/CodeGen/GlobalISel/CSEInfo.h"
27 #include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
28 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
29 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
30 #include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
31 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
32 #include "llvm/CodeGen/LowLevelTypeUtils.h"
33 #include "llvm/CodeGen/MachineBasicBlock.h"
34 #include "llvm/CodeGen/MachineFrameInfo.h"
35 #include "llvm/CodeGen/MachineFunction.h"
36 #include "llvm/CodeGen/MachineInstrBuilder.h"
37 #include "llvm/CodeGen/MachineMemOperand.h"
38 #include "llvm/CodeGen/MachineModuleInfo.h"
39 #include "llvm/CodeGen/MachineOperand.h"
40 #include "llvm/CodeGen/MachineRegisterInfo.h"
41 #include "llvm/CodeGen/StackProtector.h"
42 #include "llvm/CodeGen/SwitchLoweringUtils.h"
43 #include "llvm/CodeGen/TargetFrameLowering.h"
44 #include "llvm/CodeGen/TargetInstrInfo.h"
45 #include "llvm/CodeGen/TargetLowering.h"
46 #include "llvm/CodeGen/TargetOpcodes.h"
47 #include "llvm/CodeGen/TargetPassConfig.h"
48 #include "llvm/CodeGen/TargetRegisterInfo.h"
49 #include "llvm/CodeGen/TargetSubtargetInfo.h"
50 #include "llvm/CodeGenTypes/LowLevelType.h"
51 #include "llvm/IR/BasicBlock.h"
52 #include "llvm/IR/CFG.h"
53 #include "llvm/IR/Constant.h"
54 #include "llvm/IR/Constants.h"
55 #include "llvm/IR/DataLayout.h"
56 #include "llvm/IR/DerivedTypes.h"
57 #include "llvm/IR/DiagnosticInfo.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/GetElementPtrTypeIterator.h"
60 #include "llvm/IR/InlineAsm.h"
61 #include "llvm/IR/InstrTypes.h"
62 #include "llvm/IR/Instructions.h"
63 #include "llvm/IR/IntrinsicInst.h"
64 #include "llvm/IR/Intrinsics.h"
65 #include "llvm/IR/IntrinsicsAMDGPU.h"
66 #include "llvm/IR/LLVMContext.h"
67 #include "llvm/IR/Metadata.h"
68 #include "llvm/IR/PatternMatch.h"
69 #include "llvm/IR/Statepoint.h"
70 #include "llvm/IR/Type.h"
71 #include "llvm/IR/User.h"
72 #include "llvm/IR/Value.h"
73 #include "llvm/InitializePasses.h"
74 #include "llvm/MC/MCContext.h"
75 #include "llvm/Pass.h"
76 #include "llvm/Support/Casting.h"
77 #include "llvm/Support/CodeGen.h"
78 #include "llvm/Support/Debug.h"
79 #include "llvm/Support/ErrorHandling.h"
80 #include "llvm/Support/MathExtras.h"
81 #include "llvm/Support/raw_ostream.h"
82 #include "llvm/Target/TargetMachine.h"
83 #include "llvm/Transforms/Utils/Local.h"
84 #include "llvm/Transforms/Utils/MemoryOpRemark.h"
85 #include <algorithm>
86 #include <cassert>
87 #include <cstdint>
88 #include <iterator>
89 #include <optional>
90 #include <string>
91 #include <utility>
92 #include <vector>
93
94 #define DEBUG_TYPE "irtranslator"
95
96 using namespace llvm;
97
98 static cl::opt<bool>
99 EnableCSEInIRTranslator("enable-cse-in-irtranslator",
100 cl::desc("Should enable CSE in irtranslator"),
101 cl::Optional, cl::init(false));
102 char IRTranslator::ID = 0;
103
104 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
105 false, false)
106 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
107 INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
108 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
109 INITIALIZE_PASS_DEPENDENCY(StackProtector)
110 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
111 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
112 false, false)
113
114 static void reportTranslationError(MachineFunction &MF,
115 const TargetPassConfig &TPC,
116 OptimizationRemarkEmitter &ORE,
117 OptimizationRemarkMissed &R) {
118 MF.getProperties().setFailedISel();
119
120 // Print the function name explicitly if we don't have a debug location (which
121 // makes the diagnostic less useful) or if we're going to emit a raw error.
122 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
123 R << (" (in function: " + MF.getName() + ")").str();
124
125 if (TPC.isGlobalISelAbortEnabled())
126 report_fatal_error(Twine(R.getMsg()));
127 else
128 ORE.emit(R);
129 }
130
131 IRTranslator::IRTranslator(CodeGenOptLevel optlevel)
132 : MachineFunctionPass(ID), OptLevel(optlevel) {}
133
134 #ifndef NDEBUG
135 namespace {
136 /// Verify that every instruction created has the same DILocation as the
137 /// instruction being translated.
138 class DILocationVerifier : public GISelChangeObserver {
139 const Instruction *CurrInst = nullptr;
140
141 public:
142 DILocationVerifier() = default;
143 ~DILocationVerifier() = default;
144
145 const Instruction *getCurrentInst() const { return CurrInst; }
146 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
147
148 void erasingInstr(MachineInstr &MI) override {}
149 void changingInstr(MachineInstr &MI) override {}
150 void changedInstr(MachineInstr &MI) override {}
151
152 void createdInstr(MachineInstr &MI) override {
153 assert(getCurrentInst() && "Inserted instruction without a current MI");
154
155 // Only print the check message if we're actually checking it.
156 #ifndef NDEBUG
157 LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
158 << " was copied to " << MI);
159 #endif
160 // We allow insts in the entry block to have no debug loc because
161 // they could have originated from constants, and we don't want a jumpy
162 // debug experience.
163 assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
164 (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
165 (MI.isDebugInstr())) &&
166 "Line info was not transferred to all instructions");
167 }
168 };
169 } // namespace
170 #endif // ifndef NDEBUG
171
172
173 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
174 AU.addRequired<StackProtector>();
175 AU.addRequired<TargetPassConfig>();
176 AU.addRequired<GISelCSEAnalysisWrapperPass>();
177 AU.addRequired<AssumptionCacheTracker>();
178 if (OptLevel != CodeGenOptLevel::None) {
179 AU.addRequired<BranchProbabilityInfoWrapperPass>();
180 AU.addRequired<AAResultsWrapperPass>();
181 }
182 AU.addRequired<TargetLibraryInfoWrapperPass>();
183 AU.addPreserved<TargetLibraryInfoWrapperPass>();
184 getSelectionDAGFallbackAnalysisUsage(AU);
185 MachineFunctionPass::getAnalysisUsage(AU);
186 }
187
188 IRTranslator::ValueToVRegInfo::VRegListT &
189 IRTranslator::allocateVRegs(const Value &Val) {
190 auto VRegsIt = VMap.findVRegs(Val);
191 if (VRegsIt != VMap.vregs_end())
192 return *VRegsIt->second;
193 auto *Regs = VMap.getVRegs(Val);
194 auto *Offsets = VMap.getOffsets(Val);
195 SmallVector<LLT, 4> SplitTys;
196 computeValueLLTs(*DL, *Val.getType(), SplitTys,
197 Offsets->empty() ? Offsets : nullptr);
198 for (unsigned i = 0; i < SplitTys.size(); ++i)
199 Regs->push_back(0);
200 return *Regs;
201 }
202
203 ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
204 auto VRegsIt = VMap.findVRegs(Val);
205 if (VRegsIt != VMap.vregs_end())
206 return *VRegsIt->second;
207
208 if (Val.getType()->isVoidTy())
209 return *VMap.getVRegs(Val);
210
211 // Create entry for this type.
212 auto *VRegs = VMap.getVRegs(Val);
213 auto *Offsets = VMap.getOffsets(Val);
214
215 if (!Val.getType()->isTokenTy())
216 assert(Val.getType()->isSized() &&
217 "Don't know how to create an empty vreg");
218
219 SmallVector<LLT, 4> SplitTys;
220 computeValueLLTs(*DL, *Val.getType(), SplitTys,
221 Offsets->empty() ? Offsets : nullptr);
222
223 if (!isa<Constant>(Val)) {
224 for (auto Ty : SplitTys)
225 VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
226 return *VRegs;
227 }
228
229 if (Val.getType()->isAggregateType()) {
230 // UndefValue, ConstantAggregateZero
231 auto &C = cast<Constant>(Val);
232 unsigned Idx = 0;
233 while (auto Elt = C.getAggregateElement(Idx++)) {
234 auto EltRegs = getOrCreateVRegs(*Elt);
235 llvm::append_range(*VRegs, EltRegs);
236 }
237 } else {
238 assert(SplitTys.size() == 1 && "unexpectedly split LLT");
239 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
240 bool Success = translate(cast<Constant>(Val), VRegs->front());
241 if (!Success) {
242 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
243 MF->getFunction().getSubprogram(),
244 &MF->getFunction().getEntryBlock());
245 R << "unable to translate constant: " << ore::NV("Type", Val.getType());
246 reportTranslationError(*MF, *TPC, *ORE, R);
247 return *VRegs;
248 }
249 }
250
251 return *VRegs;
252 }
253
254 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
255 auto [MapEntry, Inserted] = FrameIndices.try_emplace(&AI);
256 if (!Inserted)
257 return MapEntry->second;
258
259 uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
260 uint64_t Size =
261 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
262
263 // Always allocate at least one byte.
264 Size = std::max<uint64_t>(Size, 1u);
265
266 int &FI = MapEntry->second;
267 FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
268 return FI;
269 }
270
271 Align IRTranslator::getMemOpAlign(const Instruction &I) {
272 if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
273 return SI->getAlign();
274 if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
275 return LI->getAlign();
276 if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
277 return AI->getAlign();
278 if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
279 return AI->getAlign();
280
281 OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
282 R << "unable to translate memop: " << ore::NV("Opcode", &I);
283 reportTranslationError(*MF, *TPC, *ORE, R);
284 return Align(1);
285 }
286
287 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
288 MachineBasicBlock *MBB = FuncInfo.getMBB(&BB);
289 assert(MBB && "BasicBlock was not encountered before");
290 return *MBB;
291 }
292
293 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
294 assert(NewPred && "new predecessor must be a real MachineBasicBlock");
295 MachinePreds[Edge].push_back(NewPred);
296 }
297
298 static bool containsBF16Type(const User &U) {
299 // BF16 cannot currently be represented by LLT; to avoid miscompiles we
300 // prevent any instructions from using it. FIXME: This can be removed once LLT
301 // supports bfloat.
302 return U.getType()->getScalarType()->isBFloatTy() ||
303 any_of(U.operands(), [](Value *V) {
304 return V->getType()->getScalarType()->isBFloatTy();
305 });
306 }
307
308 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
309 MachineIRBuilder &MIRBuilder) {
310 if (containsBF16Type(U))
311 return false;
312
313 // Get or create a virtual register for each value.
314 // Unless the value is a Constant => loadimm cst?
315 // or inline constant each time?
316 // Creation of a virtual register needs to have a size.
317 Register Op0 = getOrCreateVReg(*U.getOperand(0));
318 Register Op1 = getOrCreateVReg(*U.getOperand(1));
319 Register Res = getOrCreateVReg(U);
320 uint32_t Flags = 0;
321 if (isa<Instruction>(U)) {
322 const Instruction &I = cast<Instruction>(U);
323 Flags = MachineInstr::copyFlagsFromInstruction(I);
324 }
325
326 MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
327 return true;
328 }
329
330 bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
331 MachineIRBuilder &MIRBuilder) {
332 if (containsBF16Type(U))
333 return false;
334
335 Register Op0 = getOrCreateVReg(*U.getOperand(0));
336 Register Res = getOrCreateVReg(U);
337 uint32_t Flags = 0;
338 if (isa<Instruction>(U)) {
339 const Instruction &I = cast<Instruction>(U);
340 Flags = MachineInstr::copyFlagsFromInstruction(I);
341 }
342 MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
343 return true;
344 }
345
346 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
347 return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
348 }
349
350 bool IRTranslator::translateCompare(const User &U,
351 MachineIRBuilder &MIRBuilder) {
352 if (containsBF16Type(U))
353 return false;
354
355 auto *CI = cast<CmpInst>(&U);
356 Register Op0 = getOrCreateVReg(*U.getOperand(0));
357 Register Op1 = getOrCreateVReg(*U.getOperand(1));
358 Register Res = getOrCreateVReg(U);
359 CmpInst::Predicate Pred = CI->getPredicate();
360 uint32_t Flags = MachineInstr::copyFlagsFromInstruction(*CI);
361 if (CmpInst::isIntPredicate(Pred))
362 MIRBuilder.buildICmp(Pred, Res, Op0, Op1, Flags);
363 else if (Pred == CmpInst::FCMP_FALSE)
364 MIRBuilder.buildCopy(
365 Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
366 else if (Pred == CmpInst::FCMP_TRUE)
367 MIRBuilder.buildCopy(
368 Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
369 else
370 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);
371
372 return true;
373 }
374
375 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
376 const ReturnInst &RI = cast<ReturnInst>(U);
377 const Value *Ret = RI.getReturnValue();
378 if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
379 Ret = nullptr;
380
381 ArrayRef<Register> VRegs;
382 if (Ret)
383 VRegs = getOrCreateVRegs(*Ret);
384
385 Register SwiftErrorVReg = 0;
386 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
387 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
388 &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
389 }
390
391 // The target may mess up with the insertion point, but
392 // this is not important as a return is the last instruction
393 // of the block anyway.
394 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
395 }
396
397 void IRTranslator::emitBranchForMergedCondition(
398 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
399 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
400 BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
401 // If the leaf of the tree is a comparison, merge the condition into
402 // the caseblock.
403 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
404 CmpInst::Predicate Condition;
405 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
406 Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
407 } else {
408 const FCmpInst *FC = cast<FCmpInst>(Cond);
409 Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
410 }
411
412 SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
413 BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
414 CurBuilder->getDebugLoc(), TProb, FProb);
415 SL->SwitchCases.push_back(CB);
416 return;
417 }
418
419 // Create a CaseBlock record representing this branch.
420 CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
421 SwitchCG::CaseBlock CB(
422 Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
423 nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
424 SL->SwitchCases.push_back(CB);
425 }
426
427 static bool isValInBlock(const Value *V, const BasicBlock *BB) {
428 if (const Instruction *I = dyn_cast<Instruction>(V))
429 return I->getParent() == BB;
430 return true;
431 }
432
433 void IRTranslator::findMergedConditions(
434 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
435 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
436 Instruction::BinaryOps Opc, BranchProbability TProb,
437 BranchProbability FProb, bool InvertCond) {
438 using namespace PatternMatch;
439 assert((Opc == Instruction::And || Opc == Instruction::Or) &&
440 "Expected Opc to be AND/OR");
441 // Skip over a NOT that is not part of the tree and remember to invert the op
442 // and operands at the next level.
443 Value *NotCond;
444 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
445 isValInBlock(NotCond, CurBB->getBasicBlock())) {
446 findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
447 !InvertCond);
448 return;
449 }
450
451 const Instruction *BOp = dyn_cast<Instruction>(Cond);
452 const Value *BOpOp0, *BOpOp1;
453 // Compute the effective opcode for Cond, taking into account whether it needs
454 // to be inverted, e.g.
455 // and (not (or A, B)), C
456 // gets lowered as
457 // and (and (not A, not B), C)
458 Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
459 if (BOp) {
460 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
461 ? Instruction::And
462 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
463 ? Instruction::Or
464 : (Instruction::BinaryOps)0);
465 if (InvertCond) {
466 if (BOpc == Instruction::And)
467 BOpc = Instruction::Or;
468 else if (BOpc == Instruction::Or)
469 BOpc = Instruction::And;
470 }
471 }
472
473 // If this node is not part of the or/and tree, emit it as a branch.
474 // Note that all nodes in the tree should have the same opcode.
475 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
476 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
477 !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
478 !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
479 emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
480 InvertCond);
481 return;
482 }
483
484 // Create TmpBB after CurBB.
485 MachineFunction::iterator BBI(CurBB);
486 MachineBasicBlock *TmpBB =
487 MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
488 CurBB->getParent()->insert(++BBI, TmpBB);
489
490 if (Opc == Instruction::Or) {
491 // Codegen X | Y as:
492 // BB1:
493 // jmp_if_X TBB
494 // jmp TmpBB
495 // TmpBB:
496 // jmp_if_Y TBB
497 // jmp FBB
498 //
499
500 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
501 // The requirement is that
502 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
503 // = TrueProb for original BB.
504 // Assuming the original probabilities are A and B, one choice is to set
505 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
506 // A/(1+B) and 2B/(1+B). This choice assumes that
507 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
508 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
509 // TmpBB, but the math is more complicated.
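// Illustrative check with assumed example probabilities A = 0.6 and B = 0.4:
// BB1 gets 0.3 / 0.7 and TmpBB gets 0.6/1.4 and 0.8/1.4, so
//   TrueProb for BB1 + FalseProb for BB1 * TrueProb for TmpBB
//     = 0.3 + 0.7 * (0.6/1.4) = 0.6 = A, as required.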
510
511 auto NewTrueProb = TProb / 2;
512 auto NewFalseProb = TProb / 2 + FProb;
513 // Emit the LHS condition.
514 findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
515 NewFalseProb, InvertCond);
516
517 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
518 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
519 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
520 // Emit the RHS condition into TmpBB.
521 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
522 Probs[1], InvertCond);
523 } else {
524 assert(Opc == Instruction::And && "Unknown merge op!");
525 // Codegen X & Y as:
526 // BB1:
527 // jmp_if_X TmpBB
528 // jmp FBB
529 // TmpBB:
530 // jmp_if_Y TBB
531 // jmp FBB
532 //
533 // This requires creation of TmpBB after CurBB.
534
535 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
536 // The requirement is that
537 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
538 // = FalseProb for original BB.
539 // Assuming the original probabilities are A and B, one choice is to set
540 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
541 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
542 // TrueProb for BB1 * FalseProb for TmpBB.
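// Illustrative check with assumed example probabilities A = 0.6 and B = 0.4:
// BB1 gets 0.8 / 0.2 and TmpBB gets 1.2/1.6 and 0.4/1.6, so
//   FalseProb for BB1 + TrueProb for BB1 * FalseProb for TmpBB
//     = 0.2 + 0.8 * (0.4/1.6) = 0.4 = B, as required.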
543
544 auto NewTrueProb = TProb + FProb / 2;
545 auto NewFalseProb = FProb / 2;
546 // Emit the LHS condition.
547 findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
548 NewFalseProb, InvertCond);
549
550 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
551 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
552 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
553 // Emit the RHS condition into TmpBB.
554 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
555 Probs[1], InvertCond);
556 }
557 }
558
559 bool IRTranslator::shouldEmitAsBranches(
560 const std::vector<SwitchCG::CaseBlock> &Cases) {
561 // For multiple cases, it's better to emit as branches.
562 if (Cases.size() != 2)
563 return true;
564
565 // If this is two comparisons of the same values or'd or and'd together, they
566 // will get folded into a single comparison, so don't emit two blocks.
567 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
568 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
569 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
570 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
571 return false;
572 }
573
574 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
575 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
576 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
577 Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
578 isa<Constant>(Cases[0].CmpRHS) &&
579 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
580 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
581 Cases[0].TrueBB == Cases[1].ThisBB)
582 return false;
583 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
584 Cases[0].FalseBB == Cases[1].ThisBB)
585 return false;
586 }
587
588 return true;
589 }
590
591 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
592 const BranchInst &BrInst = cast<BranchInst>(U);
593 auto &CurMBB = MIRBuilder.getMBB();
594 auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));
595
596 if (BrInst.isUnconditional()) {
597 // If the unconditional target is the layout successor, fallthrough.
598 if (OptLevel == CodeGenOptLevel::None ||
599 !CurMBB.isLayoutSuccessor(Succ0MBB))
600 MIRBuilder.buildBr(*Succ0MBB);
601
602 // Link successors.
603 for (const BasicBlock *Succ : successors(&BrInst))
604 CurMBB.addSuccessor(&getMBB(*Succ));
605 return true;
606 }
607
608 // If this condition is one of the special cases we handle, do special stuff
609 // now.
610 const Value *CondVal = BrInst.getCondition();
611 MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));
612
613 // If this is a series of conditions that are or'd or and'd together, emit
614 // this as a sequence of branches instead of setcc's with and/or operations.
615 // As long as jumps are not expensive (exceptions for multi-use logic ops,
616 // unpredictable branches, and vector extracts because those jumps are likely
617 // expensive for any target), this should improve performance.
618 // For example, instead of something like:
619 // cmp A, B
620 // C = seteq
621 // cmp D, E
622 // F = setle
623 // or C, F
624 // jnz foo
625 // Emit:
626 // cmp A, B
627 // je foo
628 // cmp D, E
629 // jle foo
630 using namespace PatternMatch;
631 const Instruction *CondI = dyn_cast<Instruction>(CondVal);
632 if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
633 !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
634 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
635 Value *Vec;
636 const Value *BOp0, *BOp1;
637 if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
638 Opcode = Instruction::And;
639 else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
640 Opcode = Instruction::Or;
641
642 if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
643 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
644 findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
645 getEdgeProbability(&CurMBB, Succ0MBB),
646 getEdgeProbability(&CurMBB, Succ1MBB),
647 /*InvertCond=*/false);
648 assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");
649
650 // Allow some cases to be rejected.
651 if (shouldEmitAsBranches(SL->SwitchCases)) {
652 // Emit the branch for this block.
653 emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
654 SL->SwitchCases.erase(SL->SwitchCases.begin());
655 return true;
656 }
657
658 // Okay, we decided not to do this, remove any inserted MBB's and clear
659 // SwitchCases.
660 for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
661 MF->erase(SL->SwitchCases[I].ThisBB);
662
663 SL->SwitchCases.clear();
664 }
665 }
666
667 // Create a CaseBlock record representing this branch.
668 SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
669 ConstantInt::getTrue(MF->getFunction().getContext()),
670 nullptr, Succ0MBB, Succ1MBB, &CurMBB,
671 CurBuilder->getDebugLoc());
672
673 // Use emitSwitchCase to actually insert the fast branch sequence for this
674 // cond branch.
675 emitSwitchCase(CB, &CurMBB, *CurBuilder);
676 return true;
677 }
678
679 void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
680 MachineBasicBlock *Dst,
681 BranchProbability Prob) {
682 if (!FuncInfo.BPI) {
683 Src->addSuccessorWithoutProb(Dst);
684 return;
685 }
686 if (Prob.isUnknown())
687 Prob = getEdgeProbability(Src, Dst);
688 Src->addSuccessor(Dst, Prob);
689 }
690
691 BranchProbability
692 IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
693 const MachineBasicBlock *Dst) const {
694 const BasicBlock *SrcBB = Src->getBasicBlock();
695 const BasicBlock *DstBB = Dst->getBasicBlock();
696 if (!FuncInfo.BPI) {
697 // If BPI is not available, set the default probability as 1 / N, where N is
698 // the number of successors.
699 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
700 return BranchProbability(1, SuccSize);
701 }
702 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
703 }
704
705 bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
706 using namespace SwitchCG;
707 // Extract cases from the switch.
708 const SwitchInst &SI = cast<SwitchInst>(U);
709 BranchProbabilityInfo *BPI = FuncInfo.BPI;
710 CaseClusterVector Clusters;
711 Clusters.reserve(SI.getNumCases());
712 for (const auto &I : SI.cases()) {
713 MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
714 assert(Succ && "Could not find successor mbb in mapping");
715 const ConstantInt *CaseVal = I.getCaseValue();
716 BranchProbability Prob =
717 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
718 : BranchProbability(1, SI.getNumCases() + 1);
719 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
720 }
721
722 MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
723
724 // Cluster adjacent cases with the same destination. We do this at all
725 // optimization levels because it's cheap to do and will make codegen faster
726 // if there are many clusters.
727 sortAndRangeify(Clusters);
728
729 MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
730
731 // If there is only the default destination, jump there directly.
732 if (Clusters.empty()) {
733 SwitchMBB->addSuccessor(DefaultMBB);
734 if (DefaultMBB != SwitchMBB->getNextNode())
735 MIB.buildBr(*DefaultMBB);
736 return true;
737 }
738
739 SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
740 SL->findBitTestClusters(Clusters, &SI);
741
742 LLVM_DEBUG({
743 dbgs() << "Case clusters: ";
744 for (const CaseCluster &C : Clusters) {
745 if (C.Kind == CC_JumpTable)
746 dbgs() << "JT:";
747 if (C.Kind == CC_BitTests)
748 dbgs() << "BT:";
749
750 C.Low->getValue().print(dbgs(), true);
751 if (C.Low != C.High) {
752 dbgs() << '-';
753 C.High->getValue().print(dbgs(), true);
754 }
755 dbgs() << ' ';
756 }
757 dbgs() << '\n';
758 });
759
760 assert(!Clusters.empty());
761 SwitchWorkList WorkList;
762 CaseClusterIt First = Clusters.begin();
763 CaseClusterIt Last = Clusters.end() - 1;
764 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
765 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
766
767 while (!WorkList.empty()) {
768 SwitchWorkListItem W = WorkList.pop_back_val();
769
770 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
771 // For optimized builds, lower large range as a balanced binary tree.
772 if (NumClusters > 3 &&
773 MF->getTarget().getOptLevel() != CodeGenOptLevel::None &&
774 !DefaultMBB->getParent()->getFunction().hasMinSize()) {
775 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);
776 continue;
777 }
778
779 if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
780 return false;
781 }
782 return true;
783 }
784
785 void IRTranslator::splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
786 const SwitchCG::SwitchWorkListItem &W,
787 Value *Cond, MachineBasicBlock *SwitchMBB,
788 MachineIRBuilder &MIB) {
789 using namespace SwitchCG;
790 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
791 "Clusters not sorted?");
792 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
793
794 auto [LastLeft, FirstRight, LeftProb, RightProb] =
795 SL->computeSplitWorkItemInfo(W);
796
797 // Use the first element on the right as pivot since we will make less-than
798 // comparisons against it.
799 CaseClusterIt PivotCluster = FirstRight;
800 assert(PivotCluster > W.FirstCluster);
801 assert(PivotCluster <= W.LastCluster);
802
803 CaseClusterIt FirstLeft = W.FirstCluster;
804 CaseClusterIt LastRight = W.LastCluster;
805
806 const ConstantInt *Pivot = PivotCluster->Low;
807
808 // New blocks will be inserted immediately after the current one.
809 MachineFunction::iterator BBI(W.MBB);
810 ++BBI;
811
812 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
813 // we can branch to its destination directly if it's squeezed exactly in
814 // between the known lower bound and Pivot - 1.
815 MachineBasicBlock *LeftMBB;
816 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
817 FirstLeft->Low == W.GE &&
818 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
819 LeftMBB = FirstLeft->MBB;
820 } else {
821 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
822 FuncInfo.MF->insert(BBI, LeftMBB);
823 WorkList.push_back(
824 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
825 }
826
827 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
828 // single cluster, RHS.Low == Pivot, and we can branch to its destination
829 // directly if RHS.High equals the current upper bound.
830 MachineBasicBlock *RightMBB;
831 if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
832 (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
833 RightMBB = FirstRight->MBB;
834 } else {
835 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
836 FuncInfo.MF->insert(BBI, RightMBB);
837 WorkList.push_back(
838 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
839 }
840
841 // Create the CaseBlock record that will be used to lower the branch.
842 CaseBlock CB(ICmpInst::Predicate::ICMP_SLT, false, Cond, Pivot, nullptr,
843 LeftMBB, RightMBB, W.MBB, MIB.getDebugLoc(), LeftProb,
844 RightProb);
845
846 if (W.MBB == SwitchMBB)
847 emitSwitchCase(CB, SwitchMBB, MIB);
848 else
849 SL->SwitchCases.push_back(CB);
850 }
851
852 void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
853 MachineBasicBlock *MBB) {
854 // Emit the code for the jump table
855 assert(JT.Reg && "Should lower JT Header first!");
856 MachineIRBuilder MIB(*MBB->getParent());
857 MIB.setMBB(*MBB);
858 MIB.setDebugLoc(CurBuilder->getDebugLoc());
859
860 Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
861 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
862
863 auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
864 MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
865 }
866
867 bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
868 SwitchCG::JumpTableHeader &JTH,
869 MachineBasicBlock *HeaderBB) {
870 MachineIRBuilder MIB(*HeaderBB->getParent());
871 MIB.setMBB(*HeaderBB);
872 MIB.setDebugLoc(CurBuilder->getDebugLoc());
873
874 const Value &SValue = *JTH.SValue;
875 // Subtract the lowest switch case value from the value being switched on.
876 const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
877 Register SwitchOpReg = getOrCreateVReg(SValue);
878 auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
879 auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
880
881 // This value may be smaller or larger than the target's pointer type, and
882 // therefore require extension or truncation.
883 auto *PtrIRTy = PointerType::getUnqual(SValue.getContext());
884 const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
885 Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
886
887 JT.Reg = Sub.getReg(0);
888
889 if (JTH.FallthroughUnreachable) {
890 if (JT.MBB != HeaderBB->getNextNode())
891 MIB.buildBr(*JT.MBB);
892 return true;
893 }
894
895 // Emit the range check for the jump table, and branch to the default block
896 // for the switch statement if the value being switched on exceeds the
897 // largest case in the switch.
898 auto Cst = getOrCreateVReg(
899 *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
900 Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
901 auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
902
903 auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
904
905 // Avoid emitting unnecessary branches to the next block.
906 if (JT.MBB != HeaderBB->getNextNode())
907 BrCond = MIB.buildBr(*JT.MBB);
908 return true;
909 }
910
911 void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
912 MachineBasicBlock *SwitchBB,
913 MachineIRBuilder &MIB) {
914 Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
915 Register Cond;
916 DebugLoc OldDbgLoc = MIB.getDebugLoc();
917 MIB.setDebugLoc(CB.DbgLoc);
918 MIB.setMBB(*CB.ThisBB);
919
920 if (CB.PredInfo.NoCmp) {
921 // Branch or fall through to TrueBB.
922 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
923 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
924 CB.ThisBB);
925 CB.ThisBB->normalizeSuccProbs();
926 if (CB.TrueBB != CB.ThisBB->getNextNode())
927 MIB.buildBr(*CB.TrueBB);
928 MIB.setDebugLoc(OldDbgLoc);
929 return;
930 }
931
932 const LLT i1Ty = LLT::scalar(1);
933 // Build the compare.
934 if (!CB.CmpMHS) {
935 const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
936 // For conditional branch lowering, we might try to do something silly like
937 // emit a G_ICMP to compare an existing G_ICMP i1 result with true. If so,
938 // just re-use the existing condition vreg.
939 if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
940 CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
941 Cond = CondLHS;
942 } else {
943 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
944 if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
945 Cond =
946 MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
947 else
948 Cond =
949 MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
950 }
951 } else {
952 assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
953 "Can only handle SLE ranges");
954
955 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
956 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
957
958 Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
959 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
960 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
961 Cond =
962 MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
963 } else {
964 const LLT CmpTy = MRI->getType(CmpOpReg);
965 auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
966 auto Diff = MIB.buildConstant(CmpTy, High - Low);
967 Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
968 }
969 }
970
971 // Update successor info
972 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
973
974 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
975 CB.ThisBB);
976
977 // TrueBB and FalseBB are always different unless the incoming IR is
978 // degenerate. This only happens when running llc on weird IR.
979 if (CB.TrueBB != CB.FalseBB)
980 addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
981 CB.ThisBB->normalizeSuccProbs();
982
983 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
984 CB.ThisBB);
985
986 MIB.buildBrCond(Cond, *CB.TrueBB);
987 MIB.buildBr(*CB.FalseBB);
988 MIB.setDebugLoc(OldDbgLoc);
989 }
990
991 bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
992 MachineBasicBlock *SwitchMBB,
993 MachineBasicBlock *CurMBB,
994 MachineBasicBlock *DefaultMBB,
995 MachineIRBuilder &MIB,
996 MachineFunction::iterator BBI,
997 BranchProbability UnhandledProbs,
998 SwitchCG::CaseClusterIt I,
999 MachineBasicBlock *Fallthrough,
1000 bool FallthroughUnreachable) {
1001 using namespace SwitchCG;
1002 MachineFunction *CurMF = SwitchMBB->getParent();
1003 // FIXME: Optimize away range check based on pivot comparisons.
1004 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
1005 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
1006 BranchProbability DefaultProb = W.DefaultProb;
1007
1008 // The jump block hasn't been inserted yet; insert it here.
1009 MachineBasicBlock *JumpMBB = JT->MBB;
1010 CurMF->insert(BBI, JumpMBB);
1011
1012 // Since the jump table block is separate from the switch block, we need
1013 // to keep track of it as a machine predecessor to the default block,
1014 // otherwise we lose the phi edges.
1015 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
1016 CurMBB);
1017 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
1018 JumpMBB);
1019
1020 auto JumpProb = I->Prob;
1021 auto FallthroughProb = UnhandledProbs;
1022
1023 // If the default statement is a target of the jump table, we evenly
1024 // distribute the default probability to successors of CurMBB. Also
1025 // update the probability on the edge from JumpMBB to Fallthrough.
1026 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
1027 SE = JumpMBB->succ_end();
1028 SI != SE; ++SI) {
1029 if (*SI == DefaultMBB) {
1030 JumpProb += DefaultProb / 2;
1031 FallthroughProb -= DefaultProb / 2;
1032 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
1033 JumpMBB->normalizeSuccProbs();
1034 } else {
1035 // Also record edges from the jump table block to its successors.
1036 addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
1037 JumpMBB);
1038 }
1039 }
1040
1041 if (FallthroughUnreachable)
1042 JTH->FallthroughUnreachable = true;
1043
1044 if (!JTH->FallthroughUnreachable)
1045 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
1046 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
1047 CurMBB->normalizeSuccProbs();
1048
1049 // The jump table header will be inserted in our current block; it will do
1050 // the range check and fall through to our fallthrough block.
1051 JTH->HeaderBB = CurMBB;
1052 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
1053
1054 // If we're in the right place, emit the jump table header right now.
1055 if (CurMBB == SwitchMBB) {
1056 if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
1057 return false;
1058 JTH->Emitted = true;
1059 }
1060 return true;
1061 }
1062 bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
1063 Value *Cond,
1064 MachineBasicBlock *Fallthrough,
1065 bool FallthroughUnreachable,
1066 BranchProbability UnhandledProbs,
1067 MachineBasicBlock *CurMBB,
1068 MachineIRBuilder &MIB,
1069 MachineBasicBlock *SwitchMBB) {
1070 using namespace SwitchCG;
1071 const Value *RHS, *LHS, *MHS;
1072 CmpInst::Predicate Pred;
1073 if (I->Low == I->High) {
1074 // Check Cond == I->Low.
1075 Pred = CmpInst::ICMP_EQ;
1076 LHS = Cond;
1077 RHS = I->Low;
1078 MHS = nullptr;
1079 } else {
1080 // Check I->Low <= Cond <= I->High.
1081 Pred = CmpInst::ICMP_SLE;
1082 LHS = I->Low;
1083 MHS = Cond;
1084 RHS = I->High;
1085 }
1086
1087 // If Fallthrough is unreachable, fold away the comparison.
1088 // The false probability is the sum of all unhandled cases.
1089 CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
1090 CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
1091
1092 emitSwitchCase(CB, SwitchMBB, MIB);
1093 return true;
1094 }
1095
1096 void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
1097 MachineBasicBlock *SwitchBB) {
1098 MachineIRBuilder &MIB = *CurBuilder;
1099 MIB.setMBB(*SwitchBB);
1100
1101 // Subtract the minimum value.
1102 Register SwitchOpReg = getOrCreateVReg(*B.SValue);
1103
1104 LLT SwitchOpTy = MRI->getType(SwitchOpReg);
1105 Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
1106 auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1107
1108 Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
1109 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1110
1111 LLT MaskTy = SwitchOpTy;
1112 if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
1113 !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))
1114 MaskTy = LLT::scalar(PtrTy.getSizeInBits());
1115 else {
1116 // Ensure that the type will fit the mask value.
1117 for (const SwitchCG::BitTestCase &Case : B.Cases) {
1118 if (!isUIntN(SwitchOpTy.getSizeInBits(), Case.Mask)) {
1119 // Switch case ranges are encoded into a series of masks.
1120 // Just use pointer type, it's guaranteed to fit.
1121 MaskTy = LLT::scalar(PtrTy.getSizeInBits());
1122 break;
1123 }
1124 }
1125 }
1126 Register SubReg = RangeSub.getReg(0);
1127 if (SwitchOpTy != MaskTy)
1128 SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);
1129
1130 B.RegVT = getMVTForLLT(MaskTy);
1131 B.Reg = SubReg;
1132
1133 MachineBasicBlock *MBB = B.Cases[0].ThisBB;
1134
1135 if (!B.FallthroughUnreachable)
1136 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
1137 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
1138
1139 SwitchBB->normalizeSuccProbs();
1140
1141 if (!B.FallthroughUnreachable) {
1142 // Conditional branch to the default block.
1143 auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
1144 auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
1145 RangeSub, RangeCst);
1146 MIB.buildBrCond(RangeCmp, *B.Default);
1147 }
1148
1149 // Avoid emitting unnecessary branches to the next block.
1150 if (MBB != SwitchBB->getNextNode())
1151 MIB.buildBr(*MBB);
1152 }
1153
1154 void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
1155 MachineBasicBlock *NextMBB,
1156 BranchProbability BranchProbToNext,
1157 Register Reg, SwitchCG::BitTestCase &B,
1158 MachineBasicBlock *SwitchBB) {
1159 MachineIRBuilder &MIB = *CurBuilder;
1160 MIB.setMBB(*SwitchBB);
1161
1162 LLT SwitchTy = getLLTForMVT(BB.RegVT);
1163 Register Cmp;
1164 unsigned PopCount = llvm::popcount(B.Mask);
1165 if (PopCount == 1) {
1166 // Testing for a single bit; just compare the shift count with what it
1167 // would need to be to shift a 1 bit in that position.
1168 auto MaskTrailingZeros =
1169 MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
1170 Cmp =
1171 MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
1172 .getReg(0);
1173 } else if (PopCount == BB.Range) {
1174 // There is only one zero bit in the range, test for it directly.
1175 auto MaskTrailingOnes =
1176 MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
1177 Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
1178 .getReg(0);
1179 } else {
1180 // Make desired shift.
1181 auto CstOne = MIB.buildConstant(SwitchTy, 1);
1182 auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
1183
1184 // Emit bit tests and jumps.
1185 auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
1186 auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
1187 auto CstZero = MIB.buildConstant(SwitchTy, 0);
1188 Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
1189 .getReg(0);
1190 }
1191
1192 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
1193 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
1194 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
1195 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
1196 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
1197 // one as they are relative probabilities (and thus work more like weights),
1198 // and hence we need to normalize them to let the sum of them become one.
1199 SwitchBB->normalizeSuccProbs();
1200
1201 // Record the fact that the IR edge from the header to the bit test target
1202 // will go through our new block. Needed for PHIs to have nodes added.
1203 addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
1204 SwitchBB);
1205
1206 MIB.buildBrCond(Cmp, *B.TargetBB);
1207
1208 // Avoid emitting unnecessary branches to the next block.
1209 if (NextMBB != SwitchBB->getNextNode())
1210 MIB.buildBr(*NextMBB);
1211 }
1212
1213 bool IRTranslator::lowerBitTestWorkItem(
1214 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
1215 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
1216 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
1217 BranchProbability DefaultProb, BranchProbability UnhandledProbs,
1218 SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
1219 bool FallthroughUnreachable) {
1220 using namespace SwitchCG;
1221 MachineFunction *CurMF = SwitchMBB->getParent();
1222 // FIXME: Optimize away range check based on pivot comparisons.
1223 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
1224 // The bit test blocks haven't been inserted yet; insert them here.
1225 for (BitTestCase &BTC : BTB->Cases)
1226 CurMF->insert(BBI, BTC.ThisBB);
1227
1228 // Fill in fields of the BitTestBlock.
1229 BTB->Parent = CurMBB;
1230 BTB->Default = Fallthrough;
1231
1232 BTB->DefaultProb = UnhandledProbs;
1233 // If the cases in bit test don't form a contiguous range, we evenly
1234 // distribute the probability on the edge to Fallthrough to two
1235 // successors of CurMBB.
1236 if (!BTB->ContiguousRange) {
1237 BTB->Prob += DefaultProb / 2;
1238 BTB->DefaultProb -= DefaultProb / 2;
1239 }
1240
1241 if (FallthroughUnreachable)
1242 BTB->FallthroughUnreachable = true;
1243
1244 // If we're in the right place, emit the bit test header right now.
1245 if (CurMBB == SwitchMBB) {
1246 emitBitTestHeader(*BTB, SwitchMBB);
1247 BTB->Emitted = true;
1248 }
1249 return true;
1250 }
1251
1252 bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
1253 Value *Cond,
1254 MachineBasicBlock *SwitchMBB,
1255 MachineBasicBlock *DefaultMBB,
1256 MachineIRBuilder &MIB) {
1257 using namespace SwitchCG;
1258 MachineFunction *CurMF = FuncInfo.MF;
1259 MachineBasicBlock *NextMBB = nullptr;
1260 MachineFunction::iterator BBI(W.MBB);
1261 if (++BBI != FuncInfo.MF->end())
1262 NextMBB = &*BBI;
1263
1264 if (EnableOpts) {
1265 // Here, we order cases by probability so the most likely case will be
1266 // checked first. However, two clusters can have the same probability in
1267 // which case their relative ordering is non-deterministic. So we use Low
1268 // as a tie-breaker as clusters are guaranteed to never overlap.
1269 llvm::sort(W.FirstCluster, W.LastCluster + 1,
1270 [](const CaseCluster &a, const CaseCluster &b) {
1271 return a.Prob != b.Prob
1272 ? a.Prob > b.Prob
1273 : a.Low->getValue().slt(b.Low->getValue());
1274 });
1275
1276 // Rearrange the case blocks so that the last one falls through if possible
1277 // without changing the order of probabilities.
1278 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
1279 --I;
1280 if (I->Prob > W.LastCluster->Prob)
1281 break;
1282 if (I->Kind == CC_Range && I->MBB == NextMBB) {
1283 std::swap(*I, *W.LastCluster);
1284 break;
1285 }
1286 }
1287 }
1288
1289 // Compute total probability.
1290 BranchProbability DefaultProb = W.DefaultProb;
1291 BranchProbability UnhandledProbs = DefaultProb;
1292 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
1293 UnhandledProbs += I->Prob;
1294
1295 MachineBasicBlock *CurMBB = W.MBB;
1296 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
1297 bool FallthroughUnreachable = false;
1298 MachineBasicBlock *Fallthrough;
1299 if (I == W.LastCluster) {
1300 // For the last cluster, fall through to the default destination.
1301 Fallthrough = DefaultMBB;
1302 FallthroughUnreachable = isa<UnreachableInst>(
1303 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
1304 } else {
1305 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
1306 CurMF->insert(BBI, Fallthrough);
1307 }
1308 UnhandledProbs -= I->Prob;
1309
1310 switch (I->Kind) {
1311 case CC_BitTests: {
1312 if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1313 DefaultProb, UnhandledProbs, I, Fallthrough,
1314 FallthroughUnreachable)) {
1315 LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
1316 return false;
1317 }
1318 break;
1319 }
1320
1321 case CC_JumpTable: {
1322 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1323 UnhandledProbs, I, Fallthrough,
1324 FallthroughUnreachable)) {
1325 LLVM_DEBUG(dbgs() << "Failed to lower jump table");
1326 return false;
1327 }
1328 break;
1329 }
1330 case CC_Range: {
1331 if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
1332 FallthroughUnreachable, UnhandledProbs,
1333 CurMBB, MIB, SwitchMBB)) {
1334 LLVM_DEBUG(dbgs() << "Failed to lower switch range");
1335 return false;
1336 }
1337 break;
1338 }
1339 }
1340 CurMBB = Fallthrough;
1341 }
1342
1343 return true;
1344 }
1345
1346 bool IRTranslator::translateIndirectBr(const User &U,
1347 MachineIRBuilder &MIRBuilder) {
1348 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
1349
1350 const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
1351 MIRBuilder.buildBrIndirect(Tgt);
1352
1353 // Link successors.
1354 SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
1355 MachineBasicBlock &CurBB = MIRBuilder.getMBB();
1356 for (const BasicBlock *Succ : successors(&BrInst)) {
1357 // It's legal for indirectbr instructions to have duplicate blocks in the
1358 // destination list. We don't allow this in MIR. Skip anything that's
1359 // already a successor.
1360 if (!AddedSuccessors.insert(Succ).second)
1361 continue;
1362 CurBB.addSuccessor(&getMBB(*Succ));
1363 }
1364
1365 return true;
1366 }
1367
1368 static bool isSwiftError(const Value *V) {
1369 if (auto Arg = dyn_cast<Argument>(V))
1370 return Arg->hasSwiftErrorAttr();
1371 if (auto AI = dyn_cast<AllocaInst>(V))
1372 return AI->isSwiftError();
1373 return false;
1374 }
1375
1376 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
1377 const LoadInst &LI = cast<LoadInst>(U);
1378 TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());
1379 if (StoreSize.isZero())
1380 return true;
1381
1382 ArrayRef<Register> Regs = getOrCreateVRegs(LI);
1383 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
1384 Register Base = getOrCreateVReg(*LI.getPointerOperand());
1385 AAMDNodes AAInfo = LI.getAAMetadata();
1386
1387 const Value *Ptr = LI.getPointerOperand();
1388 Type *OffsetIRTy = DL->getIndexType(Ptr->getType());
1389 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1390
1391 if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
1392 assert(Regs.size() == 1 && "swifterror should be single pointer");
1393 Register VReg =
1394 SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);
1395 MIRBuilder.buildCopy(Regs[0], VReg);
1396 return true;
1397 }
1398
1399 MachineMemOperand::Flags Flags =
1400 TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
1401 if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
1402 if (AA->pointsToConstantMemory(
1403 MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
1404 Flags |= MachineMemOperand::MOInvariant;
1405 }
1406 }
1407
1408 const MDNode *Ranges =
1409 Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
1410 for (unsigned i = 0; i < Regs.size(); ++i) {
1411 Register Addr;
1412 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1413
1414 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
1415 Align BaseAlign = getMemOpAlign(LI);
1416 auto MMO = MF->getMachineMemOperand(
1417 Ptr, Flags, MRI->getType(Regs[i]),
1418 commonAlignment(BaseAlign, Offsets[i] / 8), AAInfo, Ranges,
1419 LI.getSyncScopeID(), LI.getOrdering());
1420 MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
1421 }
1422
1423 return true;
1424 }
1425
1426 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
1427 const StoreInst &SI = cast<StoreInst>(U);
1428 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()).isZero())
1429 return true;
1430
1431 ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
1432 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
1433 Register Base = getOrCreateVReg(*SI.getPointerOperand());
1434
1435 Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());
1436 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1437
1438 if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
1439 assert(Vals.size() == 1 && "swifterror should be single pointer");
1440
1441 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
1442 SI.getPointerOperand());
1443 MIRBuilder.buildCopy(VReg, Vals[0]);
1444 return true;
1445 }
1446
1447 MachineMemOperand::Flags Flags = TLI->getStoreMemOperandFlags(SI, *DL);
1448
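  // As with loads, an aggregate store is split below into one G_STORE per
  // leaf element at its byte offset from the value map.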
1449 for (unsigned i = 0; i < Vals.size(); ++i) {
1450 Register Addr;
1451 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1452
1453 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
1454 Align BaseAlign = getMemOpAlign(SI);
1455 auto MMO = MF->getMachineMemOperand(
1456 Ptr, Flags, MRI->getType(Vals[i]),
1457 commonAlignment(BaseAlign, Offsets[i] / 8), SI.getAAMetadata(), nullptr,
1458 SI.getSyncScopeID(), SI.getOrdering());
1459 MIRBuilder.buildStore(Vals[i], Addr, *MMO);
1460 }
1461 return true;
1462 }
1463
1464 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
1465 const Value *Src = U.getOperand(0);
1466 Type *Int32Ty = Type::getInt32Ty(U.getContext());
1467
1468 // getIndexedOffsetInType is designed for GEPs, so the first index is the
1469 // usual array element rather than looking into the actual aggregate.
1470 SmallVector<Value *, 1> Indices;
1471 Indices.push_back(ConstantInt::get(Int32Ty, 0));
1472
1473 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
1474 for (auto Idx : EVI->indices())
1475 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1476 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
1477 for (auto Idx : IVI->indices())
1478 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1479 } else {
1480 llvm::append_range(Indices, drop_begin(U.operands()));
1481 }
1482
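  // E.g. for extractvalue {i32, i64} %agg, 1 the synthesized index list is
  // {0, 1}, which on a typical 64-bit layout is byte offset 8, returned here
  // as 64 bits.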
1483 return 8 * static_cast<uint64_t>(
1484 DL.getIndexedOffsetInType(Src->getType(), Indices));
1485 }
1486
1487 bool IRTranslator::translateExtractValue(const User &U,
1488 MachineIRBuilder &MIRBuilder) {
1489 const Value *Src = U.getOperand(0);
1490 uint64_t Offset = getOffsetFromIndices(U, *DL);
1491 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1492 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
1493 unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
1494 auto &DstRegs = allocateVRegs(U);
1495
1496 for (unsigned i = 0; i < DstRegs.size(); ++i)
1497 DstRegs[i] = SrcRegs[Idx++];
1498
1499 return true;
1500 }
1501
1502 bool IRTranslator::translateInsertValue(const User &U,
1503 MachineIRBuilder &MIRBuilder) {
1504 const Value *Src = U.getOperand(0);
1505 uint64_t Offset = getOffsetFromIndices(U, *DL);
1506 auto &DstRegs = allocateVRegs(U);
1507 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
1508 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1509 ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
1510 auto *InsertedIt = InsertedRegs.begin();
1511
1512 for (unsigned i = 0; i < DstRegs.size(); ++i) {
1513 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
1514 DstRegs[i] = *InsertedIt++;
1515 else
1516 DstRegs[i] = SrcRegs[i];
1517 }
1518
1519 return true;
1520 }
1521
1522 bool IRTranslator::translateSelect(const User &U,
1523 MachineIRBuilder &MIRBuilder) {
1524 Register Tst = getOrCreateVReg(*U.getOperand(0));
1525 ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
1526 ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
1527 ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
1528
1529 uint32_t Flags = 0;
1530 if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
1531 Flags = MachineInstr::copyFlagsFromInstruction(*SI);
1532
1533 for (unsigned i = 0; i < ResRegs.size(); ++i) {
1534 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1535 }
1536
1537 return true;
1538 }
1539
1540 bool IRTranslator::translateCopy(const User &U, const Value &V,
1541 MachineIRBuilder &MIRBuilder) {
1542 Register Src = getOrCreateVReg(V);
1543 auto &Regs = *VMap.getVRegs(U);
1544 if (Regs.empty()) {
1545 Regs.push_back(Src);
1546 VMap.getOffsets(U)->push_back(0);
1547 } else {
1548 // If we already assigned a vreg for this instruction, we can't change that.
1549 // Emit a copy to satisfy the users we already emitted.
1550 MIRBuilder.buildCopy(Regs[0], Src);
1551 }
1552 return true;
1553 }
1554
1555 bool IRTranslator::translateBitCast(const User &U,
1556 MachineIRBuilder &MIRBuilder) {
1557 // If we're bitcasting to the source type, we can reuse the source vreg.
1558 if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
1559 getLLTForType(*U.getType(), *DL)) {
1560 // If the source is a ConstantInt then it was probably created by
1561 // ConstantHoisting and we should leave it alone.
1562 if (isa<ConstantInt>(U.getOperand(0)))
1563 return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
1564 MIRBuilder);
1565 return translateCopy(U, *U.getOperand(0), MIRBuilder);
1566 }
1567
1568 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1569 }
1570
1571 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1572 MachineIRBuilder &MIRBuilder) {
1573 if (containsBF16Type(U))
1574 return false;
1575
1576 uint32_t Flags = 0;
1577 if (const Instruction *I = dyn_cast<Instruction>(&U))
1578 Flags = MachineInstr::copyFlagsFromInstruction(*I);
1579
1580 Register Op = getOrCreateVReg(*U.getOperand(0));
1581 Register Res = getOrCreateVReg(U);
1582 MIRBuilder.buildInstr(Opcode, {Res}, {Op}, Flags);
1583 return true;
1584 }
1585
1586 bool IRTranslator::translateGetElementPtr(const User &U,
1587 MachineIRBuilder &MIRBuilder) {
1588 Value &Op0 = *U.getOperand(0);
1589 Register BaseReg = getOrCreateVReg(Op0);
1590 Type *PtrIRTy = Op0.getType();
1591 LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1592 Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
1593 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1594
1595 uint32_t Flags = 0;
1596 if (const Instruction *I = dyn_cast<Instruction>(&U))
1597 Flags = MachineInstr::copyFlagsFromInstruction(*I);
1598
1599   // Normalize a vector GEP: all scalar operands should be converted to
1600   // splat vectors.
1601 unsigned VectorWidth = 0;
1602
1603 // True if we should use a splat vector; using VectorWidth alone is not
1604 // sufficient.
1605 bool WantSplatVector = false;
1606 if (auto *VT = dyn_cast<VectorType>(U.getType())) {
1607 VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
1608 // We don't produce 1 x N vectors; those are treated as scalars.
1609 WantSplatVector = VectorWidth > 1;
1610 }
1611
1612 // We might need to splat the base pointer into a vector if the offsets
1613 // are vectors.
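  // E.g. for getelementptr i32, ptr %p, <4 x i64> %idx the scalar base %p is
  // splat into a <4 x p0> vector so the ptr_adds below operate elementwise.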
1614 if (WantSplatVector && !PtrTy.isVector()) {
1615 BaseReg = MIRBuilder
1616 .buildSplatBuildVector(LLT::fixed_vector(VectorWidth, PtrTy),
1617 BaseReg)
1618 .getReg(0);
1619 PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
1620 PtrTy = getLLTForType(*PtrIRTy, *DL);
1621 OffsetIRTy = DL->getIndexType(PtrIRTy);
1622 OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1623 }
1624
1625 int64_t Offset = 0;
1626 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1627 GTI != E; ++GTI) {
1628 const Value *Idx = GTI.getOperand();
1629 if (StructType *StTy = GTI.getStructTypeOrNull()) {
1630 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1631 Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1632 continue;
1633 } else {
1634 uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
1635
1636 // If this is a scalar constant or a splat vector of constants,
1637 // handle it quickly.
1638 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1639 if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
1640 Offset += ElementSize * *Val;
1641 continue;
1642 }
1643 }
1644
1645 if (Offset != 0) {
1646 auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1647 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
1648 .getReg(0);
1649 Offset = 0;
1650 }
1651
1652 Register IdxReg = getOrCreateVReg(*Idx);
1653 LLT IdxTy = MRI->getType(IdxReg);
1654 if (IdxTy != OffsetTy) {
1655 if (!IdxTy.isVector() && WantSplatVector) {
1656 IdxReg = MIRBuilder
1657 .buildSplatBuildVector(OffsetTy.changeElementType(IdxTy),
1658 IdxReg)
1659 .getReg(0);
1660 }
1661
1662 IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1663 }
1664
1665 // N = N + Idx * ElementSize;
1666 // Avoid doing it for ElementSize of 1.
1667 Register GepOffsetReg;
1668 if (ElementSize != 1) {
1669 auto ElementSizeMIB = MIRBuilder.buildConstant(
1670 getLLTForType(*OffsetIRTy, *DL), ElementSize);
1671 GepOffsetReg =
1672 MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
1673 } else
1674 GepOffsetReg = IdxReg;
1675
1676 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
1677 }
1678 }
1679
1680 if (Offset != 0) {
1681 auto OffsetMIB =
1682 MIRBuilder.buildConstant(OffsetTy, Offset);
1683
1684 if (Offset >= 0 && cast<GEPOperator>(U).isInBounds())
1685 Flags |= MachineInstr::MIFlag::NoUWrap;
1686
1687 MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
1688 Flags);
1689 return true;
1690 }
1691
1692 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1693 return true;
1694 }
1695
1696 bool IRTranslator::translateMemFunc(const CallInst &CI,
1697 MachineIRBuilder &MIRBuilder,
1698 unsigned Opcode) {
1699 const Value *SrcPtr = CI.getArgOperand(1);
1700 // If the source is undef, then just emit a nop.
1701 if (isa<UndefValue>(SrcPtr))
1702 return true;
1703
1704 SmallVector<Register, 3> SrcRegs;
1705
1706 unsigned MinPtrSize = UINT_MAX;
1707 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
1708 Register SrcReg = getOrCreateVReg(**AI);
1709 LLT SrcTy = MRI->getType(SrcReg);
1710 if (SrcTy.isPointer())
1711 MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
1712 SrcRegs.push_back(SrcReg);
1713 }
1714
1715 LLT SizeTy = LLT::scalar(MinPtrSize);
1716
1717 // The size operand should be the minimum of the pointer sizes.
1718 Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
1719 if (MRI->getType(SizeOpReg) != SizeTy)
1720 SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);
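  // E.g. an llvm.memcpy whose operands mix 64-bit and 32-bit pointers has its
  // length operand zero-extended or truncated here to an s32, the narrower of
  // the two pointer widths.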
1721
1722 auto ICall = MIRBuilder.buildInstr(Opcode);
1723 for (Register SrcReg : SrcRegs)
1724 ICall.addUse(SrcReg);
1725
1726 Align DstAlign;
1727 Align SrcAlign;
1728 unsigned IsVol =
1729 cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();
1730
1731 ConstantInt *CopySize = nullptr;
1732
1733 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1734 DstAlign = MCI->getDestAlign().valueOrOne();
1735 SrcAlign = MCI->getSourceAlign().valueOrOne();
1736 CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1737 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1738 DstAlign = MMI->getDestAlign().valueOrOne();
1739 SrcAlign = MMI->getSourceAlign().valueOrOne();
1740 CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
1741 } else {
1742 auto *MSI = cast<MemSetInst>(&CI);
1743 DstAlign = MSI->getDestAlign().valueOrOne();
1744 }
1745
1746 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1747 // We need to propagate the tail call flag from the IR inst as an argument.
1748 // Otherwise, we have to pessimize and assume later that we cannot tail call
1749 // any memory intrinsics.
1750 ICall.addImm(CI.isTailCall() ? 1 : 0);
1751 }
1752
1753 // Create mem operands to store the alignment and volatile info.
1754 MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad;
1755 MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore;
1756 if (IsVol) {
1757 LoadFlags |= MachineMemOperand::MOVolatile;
1758 StoreFlags |= MachineMemOperand::MOVolatile;
1759 }
1760
1761 AAMDNodes AAInfo = CI.getAAMetadata();
1762 if (AA && CopySize &&
1763 AA->pointsToConstantMemory(MemoryLocation(
1764 SrcPtr, LocationSize::precise(CopySize->getZExtValue()), AAInfo))) {
1765 LoadFlags |= MachineMemOperand::MOInvariant;
1766
1767 // FIXME: pointsToConstantMemory probably does not imply dereferenceable,
1768 // but the previous usage implied it did. Probably should check
1769 // isDereferenceableAndAlignedPointer.
1770 LoadFlags |= MachineMemOperand::MODereferenceable;
1771 }
1772
1773 ICall.addMemOperand(
1774 MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
1775 StoreFlags, 1, DstAlign, AAInfo));
1776 if (Opcode != TargetOpcode::G_MEMSET)
1777 ICall.addMemOperand(MF->getMachineMemOperand(
1778 MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));
1779
1780 return true;
1781 }
1782
1783 bool IRTranslator::translateTrap(const CallInst &CI,
1784 MachineIRBuilder &MIRBuilder,
1785 unsigned Opcode) {
1786 StringRef TrapFuncName =
1787 CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
1788 if (TrapFuncName.empty()) {
1789 if (Opcode == TargetOpcode::G_UBSANTRAP) {
1790 uint64_t Code = cast<ConstantInt>(CI.getOperand(0))->getZExtValue();
1791 MIRBuilder.buildInstr(Opcode, {}, ArrayRef<llvm::SrcOp>{Code});
1792 } else {
1793 MIRBuilder.buildInstr(Opcode);
1794 }
1795 return true;
1796 }
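  // Otherwise the "trap-func-name" attribute redirects the trap to an
  // external call; e.g. "trap-func-name"="__mytrap" (an arbitrary example
  // symbol) lowers to a call to __mytrap via CallLowering below.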
1797
1798 CallLowering::CallLoweringInfo Info;
1799 if (Opcode == TargetOpcode::G_UBSANTRAP)
1800 Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
1801 CI.getArgOperand(0)->getType(), 0});
1802
1803 Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
1804 Info.CB = &CI;
1805 Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
1806 return CLI->lowerCall(MIRBuilder, Info);
1807 }
1808
1809 bool IRTranslator::translateVectorInterleave2Intrinsic(
1810 const CallInst &CI, MachineIRBuilder &MIRBuilder) {
1811 assert(CI.getIntrinsicID() == Intrinsic::vector_interleave2 &&
1812 "This function can only be called on the interleave2 intrinsic!");
1813 // Canonicalize interleave2 to G_SHUFFLE_VECTOR (similar to SelectionDAG).
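  // E.g. for two <4 x i32> operands createInterleaveMask(4, 2) produces the
  // shuffle mask <0, 4, 1, 5, 2, 6, 3, 7>.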
1814 Register Op0 = getOrCreateVReg(*CI.getOperand(0));
1815 Register Op1 = getOrCreateVReg(*CI.getOperand(1));
1816 Register Res = getOrCreateVReg(CI);
1817
1818 LLT OpTy = MRI->getType(Op0);
1819 MIRBuilder.buildShuffleVector(Res, Op0, Op1,
1820 createInterleaveMask(OpTy.getNumElements(), 2));
1821
1822 return true;
1823 }
1824
1825 bool IRTranslator::translateVectorDeinterleave2Intrinsic(
1826 const CallInst &CI, MachineIRBuilder &MIRBuilder) {
1827 assert(CI.getIntrinsicID() == Intrinsic::vector_deinterleave2 &&
1828 "This function can only be called on the deinterleave2 intrinsic!");
1829 // Canonicalize deinterleave2 to shuffles that extract sub-vectors (similar to
1830 // SelectionDAG).
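  // E.g. for an <8 x i32> operand the two stride-2 masks are <0, 2, 4, 6> and
  // <1, 3, 5, 7>, extracting the even and odd lanes respectively.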
1831 Register Op = getOrCreateVReg(*CI.getOperand(0));
1832 auto Undef = MIRBuilder.buildUndef(MRI->getType(Op));
1833 ArrayRef<Register> Res = getOrCreateVRegs(CI);
1834
1835 LLT ResTy = MRI->getType(Res[0]);
1836 MIRBuilder.buildShuffleVector(Res[0], Op, Undef,
1837 createStrideMask(0, 2, ResTy.getNumElements()));
1838 MIRBuilder.buildShuffleVector(Res[1], Op, Undef,
1839 createStrideMask(1, 2, ResTy.getNumElements()));
1840
1841 return true;
1842 }
1843
1844 void IRTranslator::getStackGuard(Register DstReg,
1845 MachineIRBuilder &MIRBuilder) {
1846 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1847 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
1848 auto MIB =
1849 MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1850
1851 Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
1852 if (!Global)
1853 return;
1854
1855 unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
1856 LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
1857
1858 MachinePointerInfo MPInfo(Global);
1859 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1860 MachineMemOperand::MODereferenceable;
1861 MachineMemOperand *MemRef = MF->getMachineMemOperand(
1862 MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
1863 MIB.setMemRefs({MemRef});
1864 }
1865
1866 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1867 MachineIRBuilder &MIRBuilder) {
1868 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
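  // E.g. llvm.uadd.with.overflow.i32 defines two registers here: the i32 sum
  // in ResRegs[0] and the i1 overflow bit in ResRegs[1], both produced by a
  // single G_UADDO.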
1869 MIRBuilder.buildInstr(
1870 Op, {ResRegs[0], ResRegs[1]},
1871 {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
1872
1873 return true;
1874 }
1875
1876 bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
1877 MachineIRBuilder &MIRBuilder) {
1878 Register Dst = getOrCreateVReg(CI);
1879 Register Src0 = getOrCreateVReg(*CI.getOperand(0));
1880 Register Src1 = getOrCreateVReg(*CI.getOperand(1));
1881 uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
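  // E.g. llvm.smul.fix.i32(%a, %b, i32 31) becomes G_SMULFIX %a, %b, 31 with
  // the scale attached as an immediate source operand.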
1882 MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });
1883 return true;
1884 }
1885
1886 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1887 switch (ID) {
1888 default:
1889 break;
1890 case Intrinsic::acos:
1891 return TargetOpcode::G_FACOS;
1892 case Intrinsic::asin:
1893 return TargetOpcode::G_FASIN;
1894 case Intrinsic::atan:
1895 return TargetOpcode::G_FATAN;
1896 case Intrinsic::atan2:
1897 return TargetOpcode::G_FATAN2;
1898 case Intrinsic::bswap:
1899 return TargetOpcode::G_BSWAP;
1900 case Intrinsic::bitreverse:
1901 return TargetOpcode::G_BITREVERSE;
1902 case Intrinsic::fshl:
1903 return TargetOpcode::G_FSHL;
1904 case Intrinsic::fshr:
1905 return TargetOpcode::G_FSHR;
1906 case Intrinsic::ceil:
1907 return TargetOpcode::G_FCEIL;
1908 case Intrinsic::cos:
1909 return TargetOpcode::G_FCOS;
1910 case Intrinsic::cosh:
1911 return TargetOpcode::G_FCOSH;
1912 case Intrinsic::ctpop:
1913 return TargetOpcode::G_CTPOP;
1914 case Intrinsic::exp:
1915 return TargetOpcode::G_FEXP;
1916 case Intrinsic::exp2:
1917 return TargetOpcode::G_FEXP2;
1918 case Intrinsic::exp10:
1919 return TargetOpcode::G_FEXP10;
1920 case Intrinsic::fabs:
1921 return TargetOpcode::G_FABS;
1922 case Intrinsic::copysign:
1923 return TargetOpcode::G_FCOPYSIGN;
1924 case Intrinsic::minnum:
1925 return TargetOpcode::G_FMINNUM;
1926 case Intrinsic::maxnum:
1927 return TargetOpcode::G_FMAXNUM;
1928 case Intrinsic::minimum:
1929 return TargetOpcode::G_FMINIMUM;
1930 case Intrinsic::maximum:
1931 return TargetOpcode::G_FMAXIMUM;
1932 case Intrinsic::minimumnum:
1933 return TargetOpcode::G_FMINIMUMNUM;
1934 case Intrinsic::maximumnum:
1935 return TargetOpcode::G_FMAXIMUMNUM;
1936 case Intrinsic::canonicalize:
1937 return TargetOpcode::G_FCANONICALIZE;
1938 case Intrinsic::floor:
1939 return TargetOpcode::G_FFLOOR;
1940 case Intrinsic::fma:
1941 return TargetOpcode::G_FMA;
1942 case Intrinsic::log:
1943 return TargetOpcode::G_FLOG;
1944 case Intrinsic::log2:
1945 return TargetOpcode::G_FLOG2;
1946 case Intrinsic::log10:
1947 return TargetOpcode::G_FLOG10;
1948 case Intrinsic::ldexp:
1949 return TargetOpcode::G_FLDEXP;
1950 case Intrinsic::nearbyint:
1951 return TargetOpcode::G_FNEARBYINT;
1952 case Intrinsic::pow:
1953 return TargetOpcode::G_FPOW;
1954 case Intrinsic::powi:
1955 return TargetOpcode::G_FPOWI;
1956 case Intrinsic::rint:
1957 return TargetOpcode::G_FRINT;
1958 case Intrinsic::round:
1959 return TargetOpcode::G_INTRINSIC_ROUND;
1960 case Intrinsic::roundeven:
1961 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1962 case Intrinsic::sin:
1963 return TargetOpcode::G_FSIN;
1964 case Intrinsic::sinh:
1965 return TargetOpcode::G_FSINH;
1966 case Intrinsic::sqrt:
1967 return TargetOpcode::G_FSQRT;
1968 case Intrinsic::tan:
1969 return TargetOpcode::G_FTAN;
1970 case Intrinsic::tanh:
1971 return TargetOpcode::G_FTANH;
1972 case Intrinsic::trunc:
1973 return TargetOpcode::G_INTRINSIC_TRUNC;
1974 case Intrinsic::readcyclecounter:
1975 return TargetOpcode::G_READCYCLECOUNTER;
1976 case Intrinsic::readsteadycounter:
1977 return TargetOpcode::G_READSTEADYCOUNTER;
1978 case Intrinsic::ptrmask:
1979 return TargetOpcode::G_PTRMASK;
1980 case Intrinsic::lrint:
1981 return TargetOpcode::G_INTRINSIC_LRINT;
1982 case Intrinsic::llrint:
1983 return TargetOpcode::G_INTRINSIC_LLRINT;
1984 // FADD/FMUL require checking the FMF, so are handled elsewhere.
1985 case Intrinsic::vector_reduce_fmin:
1986 return TargetOpcode::G_VECREDUCE_FMIN;
1987 case Intrinsic::vector_reduce_fmax:
1988 return TargetOpcode::G_VECREDUCE_FMAX;
1989 case Intrinsic::vector_reduce_fminimum:
1990 return TargetOpcode::G_VECREDUCE_FMINIMUM;
1991 case Intrinsic::vector_reduce_fmaximum:
1992 return TargetOpcode::G_VECREDUCE_FMAXIMUM;
1993 case Intrinsic::vector_reduce_add:
1994 return TargetOpcode::G_VECREDUCE_ADD;
1995 case Intrinsic::vector_reduce_mul:
1996 return TargetOpcode::G_VECREDUCE_MUL;
1997 case Intrinsic::vector_reduce_and:
1998 return TargetOpcode::G_VECREDUCE_AND;
1999 case Intrinsic::vector_reduce_or:
2000 return TargetOpcode::G_VECREDUCE_OR;
2001 case Intrinsic::vector_reduce_xor:
2002 return TargetOpcode::G_VECREDUCE_XOR;
2003 case Intrinsic::vector_reduce_smax:
2004 return TargetOpcode::G_VECREDUCE_SMAX;
2005 case Intrinsic::vector_reduce_smin:
2006 return TargetOpcode::G_VECREDUCE_SMIN;
2007 case Intrinsic::vector_reduce_umax:
2008 return TargetOpcode::G_VECREDUCE_UMAX;
2009 case Intrinsic::vector_reduce_umin:
2010 return TargetOpcode::G_VECREDUCE_UMIN;
2011 case Intrinsic::experimental_vector_compress:
2012 return TargetOpcode::G_VECTOR_COMPRESS;
2013 case Intrinsic::lround:
2014 return TargetOpcode::G_LROUND;
2015 case Intrinsic::llround:
2016 return TargetOpcode::G_LLROUND;
2017 case Intrinsic::get_fpenv:
2018 return TargetOpcode::G_GET_FPENV;
2019 case Intrinsic::get_fpmode:
2020 return TargetOpcode::G_GET_FPMODE;
2021 }
2022 return Intrinsic::not_intrinsic;
2023 }
2024
2025 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
2026 Intrinsic::ID ID,
2027 MachineIRBuilder &MIRBuilder) {
2028
2029 unsigned Op = getSimpleIntrinsicOpcode(ID);
2030
2031 // Is this a simple intrinsic?
2032 if (Op == Intrinsic::not_intrinsic)
2033 return false;
2034
2035 // Yes. Let's translate it.
2036 SmallVector<llvm::SrcOp, 4> VRegs;
2037 for (const auto &Arg : CI.args())
2038 VRegs.push_back(getOrCreateVReg(*Arg));
2039
2040 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
2041 MachineInstr::copyFlagsFromInstruction(CI));
2042 return true;
2043 }
2044
2045 // TODO: Include ConstrainedOps.def when all strict instructions are defined.
2046 static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
2047 switch (ID) {
2048 case Intrinsic::experimental_constrained_fadd:
2049 return TargetOpcode::G_STRICT_FADD;
2050 case Intrinsic::experimental_constrained_fsub:
2051 return TargetOpcode::G_STRICT_FSUB;
2052 case Intrinsic::experimental_constrained_fmul:
2053 return TargetOpcode::G_STRICT_FMUL;
2054 case Intrinsic::experimental_constrained_fdiv:
2055 return TargetOpcode::G_STRICT_FDIV;
2056 case Intrinsic::experimental_constrained_frem:
2057 return TargetOpcode::G_STRICT_FREM;
2058 case Intrinsic::experimental_constrained_fma:
2059 return TargetOpcode::G_STRICT_FMA;
2060 case Intrinsic::experimental_constrained_sqrt:
2061 return TargetOpcode::G_STRICT_FSQRT;
2062 case Intrinsic::experimental_constrained_ldexp:
2063 return TargetOpcode::G_STRICT_FLDEXP;
2064 default:
2065 return 0;
2066 }
2067 }
2068
2069 bool IRTranslator::translateConstrainedFPIntrinsic(
2070 const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
2071 fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
2072
2073 unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
2074 if (!Opcode)
2075 return false;
2076
2077 uint32_t Flags = MachineInstr::copyFlagsFromInstruction(FPI);
2078 if (EB == fp::ExceptionBehavior::ebIgnore)
2079 Flags |= MachineInstr::NoFPExcept;
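  // E.g. llvm.experimental.constrained.fadd with metadata "fpexcept.ignore"
  // becomes a G_STRICT_FADD carrying the NoFPExcept flag.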
2080
2081 SmallVector<llvm::SrcOp, 4> VRegs;
2082 for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
2083 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(I)));
2084
2085 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
2086 return true;
2087 }
2088
2089 std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
2090 auto VRegs = getOrCreateVRegs(Arg);
2091 if (VRegs.size() != 1)
2092 return std::nullopt;
2093
2094 // Arguments are lowered as a copy of a livein physical register.
2095 auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
2096 if (!VRegDef || !VRegDef->isCopy())
2097 return std::nullopt;
2098 return VRegDef->getOperand(1).getReg().asMCReg();
2099 }
2100
2101 bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
2102 const DILocalVariable *Var,
2103 const DIExpression *Expr,
2104 const DebugLoc &DL,
2105 MachineIRBuilder &MIRBuilder) {
2106 auto *Arg = dyn_cast<Argument>(Val);
2107 if (!Arg)
2108 return false;
2109
2110 if (!Expr->isEntryValue())
2111 return false;
2112
2113 std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
2114 if (!PhysReg) {
2115 LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
2116 << ": expression is entry_value but "
2117 << "couldn't find a physical register\n");
2118 LLVM_DEBUG(dbgs() << *Var << "\n");
2119 return true;
2120 }
2121
2122 if (isDeclare) {
2123 // Append an op deref to account for the fact that this is a dbg_declare.
2124 Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
2125 MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);
2126 } else {
2127 MIRBuilder.buildDirectDbgValue(*PhysReg, Var, Expr);
2128 }
2129
2130 return true;
2131 }
2132
2133 static unsigned getConvOpcode(Intrinsic::ID ID) {
2134 switch (ID) {
2135 default:
2136 llvm_unreachable("Unexpected intrinsic");
2137 case Intrinsic::experimental_convergence_anchor:
2138 return TargetOpcode::CONVERGENCECTRL_ANCHOR;
2139 case Intrinsic::experimental_convergence_entry:
2140 return TargetOpcode::CONVERGENCECTRL_ENTRY;
2141 case Intrinsic::experimental_convergence_loop:
2142 return TargetOpcode::CONVERGENCECTRL_LOOP;
2143 }
2144 }
2145
2146 bool IRTranslator::translateConvergenceControlIntrinsic(
2147 const CallInst &CI, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder) {
2148 MachineInstrBuilder MIB = MIRBuilder.buildInstr(getConvOpcode(ID));
2149 Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
2150 MIB.addDef(OutputReg);
2151
2152 if (ID == Intrinsic::experimental_convergence_loop) {
2153 auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl);
2154 assert(Bundle && "Expected a convergence control token.");
2155 Register InputReg =
2156 getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
2157 MIB.addUse(InputReg);
2158 }
2159
2160 return true;
2161 }
2162
2163 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
2164 MachineIRBuilder &MIRBuilder) {
2165 if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
2166 if (ORE->enabled()) {
2167 if (MemoryOpRemark::canHandle(MI, *LibInfo)) {
2168 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
2169 R.visit(MI);
2170 }
2171 }
2172 }
2173
2174   // If this is a simple intrinsic (that is, we just need to add a def of
2175   // a vreg, and uses for each arg operand), then translate it.
2176 if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
2177 return true;
2178
2179 switch (ID) {
2180 default:
2181 break;
2182 case Intrinsic::lifetime_start:
2183 case Intrinsic::lifetime_end: {
2184 // No stack colouring in O0, discard region information.
2185 if (MF->getTarget().getOptLevel() == CodeGenOptLevel::None ||
2186 MF->getFunction().hasOptNone())
2187 return true;
2188
2189 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
2190 : TargetOpcode::LIFETIME_END;
2191
2192 // Get the underlying objects for the location passed on the lifetime
2193 // marker.
2194 SmallVector<const Value *, 4> Allocas;
2195 getUnderlyingObjects(CI.getArgOperand(1), Allocas);
2196
2197 // Iterate over each underlying object, creating lifetime markers for each
2198 // static alloca. Quit if we find a non-static alloca.
2199 for (const Value *V : Allocas) {
2200 const AllocaInst *AI = dyn_cast<AllocaInst>(V);
2201 if (!AI)
2202 continue;
2203
2204 if (!AI->isStaticAlloca())
2205 return true;
2206
2207 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
2208 }
2209 return true;
2210 }
2211 case Intrinsic::fake_use: {
2212 SmallVector<llvm::SrcOp, 4> VRegs;
2213 for (const auto &Arg : CI.args())
2214 llvm::append_range(VRegs, getOrCreateVRegs(*Arg));
2215 MIRBuilder.buildInstr(TargetOpcode::FAKE_USE, {}, VRegs);
2216 MF->setHasFakeUses(true);
2217 return true;
2218 }
2219 case Intrinsic::dbg_declare: {
2220 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
2221 assert(DI.getVariable() && "Missing variable");
2222 translateDbgDeclareRecord(DI.getAddress(), DI.hasArgList(), DI.getVariable(),
2223 DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
2224 return true;
2225 }
2226 case Intrinsic::dbg_label: {
2227 const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
2228 assert(DI.getLabel() && "Missing label");
2229
2230 assert(DI.getLabel()->isValidLocationForIntrinsic(
2231 MIRBuilder.getDebugLoc()) &&
2232 "Expected inlined-at fields to agree");
2233
2234 MIRBuilder.buildDbgLabel(DI.getLabel());
2235 return true;
2236 }
2237 case Intrinsic::vaend:
2238 // No target I know of cares about va_end. Certainly no in-tree target
2239 // does. Simplest intrinsic ever!
2240 return true;
2241 case Intrinsic::vastart: {
2242 Value *Ptr = CI.getArgOperand(0);
2243 unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
2244 Align Alignment = getKnownAlignment(Ptr, *DL);
2245
2246 MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
2247 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
2248 MachineMemOperand::MOStore,
2249 ListSize, Alignment));
2250 return true;
2251 }
2252 case Intrinsic::dbg_assign:
2253 // A dbg.assign is a dbg.value with more information about stack locations,
2254 // typically produced during optimisation of variables with leaked
2255 // addresses. We can treat it like a normal dbg_value intrinsic here; to
2256 // benefit from the full analysis of stack/SSA locations, GlobalISel would
2257 // need to register for and use the AssignmentTrackingAnalysis pass.
2258 [[fallthrough]];
2259 case Intrinsic::dbg_value: {
2260 // This form of DBG_VALUE is target-independent.
2261 const DbgValueInst &DI = cast<DbgValueInst>(CI);
2262 translateDbgValueRecord(DI.getValue(), DI.hasArgList(), DI.getVariable(),
2263 DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
2264 return true;
2265 }
2266 case Intrinsic::uadd_with_overflow:
2267 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
2268 case Intrinsic::sadd_with_overflow:
2269 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
2270 case Intrinsic::usub_with_overflow:
2271 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
2272 case Intrinsic::ssub_with_overflow:
2273 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
2274 case Intrinsic::umul_with_overflow:
2275 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
2276 case Intrinsic::smul_with_overflow:
2277 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
2278 case Intrinsic::uadd_sat:
2279 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
2280 case Intrinsic::sadd_sat:
2281 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
2282 case Intrinsic::usub_sat:
2283 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
2284 case Intrinsic::ssub_sat:
2285 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
2286 case Intrinsic::ushl_sat:
2287 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
2288 case Intrinsic::sshl_sat:
2289 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
2290 case Intrinsic::umin:
2291 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
2292 case Intrinsic::umax:
2293 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
2294 case Intrinsic::smin:
2295 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
2296 case Intrinsic::smax:
2297 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
2298 case Intrinsic::abs:
2299 // TODO: Preserve "int min is poison" arg in GMIR?
2300 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
2301 case Intrinsic::smul_fix:
2302 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2303 case Intrinsic::umul_fix:
2304 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2305 case Intrinsic::smul_fix_sat:
2306 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2307 case Intrinsic::umul_fix_sat:
2308 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2309 case Intrinsic::sdiv_fix:
2310 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2311 case Intrinsic::udiv_fix:
2312 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2313 case Intrinsic::sdiv_fix_sat:
2314 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2315 case Intrinsic::udiv_fix_sat:
2316 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2317 case Intrinsic::fmuladd: {
2318 const TargetMachine &TM = MF->getTarget();
2319 Register Dst = getOrCreateVReg(CI);
2320 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
2321 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
2322 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
2323 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
2324 TLI->isFMAFasterThanFMulAndFAdd(*MF,
2325 TLI->getValueType(*DL, CI.getType()))) {
2326 // TODO: Revisit this to see if we should move this part of the
2327 // lowering to the combiner.
2328 MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
2329 MachineInstr::copyFlagsFromInstruction(CI));
2330 } else {
2331 LLT Ty = getLLTForType(*CI.getType(), *DL);
2332 auto FMul = MIRBuilder.buildFMul(
2333 Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
2334 MIRBuilder.buildFAdd(Dst, FMul, Op2,
2335 MachineInstr::copyFlagsFromInstruction(CI));
2336 }
2337 return true;
2338 }
2339 case Intrinsic::convert_from_fp16:
2340 // FIXME: This intrinsic should probably be removed from the IR.
2341 MIRBuilder.buildFPExt(getOrCreateVReg(CI),
2342 getOrCreateVReg(*CI.getArgOperand(0)),
2343 MachineInstr::copyFlagsFromInstruction(CI));
2344 return true;
2345 case Intrinsic::convert_to_fp16:
2346 // FIXME: This intrinsic should probably be removed from the IR.
2347 MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
2348 getOrCreateVReg(*CI.getArgOperand(0)),
2349 MachineInstr::copyFlagsFromInstruction(CI));
2350 return true;
2351 case Intrinsic::frexp: {
2352 ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
2353 MIRBuilder.buildFFrexp(VRegs[0], VRegs[1],
2354 getOrCreateVReg(*CI.getArgOperand(0)),
2355 MachineInstr::copyFlagsFromInstruction(CI));
2356 return true;
2357 }
2358 case Intrinsic::sincos: {
2359 ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
2360 MIRBuilder.buildFSincos(VRegs[0], VRegs[1],
2361 getOrCreateVReg(*CI.getArgOperand(0)),
2362 MachineInstr::copyFlagsFromInstruction(CI));
2363 return true;
2364 }
2365 case Intrinsic::fptosi_sat:
2366 MIRBuilder.buildFPTOSI_SAT(getOrCreateVReg(CI),
2367 getOrCreateVReg(*CI.getArgOperand(0)));
2368 return true;
2369 case Intrinsic::fptoui_sat:
2370 MIRBuilder.buildFPTOUI_SAT(getOrCreateVReg(CI),
2371 getOrCreateVReg(*CI.getArgOperand(0)));
2372 return true;
2373 case Intrinsic::memcpy_inline:
2374 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2375 case Intrinsic::memcpy:
2376 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2377 case Intrinsic::memmove:
2378 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2379 case Intrinsic::memset:
2380 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2381 case Intrinsic::eh_typeid_for: {
2382 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
2383 Register Reg = getOrCreateVReg(CI);
2384 unsigned TypeID = MF->getTypeIDFor(GV);
2385 MIRBuilder.buildConstant(Reg, TypeID);
2386 return true;
2387 }
2388 case Intrinsic::objectsize:
2389 llvm_unreachable("llvm.objectsize.* should have been lowered already");
2390
2391 case Intrinsic::is_constant:
2392 llvm_unreachable("llvm.is.constant.* should have been lowered already");
2393
2394 case Intrinsic::stackguard:
2395 getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2396 return true;
2397 case Intrinsic::stackprotector: {
2398 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2399 Register GuardVal;
2400 if (TLI->useLoadStackGuardNode(*CI.getModule())) {
2401 GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2402 getStackGuard(GuardVal, MIRBuilder);
2403 } else
2404 GuardVal = getOrCreateVReg(*CI.getArgOperand(0)); // The guard's value.
2405
2406 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
2407 int FI = getOrCreateFrameIndex(*Slot);
2408 MF->getFrameInfo().setStackProtectorIndex(FI);
2409
2410 MIRBuilder.buildStore(
2411 GuardVal, getOrCreateVReg(*Slot),
2412 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
2413 MachineMemOperand::MOStore |
2414 MachineMemOperand::MOVolatile,
2415 PtrTy, Align(8)));
2416 return true;
2417 }
2418 case Intrinsic::stacksave: {
2419 MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
2420 return true;
2421 }
2422 case Intrinsic::stackrestore: {
2423 MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},
2424 {getOrCreateVReg(*CI.getArgOperand(0))});
2425 return true;
2426 }
2427 case Intrinsic::cttz:
2428 case Intrinsic::ctlz: {
2429 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
2430 bool isTrailing = ID == Intrinsic::cttz;
2431 unsigned Opcode = isTrailing
2432 ? Cst->isZero() ? TargetOpcode::G_CTTZ
2433 : TargetOpcode::G_CTTZ_ZERO_UNDEF
2434 : Cst->isZero() ? TargetOpcode::G_CTLZ
2435 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
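    // E.g. llvm.cttz.i32(%x, i1 true) selects G_CTTZ_ZERO_UNDEF, while a
    // false is-zero-poison flag selects the plain G_CTTZ form.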
2436 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
2437 {getOrCreateVReg(*CI.getArgOperand(0))});
2438 return true;
2439 }
2440 case Intrinsic::invariant_start: {
2441 MIRBuilder.buildUndef(getOrCreateVReg(CI));
2442 return true;
2443 }
2444 case Intrinsic::invariant_end:
2445 return true;
2446 case Intrinsic::expect:
2447 case Intrinsic::expect_with_probability:
2448 case Intrinsic::annotation:
2449 case Intrinsic::ptr_annotation:
2450 case Intrinsic::launder_invariant_group:
2451 case Intrinsic::strip_invariant_group: {
2452 // Drop the intrinsic, but forward the value.
2453 MIRBuilder.buildCopy(getOrCreateVReg(CI),
2454 getOrCreateVReg(*CI.getArgOperand(0)));
2455 return true;
2456 }
2457 case Intrinsic::assume:
2458 case Intrinsic::experimental_noalias_scope_decl:
2459 case Intrinsic::var_annotation:
2460 case Intrinsic::sideeffect:
2461 // Discard annotate attributes, assumptions, and artificial side-effects.
2462 return true;
2463 case Intrinsic::read_volatile_register:
2464 case Intrinsic::read_register: {
2465 Value *Arg = CI.getArgOperand(0);
2466 MIRBuilder
2467 .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2468 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
2469 return true;
2470 }
2471 case Intrinsic::write_register: {
2472 Value *Arg = CI.getArgOperand(0);
2473 MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
2474 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
2475 .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
2476 return true;
2477 }
2478 case Intrinsic::localescape: {
2479 MachineBasicBlock &EntryMBB = MF->front();
2480 StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());
2481
2482 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
2483 // is the same on all targets.
2484 for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
2485 Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
2486 if (isa<ConstantPointerNull>(Arg))
2487 continue; // Skip null pointers. They represent a hole in index space.
2488
2489 int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
2490 MCSymbol *FrameAllocSym =
2491 MF->getContext().getOrCreateFrameAllocSymbol(EscapedName, Idx);
2492
2493 // This should be inserted at the start of the entry block.
2494 auto LocalEscape =
2495 MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
2496 .addSym(FrameAllocSym)
2497 .addFrameIndex(FI);
2498
2499 EntryMBB.insert(EntryMBB.begin(), LocalEscape);
2500 }
2501
2502 return true;
2503 }
2504 case Intrinsic::vector_reduce_fadd:
2505 case Intrinsic::vector_reduce_fmul: {
2506 // Need to check for the reassoc flag to decide whether we want a
2507 // sequential reduction opcode or not.
2508 Register Dst = getOrCreateVReg(CI);
2509 Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
2510 Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
2511 unsigned Opc = 0;
2512 if (!CI.hasAllowReassoc()) {
2513 // The sequential ordering case.
2514 Opc = ID == Intrinsic::vector_reduce_fadd
2515 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2516 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2517 MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
2518 MachineInstr::copyFlagsFromInstruction(CI));
2519 return true;
2520 }
2521 // We split the operation into a separate G_FADD/G_FMUL + the reduce,
2522 // since the associativity doesn't matter.
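    // E.g. a reassoc llvm.vector.reduce.fadd(float %acc, <4 x float> %v)
    // becomes G_VECREDUCE_FADD on %v followed by a scalar G_FADD with %acc.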
2523 unsigned ScalarOpc;
2524 if (ID == Intrinsic::vector_reduce_fadd) {
2525 Opc = TargetOpcode::G_VECREDUCE_FADD;
2526 ScalarOpc = TargetOpcode::G_FADD;
2527 } else {
2528 Opc = TargetOpcode::G_VECREDUCE_FMUL;
2529 ScalarOpc = TargetOpcode::G_FMUL;
2530 }
2531 LLT DstTy = MRI->getType(Dst);
2532 auto Rdx = MIRBuilder.buildInstr(
2533 Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
2534 MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2535 MachineInstr::copyFlagsFromInstruction(CI));
2536
2537 return true;
2538 }
2539 case Intrinsic::trap:
2540 return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
2541 case Intrinsic::debugtrap:
2542 return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
2543 case Intrinsic::ubsantrap:
2544 return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
2545 case Intrinsic::allow_runtime_check:
2546 case Intrinsic::allow_ubsan_check:
2547 MIRBuilder.buildCopy(getOrCreateVReg(CI),
2548 getOrCreateVReg(*ConstantInt::getTrue(CI.getType())));
2549 return true;
2550 case Intrinsic::amdgcn_cs_chain:
2551 return translateCallBase(CI, MIRBuilder);
2552 case Intrinsic::fptrunc_round: {
2553 uint32_t Flags = MachineInstr::copyFlagsFromInstruction(CI);
2554
2555 // Convert the metadata argument to a constant integer
2556 Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(1))->getMetadata();
2557 std::optional<RoundingMode> RoundMode =
2558 convertStrToRoundingMode(cast<MDString>(MD)->getString());
2559
2560 // Add the Rounding mode as an integer
2561 MIRBuilder
2562 .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
2563 {getOrCreateVReg(CI)},
2564 {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
2565 .addImm((int)*RoundMode);
2566
2567 return true;
2568 }
2569 case Intrinsic::is_fpclass: {
2570 Value *FpValue = CI.getOperand(0);
2571 ConstantInt *TestMaskValue = cast<ConstantInt>(CI.getOperand(1));
2572
2573 MIRBuilder
2574 .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
2575 {getOrCreateVReg(*FpValue)})
2576 .addImm(TestMaskValue->getZExtValue());
2577
2578 return true;
2579 }
2580 case Intrinsic::set_fpenv: {
2581 Value *FPEnv = CI.getOperand(0);
2582 MIRBuilder.buildSetFPEnv(getOrCreateVReg(*FPEnv));
2583 return true;
2584 }
2585 case Intrinsic::reset_fpenv:
2586 MIRBuilder.buildResetFPEnv();
2587 return true;
2588 case Intrinsic::set_fpmode: {
2589 Value *FPState = CI.getOperand(0);
2590 MIRBuilder.buildSetFPMode(getOrCreateVReg(*FPState));
2591 return true;
2592 }
2593 case Intrinsic::reset_fpmode:
2594 MIRBuilder.buildResetFPMode();
2595 return true;
2596 case Intrinsic::get_rounding:
2597 MIRBuilder.buildGetRounding(getOrCreateVReg(CI));
2598 return true;
2599 case Intrinsic::vscale: {
2600 MIRBuilder.buildVScale(getOrCreateVReg(CI), 1);
2601 return true;
2602 }
2603 case Intrinsic::scmp:
2604 MIRBuilder.buildSCmp(getOrCreateVReg(CI),
2605 getOrCreateVReg(*CI.getOperand(0)),
2606 getOrCreateVReg(*CI.getOperand(1)));
2607 return true;
2608 case Intrinsic::ucmp:
2609 MIRBuilder.buildUCmp(getOrCreateVReg(CI),
2610 getOrCreateVReg(*CI.getOperand(0)),
2611 getOrCreateVReg(*CI.getOperand(1)));
2612 return true;
2613 case Intrinsic::vector_extract:
2614 return translateExtractVector(CI, MIRBuilder);
2615 case Intrinsic::vector_insert:
2616 return translateInsertVector(CI, MIRBuilder);
2617 case Intrinsic::stepvector: {
2618 MIRBuilder.buildStepVector(getOrCreateVReg(CI), 1);
2619 return true;
2620 }
2621 case Intrinsic::prefetch: {
2622 Value *Addr = CI.getOperand(0);
2623 unsigned RW = cast<ConstantInt>(CI.getOperand(1))->getZExtValue();
2624 unsigned Locality = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
2625 unsigned CacheType = cast<ConstantInt>(CI.getOperand(3))->getZExtValue();
2626
2627 auto Flags = RW ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
2628 auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
2629 LLT(), Align());
2630
2631 MIRBuilder.buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
2632 MMO);
2633
2634 return true;
2635 }
2636
2637 case Intrinsic::vector_interleave2:
2638 case Intrinsic::vector_deinterleave2: {
2639 // Both intrinsics have at least one operand.
2640 Value *Op0 = CI.getOperand(0);
2641 LLT ResTy = getLLTForType(*Op0->getType(), MIRBuilder.getDataLayout());
2642 if (!ResTy.isFixedVector())
2643 return false;
2644
2645 if (CI.getIntrinsicID() == Intrinsic::vector_interleave2)
2646 return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
2647
2648 return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
2649 }
2650
2651 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
2652 case Intrinsic::INTRINSIC:
2653 #include "llvm/IR/ConstrainedOps.def"
2654 return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
2655 MIRBuilder);
2656 case Intrinsic::experimental_convergence_anchor:
2657 case Intrinsic::experimental_convergence_entry:
2658 case Intrinsic::experimental_convergence_loop:
2659 return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
2660 }
2661 return false;
2662 }
2663
2664 bool IRTranslator::translateInlineAsm(const CallBase &CB,
2665 MachineIRBuilder &MIRBuilder) {
2666 if (containsBF16Type(CB))
2667 return false;
2668
2669 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2670
2671 if (!ALI) {
2672 LLVM_DEBUG(
2673 dbgs() << "Inline asm lowering is not supported for this target yet\n");
2674 return false;
2675 }
2676
2677 return ALI->lowerInlineAsm(
2678 MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
2679 }
2680
2681 bool IRTranslator::translateCallBase(const CallBase &CB,
2682 MachineIRBuilder &MIRBuilder) {
2683 ArrayRef<Register> Res = getOrCreateVRegs(CB);
2684
2685 SmallVector<ArrayRef<Register>, 8> Args;
2686 Register SwiftInVReg = 0;
2687 Register SwiftErrorVReg = 0;
2688 for (const auto &Arg : CB.args()) {
2689 if (CLI->supportSwiftError() && isSwiftError(Arg)) {
2690 assert(SwiftInVReg == 0 && "Expected only one swift error argument");
2691 LLT Ty = getLLTForType(*Arg->getType(), *DL);
2692 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2693 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2694 &CB, &MIRBuilder.getMBB(), Arg));
2695 Args.emplace_back(ArrayRef(SwiftInVReg));
2696 SwiftErrorVReg =
2697 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
2698 continue;
2699 }
2700 Args.push_back(getOrCreateVRegs(*Arg));
2701 }
2702
2703 if (auto *CI = dyn_cast<CallInst>(&CB)) {
2704 if (ORE->enabled()) {
2705 if (MemoryOpRemark::canHandle(CI, *LibInfo)) {
2706 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
2707 R.visit(CI);
2708 }
2709 }
2710 }
2711
2712 std::optional<CallLowering::PtrAuthInfo> PAI;
2713 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_ptrauth)) {
2714 // Functions should never be ptrauth-called directly.
2715 assert(!CB.getCalledFunction() && "invalid direct ptrauth call");
2716
2717 const Value *Key = Bundle->Inputs[0];
2718 const Value *Discriminator = Bundle->Inputs[1];
2719
2720 // Look through ptrauth constants to try to eliminate the matching bundle
2721 // and turn this into a direct call with no ptrauth.
2722 // CallLowering will use the raw pointer if it doesn't find the PAI.
2723 const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CB.getCalledOperand());
2724 if (!CalleeCPA || !isa<Function>(CalleeCPA->getPointer()) ||
2725 !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, *DL)) {
2726 // If we can't make it direct, package the bundle into PAI.
2727 Register DiscReg = getOrCreateVReg(*Discriminator);
2728 PAI = CallLowering::PtrAuthInfo{cast<ConstantInt>(Key)->getZExtValue(),
2729 DiscReg};
2730 }
2731 }
2732
2733 Register ConvergenceCtrlToken = 0;
2734 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
2735 const auto &Token = *Bundle->Inputs[0].get();
2736 ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
2737 }
2738
2739 // We don't set HasCalls on MFI here yet because call lowering may decide to
2740 // optimize into tail calls. Instead, we defer that to selection where a final
2741 // scan is done to check if any instructions are calls.
2742 bool Success = CLI->lowerCall(
2743 MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,
2744 [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
2745
2746 // Check if we just inserted a tail call.
2747 if (Success) {
2748 assert(!HasTailCall && "Can't tail call return twice from block?");
2749 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2750 HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
2751 }
2752
2753 return Success;
2754 }
2755
2756 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
2757 if (containsBF16Type(U))
2758 return false;
2759
2760 const CallInst &CI = cast<CallInst>(U);
2761 const Function *F = CI.getCalledFunction();
2762
2763 // FIXME: support Windows dllimport function calls and calls through
2764 // weak symbols.
2765 if (F && (F->hasDLLImportStorageClass() ||
2766 (MF->getTarget().getTargetTriple().isOSWindows() &&
2767 F->hasExternalWeakLinkage())))
2768 return false;
2769
2770 // FIXME: support control flow guard targets.
2771 if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2772 return false;
2773
2774 // FIXME: support statepoints and related.
2775 if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))
2776 return false;
2777
2778 if (CI.isInlineAsm())
2779 return translateInlineAsm(CI, MIRBuilder);
2780
2781 diagnoseDontCall(CI);
2782
2783 Intrinsic::ID ID = F ? F->getIntrinsicID() : Intrinsic::not_intrinsic;
2784 if (!F || ID == Intrinsic::not_intrinsic)
2785 return translateCallBase(CI, MIRBuilder);
2786
2787 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
2788
2789 if (translateKnownIntrinsic(CI, ID, MIRBuilder))
2790 return true;
2791
2792 ArrayRef<Register> ResultRegs;
2793 if (!CI.getType()->isVoidTy())
2794 ResultRegs = getOrCreateVRegs(CI);
2795
2796 // Ignore the callsite attributes. Backend code is most likely not expecting
2797 // an intrinsic to sometimes have side effects and sometimes not.
2798 MachineInstrBuilder MIB = MIRBuilder.buildIntrinsic(ID, ResultRegs);
2799 if (isa<FPMathOperator>(CI))
2800 MIB->copyIRFlags(CI);
2801
2802 for (const auto &Arg : enumerate(CI.args())) {
2803 // If this is required to be an immediate, don't materialize it in a
2804 // register.
2805 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
2806 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
2807 // imm arguments are more convenient than cimm (and realistically
2808 // probably sufficient), so use them.
2809 assert(CI->getBitWidth() <= 64 &&
2810 "large intrinsic immediates not handled");
2811 MIB.addImm(CI->getSExtValue());
2812 } else {
2813 MIB.addFPImm(cast<ConstantFP>(Arg.value()));
2814 }
2815 } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
2816 auto *MD = MDVal->getMetadata();
2817 auto *MDN = dyn_cast<MDNode>(MD);
2818 if (!MDN) {
2819 if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
2820 MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
2821 else // This was probably an MDString.
2822 return false;
2823 }
2824 MIB.addMetadata(MDN);
2825 } else {
2826 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
2827 if (VRegs.size() > 1)
2828 return false;
2829 MIB.addUse(VRegs[0]);
2830 }
2831 }
2832
2833 // Add a MachineMemOperand if it is a target mem intrinsic.
2834 TargetLowering::IntrinsicInfo Info;
2835 // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
2836 if (TLI->getTgtMemIntrinsic(Info, CI, *MF, ID)) {
2837 Align Alignment = Info.align.value_or(
2838 DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
2839 LLT MemTy = Info.memVT.isSimple()
2840 ? getLLTForMVT(Info.memVT.getSimpleVT())
2841 : LLT::scalar(Info.memVT.getStoreSizeInBits());
2842
2843     // TODO: We currently just fall back to address space 0 if getTgtMemIntrinsic
2844 // didn't yield anything useful.
2845 MachinePointerInfo MPI;
2846 if (Info.ptrVal)
2847 MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
2848 else if (Info.fallbackAddressSpace)
2849 MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
2850 MIB.addMemOperand(MF->getMachineMemOperand(
2851 MPI, Info.flags, MemTy, Alignment, CI.getAAMetadata(),
2852 /*Ranges=*/nullptr, Info.ssid, Info.order, Info.failureOrder));
2853 }
2854
2855 if (CI.isConvergent()) {
2856 if (auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl)) {
2857 auto *Token = Bundle->Inputs[0].get();
2858 Register TokenReg = getOrCreateVReg(*Token);
2859 MIB.addUse(TokenReg, RegState::Implicit);
2860 }
2861 }
2862
2863 return true;
2864 }
2865
2866 bool IRTranslator::findUnwindDestinations(
2867 const BasicBlock *EHPadBB,
2868 BranchProbability Prob,
2869 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2870 &UnwindDests) {
2871 EHPersonality Personality = classifyEHPersonality(
2872 EHPadBB->getParent()->getFunction().getPersonalityFn());
2873 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2874 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2875 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2876 bool IsSEH = isAsynchronousEHPersonality(Personality);
2877
2878 if (IsWasmCXX) {
2879 // Ignore this for now.
2880 return false;
2881 }
2882
2883 while (EHPadBB) {
2884 BasicBlock::const_iterator Pad = EHPadBB->getFirstNonPHIIt();
2885 BasicBlock *NewEHPadBB = nullptr;
2886 if (isa<LandingPadInst>(Pad)) {
2887 // Stop on landingpads. They are not funclets.
2888 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2889 break;
2890 }
2891 if (isa<CleanupPadInst>(Pad)) {
2892 // Stop on cleanup pads. Cleanups are always funclet entries for all known
2893 // personalities.
2894 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2895 UnwindDests.back().first->setIsEHScopeEntry();
2896 UnwindDests.back().first->setIsEHFuncletEntry();
2897 break;
2898 }
2899 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2900 // Add the catchpad handlers to the possible destinations.
2901 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2902 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2903 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2904 if (IsMSVCCXX || IsCoreCLR)
2905 UnwindDests.back().first->setIsEHFuncletEntry();
2906 if (!IsSEH)
2907 UnwindDests.back().first->setIsEHScopeEntry();
2908 }
2909 NewEHPadBB = CatchSwitch->getUnwindDest();
2910 } else {
2911 continue;
2912 }
2913
2914 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2915 if (BPI && NewEHPadBB)
2916 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2917 EHPadBB = NewEHPadBB;
2918 }
2919 return true;
2920 }
2921
2922 bool IRTranslator::translateInvoke(const User &U,
2923 MachineIRBuilder &MIRBuilder) {
2924 const InvokeInst &I = cast<InvokeInst>(U);
2925 MCContext &Context = MF->getContext();
2926
2927 const BasicBlock *ReturnBB = I.getSuccessor(0);
2928 const BasicBlock *EHPadBB = I.getSuccessor(1);
2929
2930 const Function *Fn = I.getCalledFunction();
2931
2932 // FIXME: support invoking patchpoint and statepoint intrinsics.
2933 if (Fn && Fn->isIntrinsic())
2934 return false;
2935
2936 // FIXME: support whatever these are.
2937 if (I.hasDeoptState())
2938 return false;
2939
2940 // FIXME: support control flow guard targets.
2941 if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2942 return false;
2943
2944 // FIXME: support Windows exception handling.
2945 if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHIIt()))
2946 return false;
2947
2948 // FIXME: support Windows dllimport function calls and calls through
2949 // weak symbols.
2950 if (Fn && (Fn->hasDLLImportStorageClass() ||
2951 (MF->getTarget().getTargetTriple().isOSWindows() &&
2952 Fn->hasExternalWeakLinkage())))
2953 return false;
2954
2955 bool LowerInlineAsm = I.isInlineAsm();
2956 bool NeedEHLabel = true;
2957
2958 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
2959 // the region covered by the try.
2960 MCSymbol *BeginSymbol = nullptr;
2961 if (NeedEHLabel) {
2962 MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
2963 BeginSymbol = Context.createTempSymbol();
2964 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
2965 }
2966
2967 if (LowerInlineAsm) {
2968 if (!translateInlineAsm(I, MIRBuilder))
2969 return false;
2970 } else if (!translateCallBase(I, MIRBuilder))
2971 return false;
2972
2973 MCSymbol *EndSymbol = nullptr;
2974 if (NeedEHLabel) {
2975 EndSymbol = Context.createTempSymbol();
2976 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
2977 }
2978
2979 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2980 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2981 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
2982 BranchProbability EHPadBBProb =
2983 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2984 : BranchProbability::getZero();
2985
2986 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
2987 return false;
2988
2989 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
2990 &ReturnMBB = getMBB(*ReturnBB);
2991 // Update successor info.
2992 addSuccessorWithProb(InvokeMBB, &ReturnMBB);
2993 for (auto &UnwindDest : UnwindDests) {
2994 UnwindDest.first->setIsEHPad();
2995 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2996 }
2997 InvokeMBB->normalizeSuccProbs();
2998
2999 if (NeedEHLabel) {
3000 assert(BeginSymbol && "Expected a begin symbol!");
3001 assert(EndSymbol && "Expected an end symbol!");
3002 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
3003 }
3004
3005 MIRBuilder.buildBr(ReturnMBB);
3006 return true;
3007 }
3008
3009 bool IRTranslator::translateCallBr(const User &U,
3010 MachineIRBuilder &MIRBuilder) {
3011 // FIXME: Implement this.
3012 return false;
3013 }
3014
3015 bool IRTranslator::translateLandingPad(const User &U,
3016 MachineIRBuilder &MIRBuilder) {
3017 const LandingPadInst &LP = cast<LandingPadInst>(U);
3018
3019 MachineBasicBlock &MBB = MIRBuilder.getMBB();
3020
3021 MBB.setIsEHPad();
3022
3023 // If there aren't registers to copy the values into (e.g., during SjLj
3024 // exceptions), then don't bother.
3025 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
3026 if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
3027 TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
3028 return true;
3029
3030 // If the landingpad's return type is token type, we don't create instructions
3031 // for its exception pointer and selector value. The extraction of exception
3032 // pointer or selector value from token type landingpads is not currently
3033 // supported.
3034 if (LP.getType()->isTokenTy())
3035 return true;
3036
3037 // Add a label to mark the beginning of the landing pad. Deletion of the
3038 // landing pad can thus be detected via the MachineModuleInfo.
3039 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
3040 .addSym(MF->addLandingPad(&MBB));
3041
3042 // If the unwinder does not preserve all registers, ensure that the
3043 // function marks the clobbered registers as used.
3044 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
3045 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
3046 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
3047
3048 LLT Ty = getLLTForType(*LP.getType(), *DL);
3049 Register Undef = MRI->createGenericVirtualRegister(Ty);
3050 MIRBuilder.buildUndef(Undef);
3051
3052 SmallVector<LLT, 2> Tys;
3053 for (Type *Ty : cast<StructType>(LP.getType())->elements())
3054 Tys.push_back(getLLTForType(*Ty, *DL));
3055 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
3056
3057 // Mark exception register as live in.
3058 Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
3059 if (!ExceptionReg)
3060 return false;
3061
3062 MBB.addLiveIn(ExceptionReg);
3063 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
3064 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
3065
3066 Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
3067 if (!SelectorReg)
3068 return false;
3069
3070 MBB.addLiveIn(SelectorReg);
3071 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
3072 MIRBuilder.buildCopy(PtrVReg, SelectorReg);
3073 MIRBuilder.buildCast(ResRegs[1], PtrVReg);
3074
3075 return true;
3076 }
3077
3078 bool IRTranslator::translateAlloca(const User &U,
3079 MachineIRBuilder &MIRBuilder) {
3080 auto &AI = cast<AllocaInst>(U);
3081
3082 if (AI.isSwiftError())
3083 return true;
3084
3085 if (AI.isStaticAlloca()) {
3086 Register Res = getOrCreateVReg(AI);
3087 int FI = getOrCreateFrameIndex(AI);
3088 MIRBuilder.buildFrameIndex(Res, FI);
3089 return true;
3090 }
3091
3092 // FIXME: support stack probing for Windows.
3093 if (MF->getTarget().getTargetTriple().isOSWindows())
3094 return false;
3095
3096 // Now we're in the harder dynamic case.
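// The size is computed as NumElts * sizeof(Ty), rounded up to the stack
// alignment, and then materialized with G_DYN_STACKALLOC plus a
// variable-sized frame object.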
3097 Register NumElts = getOrCreateVReg(*AI.getArraySize());
3098 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
3099 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
3100 if (MRI->getType(NumElts) != IntPtrTy) {
3101 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
3102 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
3103 NumElts = ExtElts;
3104 }
3105
3106 Type *Ty = AI.getAllocatedType();
3107
3108 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
3109 Register TySize =
3110 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
3111 MIRBuilder.buildMul(AllocSize, NumElts, TySize);
3112
3113 // Round the size of the allocation up to the stack alignment size
3114 // by adding SA-1 to the size. This doesn't overflow because we're computing
3115 // an address inside an alloca.
3116 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
3117 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
3118 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
3119 MachineInstr::NoUWrap);
3120 auto AlignCst =
3121 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
3122 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
3123
3124 Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
3125 if (Alignment <= StackAlign)
3126 Alignment = Align(1);
3127 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
3128
3129 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
3130 assert(MF->getFrameInfo().hasVarSizedObjects());
3131 return true;
3132 }
3133
3134 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
3135 // FIXME: We may need more info about the type. Because of how LLT works,
3136 // we're completely discarding the i64/double distinction here (amongst
3137 // others). Fortunately the ABIs I know of where that matters don't use va_arg
3138 // anyway but that's not guaranteed.
3139 MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
3140 {getOrCreateVReg(*U.getOperand(0)),
3141 DL->getABITypeAlign(U.getType()).value()});
3142 return true;
3143 }
3144
3145 bool IRTranslator::translateUnreachable(const User &U,
3146 MachineIRBuilder &MIRBuilder) {
3147 auto &UI = cast<UnreachableInst>(U);
3148 if (!UI.shouldLowerToTrap(MF->getTarget().Options.TrapUnreachable,
3149 MF->getTarget().Options.NoTrapAfterNoreturn))
3150 return true;
3151
3152 MIRBuilder.buildTrap();
3153 return true;
3154 }
3155
3156 bool IRTranslator::translateInsertElement(const User &U,
3157 MachineIRBuilder &MIRBuilder) {
3158 // If it is a <1 x Ty> vector, use the scalar as it is
3159 // not a legal vector type in LLT.
3160 if (auto *FVT = dyn_cast<FixedVectorType>(U.getType());
3161 FVT && FVT->getNumElements() == 1)
3162 return translateCopy(U, *U.getOperand(1), MIRBuilder);
3163
3164 Register Res = getOrCreateVReg(U);
3165 Register Val = getOrCreateVReg(*U.getOperand(0));
3166 Register Elt = getOrCreateVReg(*U.getOperand(1));
3167 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
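// Normalize the index operand to the preferred vector-index width: constant
// indices are rewritten directly, anything else is zero-extended or truncated
// below.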
3168 Register Idx;
3169 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(2))) {
3170 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3171 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3172 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
3173 Idx = getOrCreateVReg(*NewIdxCI);
3174 }
3175 }
3176 if (!Idx)
3177 Idx = getOrCreateVReg(*U.getOperand(2));
3178 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3179 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3180 Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
3181 }
3182 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
3183 return true;
3184 }
3185
3186 bool IRTranslator::translateInsertVector(const User &U,
3187 MachineIRBuilder &MIRBuilder) {
3188 Register Dst = getOrCreateVReg(U);
3189 Register Vec = getOrCreateVReg(*U.getOperand(0));
3190 Register Elt = getOrCreateVReg(*U.getOperand(1));
3191
3192 ConstantInt *CI = cast<ConstantInt>(U.getOperand(2));
3193 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3194
3195 // Resize Index to preferred index width.
3196 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3197 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3198 CI = ConstantInt::get(CI->getContext(), NewIdx);
3199 }
3200
3201 // If it is a <1 x Ty> vector, we have to use other means.
3202 if (auto *ResultType = dyn_cast<FixedVectorType>(U.getOperand(1)->getType());
3203 ResultType && ResultType->getNumElements() == 1) {
3204 if (auto *InputType = dyn_cast<FixedVectorType>(U.getOperand(0)->getType());
3205 InputType && InputType->getNumElements() == 1) {
3206 // We are inserting an illegal fixed vector into an illegal
3207 // fixed vector, use the scalar as it is not a legal vector type
3208 // in LLT.
3209 return translateCopy(U, *U.getOperand(0), MIRBuilder);
3210 }
3211 if (isa<FixedVectorType>(U.getOperand(0)->getType())) {
3212 // We are inserting an illegal fixed vector into a legal fixed
3213 // vector, use the scalar as it is not a legal vector type in
3214 // LLT.
3215 Register Idx = getOrCreateVReg(*CI);
3216 MIRBuilder.buildInsertVectorElement(Dst, Vec, Elt, Idx);
3217 return true;
3218 }
3219 if (isa<ScalableVectorType>(U.getOperand(0)->getType())) {
3220 // We are inserting an illegal fixed vector into a scalable
3221 // vector, use a scalar element insert.
3222 LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3223 Register Idx = getOrCreateVReg(*CI);
3224 auto ScaledIndex = MIRBuilder.buildMul(
3225 VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
3226 MIRBuilder.buildInsertVectorElement(Dst, Vec, Elt, ScaledIndex);
3227 return true;
3228 }
3229 }
3230
3231 MIRBuilder.buildInsertSubvector(
3232 getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
3233 getOrCreateVReg(*U.getOperand(1)), CI->getZExtValue());
3234 return true;
3235 }
3236
3237 bool IRTranslator::translateExtractElement(const User &U,
3238 MachineIRBuilder &MIRBuilder) {
3239 // If it is a <1 x Ty> vector, use the scalar as it is
3240 // not a legal vector type in LLT.
3241 if (const FixedVectorType *FVT =
3242 dyn_cast<FixedVectorType>(U.getOperand(0)->getType()))
3243 if (FVT->getNumElements() == 1)
3244 return translateCopy(U, *U.getOperand(0), MIRBuilder);
3245
3246 Register Res = getOrCreateVReg(U);
3247 Register Val = getOrCreateVReg(*U.getOperand(0));
3248 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3249 Register Idx;
3250 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
3251 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3252 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3253 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
3254 Idx = getOrCreateVReg(*NewIdxCI);
3255 }
3256 }
3257 if (!Idx)
3258 Idx = getOrCreateVReg(*U.getOperand(1));
3259 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3260 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3261 Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
3262 }
3263 MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
3264 return true;
3265 }
3266
3267 bool IRTranslator::translateExtractVector(const User &U,
3268 MachineIRBuilder &MIRBuilder) {
3269 Register Res = getOrCreateVReg(U);
3270 Register Vec = getOrCreateVReg(*U.getOperand(0));
3271 ConstantInt *CI = cast<ConstantInt>(U.getOperand(1));
3272 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3273
3274 // Resize Index to preferred index width.
3275 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3276 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3277 CI = ConstantInt::get(CI->getContext(), NewIdx);
3278 }
3279
3280 // If it is a <1 x Ty> vector, we have to use other means.
3281 if (auto *ResultType = dyn_cast<FixedVectorType>(U.getType());
3282 ResultType && ResultType->getNumElements() == 1) {
3283 if (auto *InputType = dyn_cast<FixedVectorType>(U.getOperand(0)->getType());
3284 InputType && InputType->getNumElements() == 1) {
3285 // We are extracting an illegal fixed vector from an illegal fixed vector,
3286 // use the scalar as it is not a legal vector type in LLT.
3287 return translateCopy(U, *U.getOperand(0), MIRBuilder);
3288 }
3289 if (isa<FixedVectorType>(U.getOperand(0)->getType())) {
3290 // We are extracting an illegal fixed vector from a legal fixed
3291 // vector, use the scalar as it is not a legal vector type in
3292 // LLT.
3293 Register Idx = getOrCreateVReg(*CI);
3294 MIRBuilder.buildExtractVectorElement(Res, Vec, Idx);
3295 return true;
3296 }
3297 if (isa<ScalableVectorType>(U.getOperand(0)->getType())) {
3298 // We are extracting an illegal fixed vector from a scalable
3299 // vector, use a scalar element extract.
3300 LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3301 Register Idx = getOrCreateVReg(*CI);
3302 auto ScaledIndex = MIRBuilder.buildMul(
3303 VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
3304 MIRBuilder.buildExtractVectorElement(Res, Vec, ScaledIndex);
3305 return true;
3306 }
3307 }
3308
3309 MIRBuilder.buildExtractSubvector(getOrCreateVReg(U),
3310 getOrCreateVReg(*U.getOperand(0)),
3311 CI->getZExtValue());
3312 return true;
3313 }
3314
3315 bool IRTranslator::translateShuffleVector(const User &U,
3316 MachineIRBuilder &MIRBuilder) {
3317 // A ShuffleVector that operates on scalable vectors is a splat vector where
3318 // the value of the splat vector is the 0th element of the first operand,
3319 // since the index mask operand is the zeroinitializer (undef and
3320 // poison are treated as zeroinitializer here).
3321 if (U.getOperand(0)->getType()->isScalableTy()) {
3322 Register Val = getOrCreateVReg(*U.getOperand(0));
3323 auto SplatVal = MIRBuilder.buildExtractVectorElementConstant(
3324 MRI->getType(Val).getElementType(), Val, 0);
3325 MIRBuilder.buildSplatVector(getOrCreateVReg(U), SplatVal);
3326 return true;
3327 }
3328
3329 ArrayRef<int> Mask;
3330 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
3331 Mask = SVI->getShuffleMask();
3332 else
3333 Mask = cast<ConstantExpr>(U).getShuffleMask();
3334 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
3335 MIRBuilder
3336 .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
3337 {getOrCreateVReg(*U.getOperand(0)),
3338 getOrCreateVReg(*U.getOperand(1))})
3339 .addShuffleMask(MaskAlloc);
3340 return true;
3341 }
3342
3343 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
3344 const PHINode &PI = cast<PHINode>(U);
3345
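// Create one G_PHI per component register now; the incoming (value, block)
// operands are filled in later by finishPendingPhis, once all predecessors
// have been translated.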
3346 SmallVector<MachineInstr *, 4> Insts;
3347 for (auto Reg : getOrCreateVRegs(PI)) {
3348 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
3349 Insts.push_back(MIB.getInstr());
3350 }
3351
3352 PendingPHIs.emplace_back(&PI, std::move(Insts));
3353 return true;
3354 }
3355
3356 bool IRTranslator::translateAtomicCmpXchg(const User &U,
3357 MachineIRBuilder &MIRBuilder) {
3358 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
3359
3360 auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
3361
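// The IR instruction returns a {value, success} struct, so expect two result
// registers and lower to G_ATOMIC_CMPXCHG_WITH_SUCCESS.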
3362 auto Res = getOrCreateVRegs(I);
3363 Register OldValRes = Res[0];
3364 Register SuccessRes = Res[1];
3365 Register Addr = getOrCreateVReg(*I.getPointerOperand());
3366 Register Cmp = getOrCreateVReg(*I.getCompareOperand());
3367 Register NewVal = getOrCreateVReg(*I.getNewValOperand());
3368
3369 MIRBuilder.buildAtomicCmpXchgWithSuccess(
3370 OldValRes, SuccessRes, Addr, Cmp, NewVal,
3371 *MF->getMachineMemOperand(
3372 MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
3373 getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3374 I.getSuccessOrdering(), I.getFailureOrdering()));
3375 return true;
3376 }
3377
3378 bool IRTranslator::translateAtomicRMW(const User &U,
3379 MachineIRBuilder &MIRBuilder) {
3380 if (containsBF16Type(U))
3381 return false;
3382
3383 const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
3384 auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
3385
3386 Register Res = getOrCreateVReg(I);
3387 Register Addr = getOrCreateVReg(*I.getPointerOperand());
3388 Register Val = getOrCreateVReg(*I.getValOperand());
3389
3390 unsigned Opcode = 0;
3391 switch (I.getOperation()) {
3392 default:
3393 return false;
3394 case AtomicRMWInst::Xchg:
3395 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
3396 break;
3397 case AtomicRMWInst::Add:
3398 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
3399 break;
3400 case AtomicRMWInst::Sub:
3401 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
3402 break;
3403 case AtomicRMWInst::And:
3404 Opcode = TargetOpcode::G_ATOMICRMW_AND;
3405 break;
3406 case AtomicRMWInst::Nand:
3407 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
3408 break;
3409 case AtomicRMWInst::Or:
3410 Opcode = TargetOpcode::G_ATOMICRMW_OR;
3411 break;
3412 case AtomicRMWInst::Xor:
3413 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
3414 break;
3415 case AtomicRMWInst::Max:
3416 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
3417 break;
3418 case AtomicRMWInst::Min:
3419 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
3420 break;
3421 case AtomicRMWInst::UMax:
3422 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
3423 break;
3424 case AtomicRMWInst::UMin:
3425 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
3426 break;
3427 case AtomicRMWInst::FAdd:
3428 Opcode = TargetOpcode::G_ATOMICRMW_FADD;
3429 break;
3430 case AtomicRMWInst::FSub:
3431 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
3432 break;
3433 case AtomicRMWInst::FMax:
3434 Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
3435 break;
3436 case AtomicRMWInst::FMin:
3437 Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
3438 break;
3439 case AtomicRMWInst::FMaximum:
3440 Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUM;
3441 break;
3442 case AtomicRMWInst::FMinimum:
3443 Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUM;
3444 break;
3445 case AtomicRMWInst::UIncWrap:
3446 Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
3447 break;
3448 case AtomicRMWInst::UDecWrap:
3449 Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
3450 break;
3451 case AtomicRMWInst::USubCond:
3452 Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;
3453 break;
3454 case AtomicRMWInst::USubSat:
3455 Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;
3456 break;
3457 }
3458
3459 MIRBuilder.buildAtomicRMW(
3460 Opcode, Res, Addr, Val,
3461 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
3462 Flags, MRI->getType(Val), getMemOpAlign(I),
3463 I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3464 I.getOrdering()));
3465 return true;
3466 }
3467
3468 bool IRTranslator::translateFence(const User &U,
3469 MachineIRBuilder &MIRBuilder) {
3470 const FenceInst &Fence = cast<FenceInst>(U);
3471 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
3472 Fence.getSyncScopeID());
3473 return true;
3474 }
3475
3476 bool IRTranslator::translateFreeze(const User &U,
3477 MachineIRBuilder &MIRBuilder) {
3478 const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
3479 const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));
3480
3481 assert(DstRegs.size() == SrcRegs.size() &&
3482 "Freeze with different source and destination type?");
3483
3484 for (unsigned I = 0; I < DstRegs.size(); ++I) {
3485 MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
3486 }
3487
3488 return true;
3489 }
3490
3491 void IRTranslator::finishPendingPhis() {
3492 #ifndef NDEBUG
3493 DILocationVerifier Verifier;
3494 GISelObserverWrapper WrapperObserver(&Verifier);
3495 RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
3496 #endif // ifndef NDEBUG
3497 for (auto &Phi : PendingPHIs) {
3498 const PHINode *PI = Phi.first;
3499 if (PI->getType()->isEmptyTy())
3500 continue;
3501 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
3502 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
3503 EntryBuilder->setDebugLoc(PI->getDebugLoc());
3504 #ifndef NDEBUG
3505 Verifier.setCurrentInst(PI);
3506 #endif // ifndef NDEBUG
3507
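// A single IR predecessor edge may map to several machine basic blocks (e.g.
// after switch lowering), and the same block may be reported more than once;
// SeenPreds makes sure each predecessor contributes exactly one operand pair.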
3508 SmallSet<const MachineBasicBlock *, 16> SeenPreds;
3509 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
3510 auto IRPred = PI->getIncomingBlock(i);
3511 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
3512 for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
3513 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
3514 continue;
3515 SeenPreds.insert(Pred);
3516 for (unsigned j = 0; j < ValRegs.size(); ++j) {
3517 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
3518 MIB.addUse(ValRegs[j]);
3519 MIB.addMBB(Pred);
3520 }
3521 }
3522 }
3523 }
3524 }
3525
3526 void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,
3527 const DILocalVariable *Variable,
3528 const DIExpression *Expression,
3529 const DebugLoc &DL,
3530 MachineIRBuilder &MIRBuilder) {
3531 assert(Variable->isValidLocationForIntrinsic(DL) &&
3532 "Expected inlined-at fields to agree");
3533 // Act as if we're handling a debug intrinsic.
3534 MIRBuilder.setDebugLoc(DL);
3535
3536 if (!V || HasArgList) {
3537 // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
3538 // terminate any prior location.
3539 MIRBuilder.buildIndirectDbgValue(0, Variable, Expression);
3540 return;
3541 }
3542
3543 if (const auto *CI = dyn_cast<Constant>(V)) {
3544 MIRBuilder.buildConstDbgValue(*CI, Variable, Expression);
3545 return;
3546 }
3547
3548 if (auto *AI = dyn_cast<AllocaInst>(V);
3549 AI && AI->isStaticAlloca() && Expression->startsWithDeref()) {
3550 // If the value is an alloca and the expression starts with a
3551 // dereference, track a stack slot instead of a register, as registers
3552 // may be clobbered.
3553 auto ExprOperands = Expression->getElements();
3554 auto *ExprDerefRemoved =
3555 DIExpression::get(AI->getContext(), ExprOperands.drop_front());
3556 MIRBuilder.buildFIDbgValue(getOrCreateFrameIndex(*AI), Variable,
3557 ExprDerefRemoved);
3558 return;
3559 }
3560 if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,
3561 MIRBuilder))
3562 return;
3563 for (Register Reg : getOrCreateVRegs(*V)) {
3564 // FIXME: This does not handle register-indirect values at offset 0. The
3565 // direct/indirect thing shouldn't really be handled by something as
3566 // implicit as reg+noreg vs reg+imm in the first place, but it seems
3567 // pretty baked in right now.
3568 MIRBuilder.buildDirectDbgValue(Reg, Variable, Expression);
3569 }
3570 }
3571
3572 void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
3573 const DILocalVariable *Variable,
3574 const DIExpression *Expression,
3575 const DebugLoc &DL,
3576 MachineIRBuilder &MIRBuilder) {
3577 if (!Address || isa<UndefValue>(Address)) {
3578 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");
3579 return;
3580 }
3581
3582 assert(Variable->isValidLocationForIntrinsic(DL) &&
3583 "Expected inlined-at fields to agree");
3584 auto AI = dyn_cast<AllocaInst>(Address);
3585 if (AI && AI->isStaticAlloca()) {
3586 // Static allocas are tracked at the MF level, no need for DBG_VALUE
3587 // instructions (in fact, they get ignored if they *do* exist).
3588 MF->setVariableDbgInfo(Variable, Expression,
3589 getOrCreateFrameIndex(*AI), DL);
3590 return;
3591 }
3592
3593 if (translateIfEntryValueArgument(true, Address, Variable,
3594 Expression, DL,
3595 MIRBuilder))
3596 return;
3597
3598 // A dbg.declare describes the address of a source variable, so lower it
3599 // into an indirect DBG_VALUE.
3600 MIRBuilder.setDebugLoc(DL);
3601 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address), Variable,
3602 Expression);
3603 }
3604
3605 void IRTranslator::translateDbgInfo(const Instruction &Inst,
3606 MachineIRBuilder &MIRBuilder) {
3607 for (DbgRecord &DR : Inst.getDbgRecordRange()) {
3608 if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
3609 MIRBuilder.setDebugLoc(DLR->getDebugLoc());
3610 assert(DLR->getLabel() && "Missing label");
3611 assert(DLR->getLabel()->isValidLocationForIntrinsic(
3612 MIRBuilder.getDebugLoc()) &&
3613 "Expected inlined-at fields to agree");
3614 MIRBuilder.buildDbgLabel(DLR->getLabel());
3615 continue;
3616 }
3617 DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
3618 const DILocalVariable *Variable = DVR.getVariable();
3619 const DIExpression *Expression = DVR.getExpression();
3620 Value *V = DVR.getVariableLocationOp(0);
3621 if (DVR.isDbgDeclare())
3622 translateDbgDeclareRecord(V, DVR.hasArgList(), Variable, Expression,
3623 DVR.getDebugLoc(), MIRBuilder);
3624 else
3625 translateDbgValueRecord(V, DVR.hasArgList(), Variable, Expression,
3626 DVR.getDebugLoc(), MIRBuilder);
3627 }
3628 }
3629
3630 bool IRTranslator::translate(const Instruction &Inst) {
3631 CurBuilder->setDebugLoc(Inst.getDebugLoc());
3632 CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
3633 CurBuilder->setMMRAMetadata(Inst.getMetadata(LLVMContext::MD_mmra));
3634
3635 if (TLI->fallBackToDAGISel(Inst))
3636 return false;
3637
3638 switch (Inst.getOpcode()) {
3639 #define HANDLE_INST(NUM, OPCODE, CLASS) \
3640 case Instruction::OPCODE: \
3641 return translate##OPCODE(Inst, *CurBuilder.get());
3642 #include "llvm/IR/Instruction.def"
3643 default:
3644 return false;
3645 }
3646 }
3647
3648 bool IRTranslator::translate(const Constant &C, Register Reg) {
3649 // We only emit constants into the entry block from here. To prevent jumpy
3650 // debug behaviour, remove the debug location.
3651 if (auto CurrInstDL = CurBuilder->getDL())
3652 EntryBuilder->setDebugLoc(DebugLoc());
3653
3654 if (auto CI = dyn_cast<ConstantInt>(&C)) {
3655 // buildConstant expects a to-be-splatted scalar ConstantInt.
3656 if (isa<VectorType>(CI->getType()))
3657 CI = ConstantInt::get(CI->getContext(), CI->getValue());
3658 EntryBuilder->buildConstant(Reg, *CI);
3659 } else if (auto CF = dyn_cast<ConstantFP>(&C)) {
3660 // buildFConstant expects a to-be-splatted scalar ConstantFP.
3661 if (isa<VectorType>(CF->getType()))
3662 CF = ConstantFP::get(CF->getContext(), CF->getValue());
3663 EntryBuilder->buildFConstant(Reg, *CF);
3664 } else if (isa<UndefValue>(C))
3665 EntryBuilder->buildUndef(Reg);
3666 else if (isa<ConstantPointerNull>(C))
3667 EntryBuilder->buildConstant(Reg, 0);
3668 else if (auto GV = dyn_cast<GlobalValue>(&C))
3669 EntryBuilder->buildGlobalValue(Reg, GV);
3670 else if (auto CPA = dyn_cast<ConstantPtrAuth>(&C)) {
3671 Register Addr = getOrCreateVReg(*CPA->getPointer());
3672 Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
3673 EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);
3674 } else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
3675 Constant &Elt = *CAZ->getElementValue(0u);
3676 if (isa<ScalableVectorType>(CAZ->getType())) {
3677 EntryBuilder->buildSplatVector(Reg, getOrCreateVReg(Elt));
3678 return true;
3679 }
3680 // Return the scalar if it is a <1 x Ty> vector.
3681 unsigned NumElts = CAZ->getElementCount().getFixedValue();
3682 if (NumElts == 1)
3683 return translateCopy(C, Elt, *EntryBuilder);
3684 // All elements are zero so we can just use the first one.
3685 EntryBuilder->buildSplatBuildVector(Reg, getOrCreateVReg(Elt));
3686 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
3687 // Return the scalar if it is a <1 x Ty> vector.
3688 if (CV->getNumElements() == 1)
3689 return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
3690 SmallVector<Register, 4> Ops;
3691 for (unsigned i = 0; i < CV->getNumElements(); ++i) {
3692 Constant &Elt = *CV->getElementAsConstant(i);
3693 Ops.push_back(getOrCreateVReg(Elt));
3694 }
3695 EntryBuilder->buildBuildVector(Reg, Ops);
3696 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
3697 switch(CE->getOpcode()) {
3698 #define HANDLE_INST(NUM, OPCODE, CLASS) \
3699 case Instruction::OPCODE: \
3700 return translate##OPCODE(*CE, *EntryBuilder.get());
3701 #include "llvm/IR/Instruction.def"
3702 default:
3703 return false;
3704 }
3705 } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
3706 if (CV->getNumOperands() == 1)
3707 return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
3708 SmallVector<Register, 4> Ops;
3709 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
3710 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3711 }
3712 EntryBuilder->buildBuildVector(Reg, Ops);
3713 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
3714 EntryBuilder->buildBlockAddress(Reg, BA);
3715 } else
3716 return false;
3717
3718 return true;
3719 }
3720
3721 bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
3722 MachineBasicBlock &MBB) {
3723 for (auto &BTB : SL->BitTestCases) {
3724 // Emit header first, if it wasn't already emitted.
3725 if (!BTB.Emitted)
3726 emitBitTestHeader(BTB, BTB.Parent);
3727
3728 BranchProbability UnhandledProb = BTB.Prob;
3729 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3730 UnhandledProb -= BTB.Cases[j].ExtraProb;
3731 // Set the current basic block to the mbb we wish to insert the code into
3732 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3733 // If all cases cover a contiguous range, it is not necessary to jump to
3734 // the default block after the last bit test fails. This is because the
3735 // range check during bit test header creation has guaranteed that every
3736 // case here doesn't go outside the range. In this case, there is no need
3737 // to perform the last bit test, as it will always be true. Instead, make
3738 // the second-to-last bit-test fall through to the target of the last bit
3739 // test, and delete the last bit test.
3740
3741 MachineBasicBlock *NextMBB;
3742 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3743 // Second-to-last bit-test with contiguous range: fall through to the
3744 // target of the final bit test.
3745 NextMBB = BTB.Cases[j + 1].TargetBB;
3746 } else if (j + 1 == ej) {
3747 // For the last bit test, fall through to Default.
3748 NextMBB = BTB.Default;
3749 } else {
3750 // Otherwise, fall through to the next bit test.
3751 NextMBB = BTB.Cases[j + 1].ThisBB;
3752 }
3753
3754 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
3755
3756 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3757 // We need to record the replacement phi edge here that normally
3758 // happens in emitBitTestCase before we delete the case, otherwise the
3759 // phi edge will be lost.
3760 addMachineCFGPred({BTB.Parent->getBasicBlock(),
3761 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3762 MBB);
3763 // Since we're not going to use the final bit test, remove it.
3764 BTB.Cases.pop_back();
3765 break;
3766 }
3767 }
3768 // This is "default" BB. We have two jumps to it. From "header" BB and from
3769 // last "case" BB, unless the latter was skipped.
3770 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3771 BTB.Default->getBasicBlock()};
3772 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3773 if (!BTB.ContiguousRange) {
3774 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3775 }
3776 }
3777 SL->BitTestCases.clear();
3778
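// Lower any pending jump tables: emit the range-check header first if needed,
// then the block performing the indirect branch through the table.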
3779 for (auto &JTCase : SL->JTCases) {
3780 // Emit header first, if it wasn't already emitted.
3781 if (!JTCase.first.Emitted)
3782 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3783
3784 emitJumpTable(JTCase.second, JTCase.second.MBB);
3785 }
3786 SL->JTCases.clear();
3787
3788 for (auto &SwCase : SL->SwitchCases)
3789 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3790 SL->SwitchCases.clear();
3791
3792 // Check if we need to generate stack-protector guard checks.
3793 StackProtector &SP = getAnalysis<StackProtector>();
3794 if (SP.shouldEmitSDCheck(BB)) {
3795 bool FunctionBasedInstrumentation =
3796 TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
3797 SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
3798 }
3799 // Handle stack protector.
3800 if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3801 LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
3802 return false;
3803 } else if (SPDescriptor.shouldEmitStackProtector()) {
3804 MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3805 MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3806
3807 // Find the split point to split the parent mbb. At the same time copy all
3808 // physical registers used in the tail of parent mbb into virtual registers
3809 // before the split point and back into physical registers after the split
3810 // point. This prevents us needing to deal with Live-ins and many other
3811 // register allocation issues caused by us splitting the parent mbb. The
3812 // register allocator will clean up said virtual copies later on.
3813 MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
3814 ParentMBB, *MF->getSubtarget().getInstrInfo());
3815
3816 // Splice the terminator of ParentMBB into SuccessMBB.
3817 SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
3818 ParentMBB->end());
3819
3820 // Add compare/jump on neq/jump to the parent BB.
3821 if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3822 return false;
3823
3824 // Codegen the failure MBB if we have not done so yet.
3825 MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3826 if (FailureMBB->empty()) {
3827 if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3828 return false;
3829 }
3830
3831 // Clear the Per-BB State.
3832 SPDescriptor.resetPerBBState();
3833 }
3834 return true;
3835 }
3836
3837 bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
3838 MachineBasicBlock *ParentBB) {
3839 CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
3840 // First create the loads to the guard/stack slot for the comparison.
3841 Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
3842 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
3843 LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));
3844
3845 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3846 int FI = MFI.getStackProtectorIndex();
3847
3848 Register Guard;
3849 Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
3850 const Module &M = *ParentBB->getParent()->getFunction().getParent();
3851 Align Align = DL->getPrefTypeAlign(PointerType::getUnqual(M.getContext()));
3852
3853 // Generate code to load the content of the guard slot.
3854 Register GuardVal =
3855 CurBuilder
3856 ->buildLoad(PtrMemTy, StackSlotPtr,
3857 MachinePointerInfo::getFixedStack(*MF, FI), Align,
3858 MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
3859 .getReg(0);
3860
3861 if (TLI->useStackGuardXorFP()) {
3862 LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
3863 return false;
3864 }
3865
3866 // Retrieve guard check function, nullptr if instrumentation is inlined.
3867 if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
3868 // This path is currently untestable on GlobalISel, since the only platform
3869 // that needs this seems to be Windows, and we fall back on that currently.
3870 // The code still lives here in case that changes.
3871 // Silence warning about unused variable until the code below that uses
3872 // 'GuardCheckFn' is enabled.
3873 (void)GuardCheckFn;
3874 return false;
3875 #if 0
3876 // The target provides a guard check function to validate the guard value.
3877 // Generate a call to that function with the content of the guard slot as
3878 // argument.
3879 FunctionType *FnTy = GuardCheckFn->getFunctionType();
3880 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3881 ISD::ArgFlagsTy Flags;
3882 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
3883 Flags.setInReg();
3884 CallLowering::ArgInfo GuardArgInfo(
3885 {GuardVal, FnTy->getParamType(0), {Flags}});
3886
3887 CallLowering::CallLoweringInfo Info;
3888 Info.OrigArgs.push_back(GuardArgInfo);
3889 Info.CallConv = GuardCheckFn->getCallingConv();
3890 Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
3891 Info.OrigRet = {Register(), FnTy->getReturnType()};
3892 if (!CLI->lowerCall(MIRBuilder, Info)) {
3893 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
3894 return false;
3895 }
3896 return true;
3897 #endif
3898 }
3899
3900 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
3901 // Otherwise, emit a volatile load to retrieve the stack guard value.
3902 if (TLI->useLoadStackGuardNode(*ParentBB->getBasicBlock()->getModule())) {
3903 Guard =
3904 MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
3905 getStackGuard(Guard, *CurBuilder);
3906 } else {
3907 // TODO: test using android subtarget when we support @llvm.thread.pointer.
3908 const Value *IRGuard = TLI->getSDagStackGuard(M);
3909 Register GuardPtr = getOrCreateVReg(*IRGuard);
3910
3911 Guard = CurBuilder
3912 ->buildLoad(PtrMemTy, GuardPtr,
3913 MachinePointerInfo::getFixedStack(*MF, FI), Align,
3914 MachineMemOperand::MOLoad |
3915 MachineMemOperand::MOVolatile)
3916 .getReg(0);
3917 }
3918
3919 // Perform the comparison.
3920 auto Cmp =
3921 CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
3922 // If the guard and the stack slot contents are not equal, branch to the failure MBB.
3923 CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
3924 // Otherwise branch to success MBB.
3925 CurBuilder->buildBr(*SPD.getSuccessMBB());
3926 return true;
3927 }
3928
3929 bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
3930 MachineBasicBlock *FailureBB) {
3931 CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
3932
3933 const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
3934 const char *Name = TLI->getLibcallName(Libcall);
3935
3936 CallLowering::CallLoweringInfo Info;
3937 Info.CallConv = TLI->getLibcallCallingConv(Libcall);
3938 Info.Callee = MachineOperand::CreateES(Name);
3939 Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
3940 0};
3941 if (!CLI->lowerCall(*CurBuilder, Info)) {
3942 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
3943 return false;
3944 }
3945
3946 // Emit a trap instruction if we are required to do so.
3947 const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;
3948 if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)
3949 CurBuilder->buildInstr(TargetOpcode::G_TRAP);
3950
3951 return true;
3952 }
3953
3954 void IRTranslator::finalizeFunction() {
3955 // Release the memory used by the different maps we
3956 // needed during the translation.
3957 PendingPHIs.clear();
3958 VMap.reset();
3959 FrameIndices.clear();
3960 MachinePreds.clear();
3961 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
3962 // to avoid accessing free'd memory (in runOnMachineFunction) and to avoid
3963 // destroying it twice (in ~IRTranslator() and ~LLVMContext())
3964 EntryBuilder.reset();
3965 CurBuilder.reset();
3966 FuncInfo.clear();
3967 SPDescriptor.resetPerFunctionState();
3968 }
3969
3970 /// Returns true if a BasicBlock \p BB within a variadic function contains a
3971 /// variadic musttail call.
3972 static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
3973 if (!IsVarArg)
3974 return false;
3975
3976 // Walk the block backwards, because tail calls usually only appear at the end
3977 // of a block.
3978 return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
3979 const auto *CI = dyn_cast<CallInst>(&I);
3980 return CI && CI->isMustTailCall();
3981 });
3982 }
3983
3984 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
3985 MF = &CurMF;
3986 const Function &F = MF->getFunction();
3987 GISelCSEAnalysisWrapper &Wrapper =
3988 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
3989 // Set the CSEConfig and run the analysis.
3990 GISelCSEInfo *CSEInfo = nullptr;
3991 TPC = &getAnalysis<TargetPassConfig>();
3992 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
3993 ? EnableCSEInIRTranslator
3994 : TPC->isGISelCSEEnabled();
3995 TLI = MF->getSubtarget().getTargetLowering();
3996
3997 if (EnableCSE) {
3998 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3999 CSEInfo = &Wrapper.get(TPC->getCSEConfig());
4000 EntryBuilder->setCSEInfo(CSEInfo);
4001 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
4002 CurBuilder->setCSEInfo(CSEInfo);
4003 } else {
4004 EntryBuilder = std::make_unique<MachineIRBuilder>();
4005 CurBuilder = std::make_unique<MachineIRBuilder>();
4006 }
4007 CLI = MF->getSubtarget().getCallLowering();
4008 CurBuilder->setMF(*MF);
4009 EntryBuilder->setMF(*MF);
4010 MRI = &MF->getRegInfo();
4011 DL = &F.getDataLayout();
4012 ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
4013 const TargetMachine &TM = MF->getTarget();
4014 TM.resetTargetOptions(F);
4015 EnableOpts = OptLevel != CodeGenOptLevel::None && !skipFunction(F);
4016 FuncInfo.MF = MF;
4017 if (EnableOpts) {
4018 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
4019 FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
4020 } else {
4021 AA = nullptr;
4022 FuncInfo.BPI = nullptr;
4023 }
4024
4025 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
4026 MF->getFunction());
4027 LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
4028 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
4029
4030 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
4031 SL->init(*TLI, TM, *DL);
4032
4033 assert(PendingPHIs.empty() && "stale PHIs");
4034
4035 // Targets which want to use big endian can enable it using
4036 // enableBigEndian()
4037 if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
4038 // Currently we don't properly handle big endian code.
4039 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4040 F.getSubprogram(), &F.getEntryBlock());
4041 R << "unable to translate in big endian mode";
4042 reportTranslationError(*MF, *TPC, *ORE, R);
4043 return false;
4044 }
4045
4046 // Release the per-function state when we return, whether we succeeded or not.
4047 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
4048
4049 // Set up a separate basic block for the arguments and constants.
4050 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
4051 MF->push_back(EntryBB);
4052 EntryBuilder->setMBB(*EntryBB);
4053
4054 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHIIt()->getDebugLoc();
4055 SwiftError.setFunction(CurMF);
4056 SwiftError.createEntriesInEntryBlock(DbgLoc);
4057
4058 bool IsVarArg = F.isVarArg();
4059 bool HasMustTailInVarArgFn = false;
4060
4061 // Create all blocks, in IR order, to preserve the layout.
4062 FuncInfo.MBBMap.resize(F.getMaxBlockNumber());
4063 for (const BasicBlock &BB: F) {
4064 auto *&MBB = FuncInfo.MBBMap[BB.getNumber()];
4065
4066 MBB = MF->CreateMachineBasicBlock(&BB);
4067 MF->push_back(MBB);
4068
4069 if (BB.hasAddressTaken())
4070 MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));
4071
4072 if (!HasMustTailInVarArgFn)
4073 HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
4074 }
4075
4076 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
4077
4078 // Make our arguments/constants entry block fall through to the IR entry block.
4079 EntryBB->addSuccessor(&getMBB(F.front()));
4080
4081 if (CLI->fallBackToDAGISel(*MF)) {
4082 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4083 F.getSubprogram(), &F.getEntryBlock());
4084 R << "unable to lower function: "
4085 << ore::NV("Prototype", F.getFunctionType());
4086 reportTranslationError(*MF, *TPC, *ORE, R);
4087 return false;
4088 }
4089
4090 // Lower the actual args into this basic block.
4091 SmallVector<ArrayRef<Register>, 8> VRegArgs;
4092 for (const Argument &Arg: F.args()) {
4093 if (DL->getTypeStoreSize(Arg.getType()).isZero())
4094 continue; // Don't handle zero sized types.
4095 ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
4096 VRegArgs.push_back(VRegs);
4097
4098 if (Arg.hasSwiftErrorAttr()) {
4099 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
4100 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
4101 }
4102 }
4103
4104 if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
4105 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4106 F.getSubprogram(), &F.getEntryBlock());
4107 R << "unable to lower arguments: "
4108 << ore::NV("Prototype", F.getFunctionType());
4109 reportTranslationError(*MF, *TPC, *ORE, R);
4110 return false;
4111 }
4112
4113 // Need to visit defs before uses when translating instructions.
4114 GISelObserverWrapper WrapperObserver;
4115 if (EnableCSE && CSEInfo)
4116 WrapperObserver.addObserver(CSEInfo);
4117 {
4118 ReversePostOrderTraversal<const Function *> RPOT(&F);
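// Visiting blocks in reverse post-order ensures a value's definition is
// translated before its uses (PHI operands are the exception and are fixed up
// in finishPendingPhis).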
4119 #ifndef NDEBUG
4120 DILocationVerifier Verifier;
4121 WrapperObserver.addObserver(&Verifier);
4122 #endif // ifndef NDEBUG
4123 RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
4124 for (const BasicBlock *BB : RPOT) {
4125 MachineBasicBlock &MBB = getMBB(*BB);
4126 // Set the insertion point of all the following translations to
4127 // the end of this basic block.
4128 CurBuilder->setMBB(MBB);
4129 HasTailCall = false;
4130 for (const Instruction &Inst : *BB) {
4131 // If we translated a tail call in the last step, then we know
4132 // everything after the call is either a return, or something that is
4133 // handled by the call itself. (E.g. a lifetime marker or assume
4134 // intrinsic.) In this case, we should stop translating the block and
4135 // move on.
4136 if (HasTailCall)
4137 break;
4138 #ifndef NDEBUG
4139 Verifier.setCurrentInst(&Inst);
4140 #endif // ifndef NDEBUG
4141
4142 // Translate any debug-info attached to the instruction.
4143 translateDbgInfo(Inst, *CurBuilder);
4144
4145 if (translate(Inst))
4146 continue;
4147
4148 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4149 Inst.getDebugLoc(), BB);
4150 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
4151
4152 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
4153 std::string InstStrStorage;
4154 raw_string_ostream InstStr(InstStrStorage);
4155 InstStr << Inst;
4156
4157 R << ": '" << InstStrStorage << "'";
4158 }
4159
4160 reportTranslationError(*MF, *TPC, *ORE, R);
4161 return false;
4162 }
4163
4164 if (!finalizeBasicBlock(*BB, MBB)) {
4165 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4166 BB->getTerminator()->getDebugLoc(), BB);
4167 R << "unable to translate basic block";
4168 reportTranslationError(*MF, *TPC, *ORE, R);
4169 return false;
4170 }
4171 }
4172 #ifndef NDEBUG
4173 WrapperObserver.removeObserver(&Verifier);
4174 #endif
4175 }
4176
4177 finishPendingPhis();
4178
4179 SwiftError.propagateVRegs();
4180
4181 // Merge the argument lowering and constants block with its single
4182 // successor, the LLVM-IR entry block. We want the basic block to
4183 // be maximal.
4184 assert(EntryBB->succ_size() == 1 &&
4185 "Custom BB used for lowering should have only one successor");
4186 // Get the successor of the current entry block.
4187 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
4188 assert(NewEntryBB.pred_size() == 1 &&
4189 "LLVM-IR entry block has a predecessor!?");
4190 // Move all the instructions from the current entry block to the
4191 // new entry block.
4192 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
4193 EntryBB->end());
4194
4195 // Update the live-in information for the new entry block.
4196 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
4197 NewEntryBB.addLiveIn(LiveIn);
4198 NewEntryBB.sortUniqueLiveIns();
4199
4200 // Get rid of the now empty basic block.
4201 EntryBB->removeSuccessor(&NewEntryBB);
4202 MF->remove(EntryBB);
4203 MF->deleteMachineBasicBlock(EntryBB);
4204
4205 assert(&MF->front() == &NewEntryBB &&
4206 "New entry wasn't next in the list of basic block!");
4207
4208 // Initialize stack protector information.
4209 StackProtector &SP = getAnalysis<StackProtector>();
4210 SP.copyToMachineFrameInfo(MF->getFrameInfo());
4211
4212 return false;
4213 }
4214