1 //===- SelectionDAGISel.cpp - Implement the SelectionDAGISel class --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements the SelectionDAGISel class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/CodeGen/SelectionDAGISel.h"
14 #include "ScheduleDAGSDNodes.h"
15 #include "SelectionDAGBuilder.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/PostOrderIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/ADT/StringRef.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AssumptionCache.h"
26 #include "llvm/Analysis/BranchProbabilityInfo.h"
27 #include "llvm/Analysis/CFG.h"
28 #include "llvm/Analysis/LazyBlockFrequencyInfo.h"
29 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
30 #include "llvm/Analysis/ProfileSummaryInfo.h"
31 #include "llvm/Analysis/TargetLibraryInfo.h"
32 #include "llvm/Analysis/TargetTransformInfo.h"
33 #include "llvm/Analysis/UniformityAnalysis.h"
34 #include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
35 #include "llvm/CodeGen/CodeGenCommonISel.h"
36 #include "llvm/CodeGen/FastISel.h"
37 #include "llvm/CodeGen/FunctionLoweringInfo.h"
38 #include "llvm/CodeGen/GCMetadata.h"
39 #include "llvm/CodeGen/ISDOpcodes.h"
40 #include "llvm/CodeGen/MachineBasicBlock.h"
41 #include "llvm/CodeGen/MachineFrameInfo.h"
42 #include "llvm/CodeGen/MachineFunction.h"
43 #include "llvm/CodeGen/MachineFunctionPass.h"
44 #include "llvm/CodeGen/MachineInstr.h"
45 #include "llvm/CodeGen/MachineInstrBuilder.h"
46 #include "llvm/CodeGen/MachineMemOperand.h"
47 #include "llvm/CodeGen/MachineModuleInfo.h"
48 #include "llvm/CodeGen/MachineOperand.h"
49 #include "llvm/CodeGen/MachinePassRegistry.h"
50 #include "llvm/CodeGen/MachineRegisterInfo.h"
51 #include "llvm/CodeGen/MachineValueType.h"
52 #include "llvm/CodeGen/SchedulerRegistry.h"
53 #include "llvm/CodeGen/SelectionDAG.h"
54 #include "llvm/CodeGen/SelectionDAGNodes.h"
55 #include "llvm/CodeGen/StackMaps.h"
56 #include "llvm/CodeGen/StackProtector.h"
57 #include "llvm/CodeGen/SwiftErrorValueTracking.h"
58 #include "llvm/CodeGen/TargetInstrInfo.h"
59 #include "llvm/CodeGen/TargetLowering.h"
60 #include "llvm/CodeGen/TargetRegisterInfo.h"
61 #include "llvm/CodeGen/TargetSubtargetInfo.h"
62 #include "llvm/CodeGen/ValueTypes.h"
63 #include "llvm/CodeGen/WinEHFuncInfo.h"
64 #include "llvm/IR/BasicBlock.h"
65 #include "llvm/IR/Constants.h"
66 #include "llvm/IR/DataLayout.h"
67 #include "llvm/IR/DebugInfo.h"
68 #include "llvm/IR/DebugInfoMetadata.h"
69 #include "llvm/IR/DebugLoc.h"
70 #include "llvm/IR/DiagnosticInfo.h"
71 #include "llvm/IR/EHPersonalities.h"
72 #include "llvm/IR/Function.h"
73 #include "llvm/IR/InlineAsm.h"
74 #include "llvm/IR/InstIterator.h"
75 #include "llvm/IR/Instruction.h"
76 #include "llvm/IR/Instructions.h"
77 #include "llvm/IR/IntrinsicInst.h"
78 #include "llvm/IR/Intrinsics.h"
79 #include "llvm/IR/IntrinsicsWebAssembly.h"
80 #include "llvm/IR/Metadata.h"
81 #include "llvm/IR/Statepoint.h"
82 #include "llvm/IR/Type.h"
83 #include "llvm/IR/User.h"
84 #include "llvm/IR/Value.h"
85 #include "llvm/InitializePasses.h"
86 #include "llvm/MC/MCInstrDesc.h"
87 #include "llvm/Pass.h"
88 #include "llvm/Support/BranchProbability.h"
89 #include "llvm/Support/Casting.h"
90 #include "llvm/Support/CodeGen.h"
91 #include "llvm/Support/CommandLine.h"
92 #include "llvm/Support/Compiler.h"
93 #include "llvm/Support/Debug.h"
94 #include "llvm/Support/ErrorHandling.h"
95 #include "llvm/Support/KnownBits.h"
96 #include "llvm/Support/Timer.h"
97 #include "llvm/Support/raw_ostream.h"
98 #include "llvm/Target/TargetIntrinsicInfo.h"
99 #include "llvm/Target/TargetMachine.h"
100 #include "llvm/Target/TargetOptions.h"
101 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
102 #include <algorithm>
103 #include <cassert>
104 #include <cstdint>
105 #include <iterator>
106 #include <limits>
107 #include <memory>
108 #include <optional>
109 #include <string>
110 #include <utility>
111 #include <vector>
112 
113 using namespace llvm;
114 
115 #define DEBUG_TYPE "isel"
116 
117 STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
118 STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected");
119 STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
120 STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
121 STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
122 STATISTIC(NumEntryBlocks, "Number of entry blocks encountered");
123 STATISTIC(NumFastIselFailLowerArguments,
124           "Number of entry blocks where fast isel failed to lower arguments");
125 
126 static cl::opt<int> EnableFastISelAbort(
127     "fast-isel-abort", cl::Hidden,
128     cl::desc("Enable abort calls when \"fast\" instruction selection "
129              "fails to lower an instruction: 0 disables the abort, 1 aborts "
130              "for ordinary instructions but not for arguments, calls, and "
131              "terminators, 2 also aborts for argument lowering, and 3 never "
132              "falls back to SelectionDAG."));
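// Illustrative usage sketch (not part of the original file): since this is a
// plain cl::opt<int>, it is normally exercised through a driver such as llc,
// e.g.
//   llc -O0 -fast-isel -fast-isel-abort=2 input.ll
// which turns a FastISel failure on argument lowering into a fatal error
// instead of a silent fallback to SelectionDAG.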
133 
134 static cl::opt<bool> EnableFastISelFallbackReport(
135     "fast-isel-report-on-fallback", cl::Hidden,
136     cl::desc("Emit a diagnostic when \"fast\" instruction selection "
137              "falls back to SelectionDAG."));
138 
139 static cl::opt<bool>
140 UseMBPI("use-mbpi",
141         cl::desc("use Machine Branch Probability Info"),
142         cl::init(true), cl::Hidden);
143 
144 #ifndef NDEBUG
145 static cl::opt<std::string>
146 FilterDAGBasicBlockName("filter-view-dags", cl::Hidden,
147                         cl::desc("Only display the basic block whose name "
148                                  "matches this for all view-*-dags options"));
149 static cl::opt<bool>
150 ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
151           cl::desc("Pop up a window to show dags before the first "
152                    "dag combine pass"));
153 static cl::opt<bool>
154 ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
155           cl::desc("Pop up a window to show dags before legalize types"));
156 static cl::opt<bool>
157     ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden,
158                      cl::desc("Pop up a window to show dags before the post "
159                               "legalize types dag combine pass"));
160 static cl::opt<bool>
161     ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
162                      cl::desc("Pop up a window to show dags before legalize"));
163 static cl::opt<bool>
164 ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
165           cl::desc("Pop up a window to show dags before the second "
166                    "dag combine pass"));
167 static cl::opt<bool>
168 ViewISelDAGs("view-isel-dags", cl::Hidden,
169           cl::desc("Pop up a window to show isel dags as they are selected"));
170 static cl::opt<bool>
171 ViewSchedDAGs("view-sched-dags", cl::Hidden,
172           cl::desc("Pop up a window to show sched dags as they are processed"));
173 static cl::opt<bool>
174 ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
175       cl::desc("Pop up a window to show SUnit dags after they are processed"));
176 #else
177 static const bool ViewDAGCombine1 = false, ViewLegalizeTypesDAGs = false,
178                   ViewDAGCombineLT = false, ViewLegalizeDAGs = false,
179                   ViewDAGCombine2 = false, ViewISelDAGs = false,
180                   ViewSchedDAGs = false, ViewSUnitDAGs = false;
181 #endif
182 
183 //===---------------------------------------------------------------------===//
184 ///
185 /// RegisterScheduler class - Track the registration of instruction schedulers.
186 ///
187 //===---------------------------------------------------------------------===//
188 MachinePassRegistry<RegisterScheduler::FunctionPassCtor>
189     RegisterScheduler::Registry;
190 
191 //===---------------------------------------------------------------------===//
192 ///
193 /// ISHeuristic command line option for instruction schedulers.
194 ///
195 //===---------------------------------------------------------------------===//
196 static cl::opt<RegisterScheduler::FunctionPassCtor, false,
197                RegisterPassParser<RegisterScheduler>>
198 ISHeuristic("pre-RA-sched",
199             cl::init(&createDefaultScheduler), cl::Hidden,
200             cl::desc("Instruction schedulers available (before register"
201                      " allocation):"));
202 
203 static RegisterScheduler
204 defaultListDAGScheduler("default", "Best scheduler for the target",
205                         createDefaultScheduler);
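// A target or plugin can register an additional scheduler the same way: define
// a static RegisterScheduler whose constructor function matches
// RegisterScheduler::FunctionPassCtor, and it becomes selectable via the
// -pre-RA-sched option above. Hypothetical sketch (createMyScheduler is a
// placeholder, not an existing function):
//   static RegisterScheduler
//   mySched("my-sched", "Experimental scheduler", createMyScheduler);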
206 
207 namespace llvm {
208 
209   //===--------------------------------------------------------------------===//
210   /// This class is used by SelectionDAGISel to temporarily override
211   /// the optimization level on a per-function basis.
212   class OptLevelChanger {
213     SelectionDAGISel &IS;
214     CodeGenOpt::Level SavedOptLevel;
215     bool SavedFastISel;
216 
217   public:
218     OptLevelChanger(SelectionDAGISel &ISel,
219                     CodeGenOpt::Level NewOptLevel) : IS(ISel) {
220       SavedOptLevel = IS.OptLevel;
221       SavedFastISel = IS.TM.Options.EnableFastISel;
222       if (NewOptLevel == SavedOptLevel)
223         return;
224       IS.OptLevel = NewOptLevel;
225       IS.TM.setOptLevel(NewOptLevel);
226       LLVM_DEBUG(dbgs() << "\nChanging optimization level for Function "
227                         << IS.MF->getFunction().getName() << "\n");
228       LLVM_DEBUG(dbgs() << "\tBefore: -O" << SavedOptLevel << " ; After: -O"
229                         << NewOptLevel << "\n");
230       if (NewOptLevel == CodeGenOpt::None) {
231         IS.TM.setFastISel(IS.TM.getO0WantsFastISel());
232         LLVM_DEBUG(
233             dbgs() << "\tFastISel is "
234                    << (IS.TM.Options.EnableFastISel ? "enabled" : "disabled")
235                    << "\n");
236       }
237     }
238 
239     ~OptLevelChanger() {
240       if (IS.OptLevel == SavedOptLevel)
241         return;
242       LLVM_DEBUG(dbgs() << "\nRestoring optimization level for Function "
243                         << IS.MF->getFunction().getName() << "\n");
244       LLVM_DEBUG(dbgs() << "\tBefore: -O" << IS.OptLevel << " ; After: -O"
245                         << SavedOptLevel << "\n");
246       IS.OptLevel = SavedOptLevel;
247       IS.TM.setOptLevel(SavedOptLevel);
248       IS.TM.setFastISel(SavedFastISel);
249     }
250   };
251 
252   //===--------------------------------------------------------------------===//
253   /// createDefaultScheduler - This creates an instruction scheduler appropriate
254   /// for the target.
255   ScheduleDAGSDNodes* createDefaultScheduler(SelectionDAGISel *IS,
256                                              CodeGenOpt::Level OptLevel) {
257     const TargetLowering *TLI = IS->TLI;
258     const TargetSubtargetInfo &ST = IS->MF->getSubtarget();
259 
260     // First, see whether the target has its own way of selecting a scheduler.
261     if (auto *SchedulerCtor = ST.getDAGScheduler(OptLevel)) {
262       return SchedulerCtor(IS, OptLevel);
263     }
264 
265     if (OptLevel == CodeGenOpt::None ||
266         (ST.enableMachineScheduler() && ST.enableMachineSchedDefaultSched()) ||
267         TLI->getSchedulingPreference() == Sched::Source)
268       return createSourceListDAGScheduler(IS, OptLevel);
269     if (TLI->getSchedulingPreference() == Sched::RegPressure)
270       return createBURRListDAGScheduler(IS, OptLevel);
271     if (TLI->getSchedulingPreference() == Sched::Hybrid)
272       return createHybridListDAGScheduler(IS, OptLevel);
273     if (TLI->getSchedulingPreference() == Sched::VLIW)
274       return createVLIWDAGScheduler(IS, OptLevel);
275     if (TLI->getSchedulingPreference() == Sched::Fast)
276       return createFastDAGScheduler(IS, OptLevel);
277     if (TLI->getSchedulingPreference() == Sched::Linearize)
278       return createDAGLinearizer(IS, OptLevel);
279     assert(TLI->getSchedulingPreference() == Sched::ILP &&
280            "Unknown sched type!");
281     return createILPListDAGScheduler(IS, OptLevel);
282   }
283 
284 } // end namespace llvm
285 
286 // EmitInstrWithCustomInserter - This method should be implemented by targets
287 // that mark instructions with the 'usesCustomInserter' flag.  These
288 // instructions are special in various ways, which require special support to
289 // insert.  The specified MachineInstr is created but not inserted into any
290 // basic blocks, and this method is called to expand it into a sequence of
291 // instructions, potentially also creating new basic blocks and control flow.
292 // When new basic blocks are inserted and the edges from MBB to its successors
293 // are modified, the method should insert pairs of <OldSucc, NewSucc> into the
294 // DenseMap.
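// A target override typically switches on the pseudo opcode and expands it
// into real instructions and/or new blocks. Hedged sketch only; XYZ,
// SELECT_PSEUDO, and emitSelectPseudo are placeholders for a real target:
//   MachineBasicBlock *
//   XYZTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
//                                                  MachineBasicBlock *MBB) const {
//     switch (MI.getOpcode()) {
//     case XYZ::SELECT_PSEUDO:
//       return emitSelectPseudo(MI, MBB);
//     default:
//       llvm_unreachable("unexpected instr type to insert");
//     }
//   }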
295 MachineBasicBlock *
296 TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
297                                             MachineBasicBlock *MBB) const {
298 #ifndef NDEBUG
299   dbgs() << "If a target marks an instruction with "
300           "'usesCustomInserter', it must implement "
301           "TargetLowering::EmitInstrWithCustomInserter!\n";
302 #endif
303   llvm_unreachable(nullptr);
304 }
305 
306 void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
307                                                    SDNode *Node) const {
308   assert(!MI.hasPostISelHook() &&
309          "If a target marks an instruction with 'hasPostISelHook', "
310          "it must implement TargetLowering::AdjustInstrPostInstrSelection!");
311 }
312 
313 //===----------------------------------------------------------------------===//
314 // SelectionDAGISel code
315 //===----------------------------------------------------------------------===//
316 
317 SelectionDAGISel::SelectionDAGISel(char &ID, TargetMachine &tm,
318                                    CodeGenOpt::Level OL)
319     : MachineFunctionPass(ID), TM(tm), FuncInfo(new FunctionLoweringInfo()),
320       SwiftError(new SwiftErrorValueTracking()),
321       CurDAG(new SelectionDAG(tm, OL)),
322       SDB(std::make_unique<SelectionDAGBuilder>(*CurDAG, *FuncInfo, *SwiftError,
323                                                 OL)),
324       OptLevel(OL) {
325   initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
326   initializeBranchProbabilityInfoWrapperPassPass(
327       *PassRegistry::getPassRegistry());
328   initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
329   initializeTargetLibraryInfoWrapperPassPass(*PassRegistry::getPassRegistry());
330 }
331 
332 SelectionDAGISel::~SelectionDAGISel() {
333   delete CurDAG;
334   delete SwiftError;
335 }
336 
337 void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
338   if (OptLevel != CodeGenOpt::None)
339     AU.addRequired<AAResultsWrapperPass>();
340   AU.addRequired<GCModuleInfo>();
341   AU.addRequired<StackProtector>();
342   AU.addPreserved<GCModuleInfo>();
343   AU.addRequired<TargetLibraryInfoWrapperPass>();
344   AU.addRequired<TargetTransformInfoWrapperPass>();
345   AU.addRequired<AssumptionCacheTracker>();
346   if (UseMBPI && OptLevel != CodeGenOpt::None)
347     AU.addRequired<BranchProbabilityInfoWrapperPass>();
348   AU.addRequired<ProfileSummaryInfoWrapperPass>();
349   // AssignmentTrackingAnalysis only runs if assignment tracking is enabled for
350   // the module.
351   AU.addRequired<AssignmentTrackingAnalysis>();
352   AU.addPreserved<AssignmentTrackingAnalysis>();
353   if (OptLevel != CodeGenOpt::None)
354     LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
355   MachineFunctionPass::getAnalysisUsage(AU);
356 }
357 
358 static void computeUsesMSVCFloatingPoint(const Triple &TT, const Function &F,
359                                          MachineModuleInfo &MMI) {
360   // Only needed for MSVC
361   if (!TT.isWindowsMSVCEnvironment())
362     return;
363 
364   // If it's already set, nothing to do.
365   if (MMI.usesMSVCFloatingPoint())
366     return;
367 
368   for (const Instruction &I : instructions(F)) {
369     if (I.getType()->isFPOrFPVectorTy()) {
370       MMI.setUsesMSVCFloatingPoint(true);
371       return;
372     }
373     for (const auto &Op : I.operands()) {
374       if (Op->getType()->isFPOrFPVectorTy()) {
375         MMI.setUsesMSVCFloatingPoint(true);
376         return;
377       }
378     }
379   }
380 }
381 
382 bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
383   // If we already selected that function, we do not need to run SDISel.
384   if (mf.getProperties().hasProperty(
385           MachineFunctionProperties::Property::Selected))
386     return false;
387   // Do some sanity-checking on the command-line options.
388   assert((!EnableFastISelAbort || TM.Options.EnableFastISel) &&
389          "-fast-isel-abort > 0 requires -fast-isel");
390 
391   const Function &Fn = mf.getFunction();
392   MF = &mf;
393 
394   // Decide what flavour of variable location debug-info will be used, before
395   // we change the optimisation level.
396   bool InstrRef = mf.shouldUseDebugInstrRef();
397   mf.setUseDebugInstrRef(InstrRef);
398 
399   // Reset the target options before resetting the optimization
400   // level below.
401   // FIXME: This is a horrible hack and should be processed via
402   // codegen looking at the optimization level explicitly when
403   // it wants to look at it.
404   TM.resetTargetOptions(Fn);
405   // Reset OptLevel to None for optnone functions.
406   CodeGenOpt::Level NewOptLevel = OptLevel;
407   if (OptLevel != CodeGenOpt::None && skipFunction(Fn))
408     NewOptLevel = CodeGenOpt::None;
409   OptLevelChanger OLC(*this, NewOptLevel);
410 
411   TII = MF->getSubtarget().getInstrInfo();
412   TLI = MF->getSubtarget().getTargetLowering();
413   RegInfo = &MF->getRegInfo();
414   LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(Fn);
415   GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : nullptr;
416   ORE = std::make_unique<OptimizationRemarkEmitter>(&Fn);
417   AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(mf.getFunction());
418   auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
419   BlockFrequencyInfo *BFI = nullptr;
420   if (PSI && PSI->hasProfileSummary() && OptLevel != CodeGenOpt::None)
421     BFI = &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI();
422 
423   FunctionVarLocs const *FnVarLocs = nullptr;
424   if (isAssignmentTrackingEnabled(*Fn.getParent()))
425     FnVarLocs = getAnalysis<AssignmentTrackingAnalysis>().getResults();
426 
427   LLVM_DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
428 
429   UniformityInfo *UA = nullptr;
430   if (auto *UAPass = getAnalysisIfAvailable<UniformityInfoWrapperPass>())
431     UA = &UAPass->getUniformityInfo();
432   CurDAG->init(*MF, *ORE, this, LibInfo, UA, PSI, BFI, FnVarLocs);
433   FuncInfo->set(Fn, *MF, CurDAG);
434   SwiftError->setFunction(*MF);
435 
436   // Now get the optional analyses if we want to.
437   // This is based on the possibly changed OptLevel (after optnone is taken
438   // into account).  That's unfortunate but OK because it just means we won't
439   // ask for passes that have been required anyway.
440 
441   if (UseMBPI && OptLevel != CodeGenOpt::None)
442     FuncInfo->BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
443   else
444     FuncInfo->BPI = nullptr;
445 
446   if (OptLevel != CodeGenOpt::None)
447     AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
448   else
449     AA = nullptr;
450 
451   SDB->init(GFI, AA, AC, LibInfo);
452 
453   MF->setHasInlineAsm(false);
454 
455   FuncInfo->SplitCSR = false;
456 
457   // We split CSR if the target supports it for the given function
458   // and the function has only return exits.
459   if (OptLevel != CodeGenOpt::None && TLI->supportSplitCSR(MF)) {
460     FuncInfo->SplitCSR = true;
461 
462     // Collect all the return blocks.
463     for (const BasicBlock &BB : Fn) {
464       if (!succ_empty(&BB))
465         continue;
466 
467       const Instruction *Term = BB.getTerminator();
468       if (isa<UnreachableInst>(Term) || isa<ReturnInst>(Term))
469         continue;
470 
471       // Bail out if the exit block is neither a Return nor an Unreachable.
472       FuncInfo->SplitCSR = false;
473       break;
474     }
475   }
476 
477   MachineBasicBlock *EntryMBB = &MF->front();
478   if (FuncInfo->SplitCSR)
479     // This performs initialization so lowering for SplitCSR will be correct.
480     TLI->initializeSplitCSR(EntryMBB);
481 
482   SelectAllBasicBlocks(Fn);
483   if (FastISelFailed && EnableFastISelFallbackReport) {
484     DiagnosticInfoISelFallback DiagFallback(Fn);
485     Fn.getContext().diagnose(DiagFallback);
486   }
487 
488   // Replace forward-declared registers with the registers containing
489   // the desired value.
490   // Note: it is important that this happens **before** the call to
491   // EmitLiveInCopies, since implementations can skip copies of unused
492   // registers. If we don't apply the reg fixups before, some registers may
493   // appear as unused and will be skipped, resulting in bad MI.
494   MachineRegisterInfo &MRI = MF->getRegInfo();
495   for (DenseMap<Register, Register>::iterator I = FuncInfo->RegFixups.begin(),
496                                               E = FuncInfo->RegFixups.end();
497        I != E; ++I) {
498     Register From = I->first;
499     Register To = I->second;
500     // If To is also scheduled to be replaced, find what its ultimate
501     // replacement is.
502     while (true) {
503       DenseMap<Register, Register>::iterator J = FuncInfo->RegFixups.find(To);
504       if (J == E)
505         break;
506       To = J->second;
507     }
508     // Make sure the new register has a sufficiently constrained register class.
509     if (From.isVirtual() && To.isVirtual())
510       MRI.constrainRegClass(To, MRI.getRegClass(From));
511     // Replace it.
512 
513     // Replacing one register with another won't touch the kill flags.
514     // We need to conservatively clear the kill flags as a kill on the old
515     // register might dominate existing uses of the new register.
516     if (!MRI.use_empty(To))
517       MRI.clearKillFlags(From);
518     MRI.replaceRegWith(From, To);
519   }
520 
521   // If the first basic block in the function has live ins that need to be
522   // copied into vregs, emit the copies into the top of the block before
523   // emitting the code for the block.
524   const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
525   RegInfo->EmitLiveInCopies(EntryMBB, TRI, *TII);
526 
527   // Insert copies in the entry block and the return blocks.
528   if (FuncInfo->SplitCSR) {
529     SmallVector<MachineBasicBlock*, 4> Returns;
530     // Collect all the return blocks.
531     for (MachineBasicBlock &MBB : mf) {
532       if (!MBB.succ_empty())
533         continue;
534 
535       MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
536       if (Term != MBB.end() && Term->isReturn()) {
537         Returns.push_back(&MBB);
538         continue;
539       }
540     }
541     TLI->insertCopiesSplitCSR(EntryMBB, Returns);
542   }
543 
544   DenseMap<unsigned, unsigned> LiveInMap;
545   if (!FuncInfo->ArgDbgValues.empty())
546     for (std::pair<unsigned, unsigned> LI : RegInfo->liveins())
547       if (LI.second)
548         LiveInMap.insert(LI);
549 
550   // Insert DBG_VALUE instructions for function arguments to the entry block.
551   for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
552     MachineInstr *MI = FuncInfo->ArgDbgValues[e - i - 1];
553     assert(MI->getOpcode() != TargetOpcode::DBG_VALUE_LIST &&
554            "Function parameters should not be described by DBG_VALUE_LIST.");
555     bool hasFI = MI->getDebugOperand(0).isFI();
556     Register Reg =
557         hasFI ? TRI.getFrameRegister(*MF) : MI->getDebugOperand(0).getReg();
558     if (Reg.isPhysical())
559       EntryMBB->insert(EntryMBB->begin(), MI);
560     else {
561       MachineInstr *Def = RegInfo->getVRegDef(Reg);
562       if (Def) {
563         MachineBasicBlock::iterator InsertPos = Def;
564         // FIXME: VR def may not be in entry block.
565         Def->getParent()->insert(std::next(InsertPos), MI);
566       } else
567         LLVM_DEBUG(dbgs() << "Dropping debug info for dead vreg"
568                           << Register::virtReg2Index(Reg) << "\n");
569     }
570 
571     // Don't try to extend through copies in instruction referencing mode.
572     if (InstrRef)
573       continue;
574 
575     // If Reg is live-in then update debug info to track its copy in a vreg.
576     DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
577     if (LDI != LiveInMap.end()) {
578       assert(!hasFI && "There's no handling of frame pointer updating here yet "
579                        "- add if needed");
580       MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
581       MachineBasicBlock::iterator InsertPos = Def;
582       const MDNode *Variable = MI->getDebugVariable();
583       const MDNode *Expr = MI->getDebugExpression();
584       DebugLoc DL = MI->getDebugLoc();
585       bool IsIndirect = MI->isIndirectDebugValue();
586       if (IsIndirect)
587         assert(MI->getDebugOffset().getImm() == 0 &&
588                "DBG_VALUE with nonzero offset");
589       assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
590              "Expected inlined-at fields to agree");
591       assert(MI->getOpcode() != TargetOpcode::DBG_VALUE_LIST &&
592              "Didn't expect to see a DBG_VALUE_LIST here");
593       // Def is never a terminator here, so it is ok to increment InsertPos.
594       BuildMI(*EntryMBB, ++InsertPos, DL, TII->get(TargetOpcode::DBG_VALUE),
595               IsIndirect, LDI->second, Variable, Expr);
596 
597       // If this vreg is directly copied into an exported register then
598       // that COPY instruction also needs a DBG_VALUE, if it is the only
599       // user of LDI->second.
600       MachineInstr *CopyUseMI = nullptr;
601       for (MachineRegisterInfo::use_instr_iterator
602            UI = RegInfo->use_instr_begin(LDI->second),
603            E = RegInfo->use_instr_end(); UI != E; ) {
604         MachineInstr *UseMI = &*(UI++);
605         if (UseMI->isDebugValue()) continue;
606         if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
607           CopyUseMI = UseMI; continue;
608         }
609         // Otherwise this is another use or second copy use.
610         CopyUseMI = nullptr; break;
611       }
612       if (CopyUseMI &&
613           TRI.getRegSizeInBits(LDI->second, MRI) ==
614               TRI.getRegSizeInBits(CopyUseMI->getOperand(0).getReg(), MRI)) {
615         // Use MI's debug location, which describes where Variable was
616         // declared, rather than whatever is attached to CopyUseMI.
617         MachineInstr *NewMI =
618             BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
619                     CopyUseMI->getOperand(0).getReg(), Variable, Expr);
620         MachineBasicBlock::iterator Pos = CopyUseMI;
621         EntryMBB->insertAfter(Pos, NewMI);
622       }
623     }
624   }
625 
626   // For debug-info, in instruction referencing mode, we need to perform some
627   // post-isel maintenance.
628   if (MF->useDebugInstrRef())
629     MF->finalizeDebugInstrRefs();
630 
631   // Determine if there are any calls in this machine function.
632   MachineFrameInfo &MFI = MF->getFrameInfo();
633   for (const auto &MBB : *MF) {
634     if (MFI.hasCalls() && MF->hasInlineAsm())
635       break;
636 
637     for (const auto &MI : MBB) {
638       const MCInstrDesc &MCID = TII->get(MI.getOpcode());
639       if ((MCID.isCall() && !MCID.isReturn()) ||
640           MI.isStackAligningInlineAsm()) {
641         MFI.setHasCalls(true);
642       }
643       if (MI.isInlineAsm()) {
644         MF->setHasInlineAsm(true);
645       }
646     }
647   }
648 
649   // Determine if there is a call to setjmp in the machine function.
650   MF->setExposesReturnsTwice(Fn.callsFunctionThatReturnsTwice());
651 
652   // Determine if floating point is used for MSVC.
653   computeUsesMSVCFloatingPoint(TM.getTargetTriple(), Fn, MF->getMMI());
654 
655   // Release function-specific state. SDB and CurDAG are already cleared
656   // at this point.
657   FuncInfo->clear();
658 
659   LLVM_DEBUG(dbgs() << "*** MachineFunction at end of ISel ***\n");
660   LLVM_DEBUG(MF->print(dbgs()));
661 
662   return true;
663 }
664 
665 static void reportFastISelFailure(MachineFunction &MF,
666                                   OptimizationRemarkEmitter &ORE,
667                                   OptimizationRemarkMissed &R,
668                                   bool ShouldAbort) {
669   // Print the function name explicitly if we don't have a debug location (which
670   // makes the diagnostic less useful) or if we're going to emit a raw error.
671   if (!R.getLocation().isValid() || ShouldAbort)
672     R << (" (in function: " + MF.getName() + ")").str();
673 
674   if (ShouldAbort)
675     report_fatal_error(Twine(R.getMsg()));
676 
677   ORE.emit(R);
678   LLVM_DEBUG(dbgs() << R.getMsg() << "\n");
679 }
680 
681 void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
682                                         BasicBlock::const_iterator End,
683                                         bool &HadTailCall) {
684   // Allow creating illegal types during DAG building for the basic block.
685   CurDAG->NewNodesMustHaveLegalTypes = false;
686 
687   // Lower the instructions. If a call is emitted as a tail call, cease emitting
688   // nodes for this block.
689   for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I) {
690     if (!ElidedArgCopyInstrs.count(&*I))
691       SDB->visit(*I);
692   }
693 
694   // Make sure the root of the DAG is up-to-date.
695   CurDAG->setRoot(SDB->getControlRoot());
696   HadTailCall = SDB->HasTailCall;
697   SDB->resolveOrClearDbgInfo();
698   SDB->clear();
699 
700   // Final step, emit the lowered DAG as machine code.
701   CodeGenAndEmitDAG();
702 }
703 
704 void SelectionDAGISel::ComputeLiveOutVRegInfo() {
705   SmallPtrSet<SDNode *, 16> Added;
706   SmallVector<SDNode*, 128> Worklist;
707 
708   Worklist.push_back(CurDAG->getRoot().getNode());
709   Added.insert(CurDAG->getRoot().getNode());
710 
711   KnownBits Known;
712 
713   do {
714     SDNode *N = Worklist.pop_back_val();
715 
716     // Add all chain operands to the worklist.
717     for (const SDValue &Op : N->op_values())
718       if (Op.getValueType() == MVT::Other && Added.insert(Op.getNode()).second)
719         Worklist.push_back(Op.getNode());
720 
721     // If this is a CopyToReg with a vreg dest, process it.
722     if (N->getOpcode() != ISD::CopyToReg)
723       continue;
724 
725     unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
726     if (!Register::isVirtualRegister(DestReg))
727       continue;
728 
729     // Ignore non-integer values.
730     SDValue Src = N->getOperand(2);
731     EVT SrcVT = Src.getValueType();
732     if (!SrcVT.isInteger())
733       continue;
734 
735     unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
736     Known = CurDAG->computeKnownBits(Src);
737     FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, Known);
738   } while (!Worklist.empty());
739 }
740 
741 void SelectionDAGISel::CodeGenAndEmitDAG() {
742   StringRef GroupName = "sdag";
743   StringRef GroupDescription = "Instruction Selection and Scheduling";
744   std::string BlockName;
745   bool MatchFilterBB = false; (void)MatchFilterBB;
746 #ifndef NDEBUG
747   TargetTransformInfo &TTI =
748       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*FuncInfo->Fn);
749 #endif
750 
751   // Pre-type legalization allows creation of any node types.
752   CurDAG->NewNodesMustHaveLegalTypes = false;
753 
754 #ifndef NDEBUG
755   MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
756                    FilterDAGBasicBlockName ==
757                        FuncInfo->MBB->getBasicBlock()->getName());
758 #endif
759 #ifdef NDEBUG
760   if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewDAGCombineLT ||
761       ViewLegalizeDAGs || ViewDAGCombine2 || ViewISelDAGs || ViewSchedDAGs ||
762       ViewSUnitDAGs)
763 #endif
764   {
765     BlockName =
766         (MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
767   }
768   LLVM_DEBUG(dbgs() << "Initial selection DAG: "
769                     << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
770                     << "'\n";
771              CurDAG->dump());
772 
773 #ifndef NDEBUG
774   if (TTI.hasBranchDivergence())
775     CurDAG->VerifyDAGDivergence();
776 #endif
777 
778   if (ViewDAGCombine1 && MatchFilterBB)
779     CurDAG->viewGraph("dag-combine1 input for " + BlockName);
780 
781   // Run the DAG combiner in pre-legalize mode.
782   {
783     NamedRegionTimer T("combine1", "DAG Combining 1", GroupName,
784                        GroupDescription, TimePassesIsEnabled);
785     CurDAG->Combine(BeforeLegalizeTypes, AA, OptLevel);
786   }
787 
788   LLVM_DEBUG(dbgs() << "Optimized lowered selection DAG: "
789                     << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
790                     << "'\n";
791              CurDAG->dump());
792 
793 #ifndef NDEBUG
794   if (TTI.hasBranchDivergence())
795     CurDAG->VerifyDAGDivergence();
796 #endif
797 
798   // Second step, hack on the DAG until it only uses operations and types that
799   // the target supports.
800   if (ViewLegalizeTypesDAGs && MatchFilterBB)
801     CurDAG->viewGraph("legalize-types input for " + BlockName);
802 
803   bool Changed;
804   {
805     NamedRegionTimer T("legalize_types", "Type Legalization", GroupName,
806                        GroupDescription, TimePassesIsEnabled);
807     Changed = CurDAG->LegalizeTypes();
808   }
809 
810   LLVM_DEBUG(dbgs() << "Type-legalized selection DAG: "
811                     << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
812                     << "'\n";
813              CurDAG->dump());
814 
815 #ifndef NDEBUG
816   if (TTI.hasBranchDivergence())
817     CurDAG->VerifyDAGDivergence();
818 #endif
819 
820   // Only allow creation of legal node types.
821   CurDAG->NewNodesMustHaveLegalTypes = true;
822 
823   if (Changed) {
824     if (ViewDAGCombineLT && MatchFilterBB)
825       CurDAG->viewGraph("dag-combine-lt input for " + BlockName);
826 
827     // Run the DAG combiner in post-type-legalize mode.
828     {
829       NamedRegionTimer T("combine_lt", "DAG Combining after legalize types",
830                          GroupName, GroupDescription, TimePassesIsEnabled);
831       CurDAG->Combine(AfterLegalizeTypes, AA, OptLevel);
832     }
833 
834     LLVM_DEBUG(dbgs() << "Optimized type-legalized selection DAG: "
835                       << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
836                       << "'\n";
837                CurDAG->dump());
838 
839 #ifndef NDEBUG
840     if (TTI.hasBranchDivergence())
841       CurDAG->VerifyDAGDivergence();
842 #endif
843   }
844 
845   {
846     NamedRegionTimer T("legalize_vec", "Vector Legalization", GroupName,
847                        GroupDescription, TimePassesIsEnabled);
848     Changed = CurDAG->LegalizeVectors();
849   }
850 
851   if (Changed) {
852     LLVM_DEBUG(dbgs() << "Vector-legalized selection DAG: "
853                       << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
854                       << "'\n";
855                CurDAG->dump());
856 
857 #ifndef NDEBUG
858     if (TTI.hasBranchDivergence())
859       CurDAG->VerifyDAGDivergence();
860 #endif
861 
862     {
863       NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
864                          GroupDescription, TimePassesIsEnabled);
865       CurDAG->LegalizeTypes();
866     }
867 
868     LLVM_DEBUG(dbgs() << "Vector/type-legalized selection DAG: "
869                       << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
870                       << "'\n";
871                CurDAG->dump());
872 
873 #ifndef NDEBUG
874     if (TTI.hasBranchDivergence())
875       CurDAG->VerifyDAGDivergence();
876 #endif
877 
878     if (ViewDAGCombineLT && MatchFilterBB)
879       CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
880 
881     // Run the DAG combiner in post-type-legalize mode.
882     {
883       NamedRegionTimer T("combine_lv", "DAG Combining after legalize vectors",
884                          GroupName, GroupDescription, TimePassesIsEnabled);
885       CurDAG->Combine(AfterLegalizeVectorOps, AA, OptLevel);
886     }
887 
888     LLVM_DEBUG(dbgs() << "Optimized vector-legalized selection DAG: "
889                       << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
890                       << "'\n";
891                CurDAG->dump());
892 
893 #ifndef NDEBUG
894     if (TTI.hasBranchDivergence())
895       CurDAG->VerifyDAGDivergence();
896 #endif
897   }
898 
899   if (ViewLegalizeDAGs && MatchFilterBB)
900     CurDAG->viewGraph("legalize input for " + BlockName);
901 
902   {
903     NamedRegionTimer T("legalize", "DAG Legalization", GroupName,
904                        GroupDescription, TimePassesIsEnabled);
905     CurDAG->Legalize();
906   }
907 
908   LLVM_DEBUG(dbgs() << "Legalized selection DAG: "
909                     << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
910                     << "'\n";
911              CurDAG->dump());
912 
913 #ifndef NDEBUG
914   if (TTI.hasBranchDivergence())
915     CurDAG->VerifyDAGDivergence();
916 #endif
917 
918   if (ViewDAGCombine2 && MatchFilterBB)
919     CurDAG->viewGraph("dag-combine2 input for " + BlockName);
920 
921   // Run the DAG combiner in post-legalize mode.
922   {
923     NamedRegionTimer T("combine2", "DAG Combining 2", GroupName,
924                        GroupDescription, TimePassesIsEnabled);
925     CurDAG->Combine(AfterLegalizeDAG, AA, OptLevel);
926   }
927 
928   LLVM_DEBUG(dbgs() << "Optimized legalized selection DAG: "
929                     << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
930                     << "'\n";
931              CurDAG->dump());
932 
933 #ifndef NDEBUG
934   if (TTI.hasBranchDivergence())
935     CurDAG->VerifyDAGDivergence();
936 #endif
937 
938   if (OptLevel != CodeGenOpt::None)
939     ComputeLiveOutVRegInfo();
940 
941   if (ViewISelDAGs && MatchFilterBB)
942     CurDAG->viewGraph("isel input for " + BlockName);
943 
944   // Third, instruction select all of the operations to machine code, adding the
945   // code to the MachineBasicBlock.
946   {
947     NamedRegionTimer T("isel", "Instruction Selection", GroupName,
948                        GroupDescription, TimePassesIsEnabled);
949     DoInstructionSelection();
950   }
951 
952   LLVM_DEBUG(dbgs() << "Selected selection DAG: "
953                     << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
954                     << "'\n";
955              CurDAG->dump());
956 
957   if (ViewSchedDAGs && MatchFilterBB)
958     CurDAG->viewGraph("scheduler input for " + BlockName);
959 
960   // Schedule machine code.
961   ScheduleDAGSDNodes *Scheduler = CreateScheduler();
962   {
963     NamedRegionTimer T("sched", "Instruction Scheduling", GroupName,
964                        GroupDescription, TimePassesIsEnabled);
965     Scheduler->Run(CurDAG, FuncInfo->MBB);
966   }
967 
968   if (ViewSUnitDAGs && MatchFilterBB)
969     Scheduler->viewGraph();
970 
971   // Emit machine code to BB.  This can change 'BB' to the last block being
972   // inserted into.
973   MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
974   {
975     NamedRegionTimer T("emit", "Instruction Creation", GroupName,
976                        GroupDescription, TimePassesIsEnabled);
977 
978     // FuncInfo->InsertPt is passed by reference and set to the end of the
979     // scheduled instructions.
980     LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule(FuncInfo->InsertPt);
981   }
982 
983   // If the block was split, make sure we update any references that are used to
984   // update PHI nodes later on.
985   if (FirstMBB != LastMBB)
986     SDB->UpdateSplitBlock(FirstMBB, LastMBB);
987 
988   // Free the scheduler state.
989   {
990     NamedRegionTimer T("cleanup", "Instruction Scheduling Cleanup", GroupName,
991                        GroupDescription, TimePassesIsEnabled);
992     delete Scheduler;
993   }
994 
995   // Free the SelectionDAG state, now that we're finished with it.
996   CurDAG->clear();
997 }
998 
999 namespace {
1000 
1001 /// ISelUpdater - helper class to handle updates of the instruction selection
1002 /// graph.
1003 class ISelUpdater : public SelectionDAG::DAGUpdateListener {
1004   SelectionDAG::allnodes_iterator &ISelPosition;
1005 
1006 public:
1007   ISelUpdater(SelectionDAG &DAG, SelectionDAG::allnodes_iterator &isp)
1008     : SelectionDAG::DAGUpdateListener(DAG), ISelPosition(isp) {}
1009 
1010   /// NodeDeleted - Handle nodes deleted from the graph. If the node being
1011   /// deleted is the current ISelPosition node, update ISelPosition.
1012   ///
1013   void NodeDeleted(SDNode *N, SDNode *E) override {
1014     if (ISelPosition == SelectionDAG::allnodes_iterator(N))
1015       ++ISelPosition;
1016   }
1017 
1018   /// NodeInserted - Handle new nodes inserted into the graph: propagate
1019   /// metadata from root nodes that also applies to new nodes, in case the root
1020   /// is later deleted.
1021   void NodeInserted(SDNode *N) override {
1022     SDNode *CurNode = &*ISelPosition;
1023     if (MDNode *MD = DAG.getPCSections(CurNode))
1024       DAG.addPCSections(N, MD);
1025   }
1026 };
1027 
1028 } // end anonymous namespace
1029 
1030 // This function is used to enforce the topological node id property
1031 // leveraged during instruction selection. Before the selection process all
1032 // nodes are given a non-negative id such that all nodes have a greater id than
1033 // their operands. As this holds transitively we can prune checks that a node N
1034 // is a predecessor of another node M by not recursively checking through M's
1035 // operands if N's ID is larger than M's ID. This significantly improves
1036 // performance of various legality checks (e.g. IsLegalToFold / UpdateChains).
1037 
1038 // However, when we fuse multiple nodes into a single node during the
1039 // selection we may induce a predecessor relationship between inputs and
1040 // outputs of distinct nodes being merged, violating the topological property.
1041 // Should a fused node have a successor which has yet to be selected,
1042 // our legality checks would be incorrect. To avoid this we mark all unselected
1043 // successor nodes, i.e. id != -1, as invalid for pruning by bit-negating (x =>
1044 // (-(x+1))) the ids and modify our pruning check to ignore negative Ids of M.
1045 // We use bit-negation to more clearly enforce that node id -1 can only be
1046 // achieved by selected nodes. As the conversion is reversible to the original
1047 // Id, topological pruning can still be leveraged when looking for unselected
1048 // nodes. This method is called internally in all ISel replacement related
1049 // functions.
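// Worked example (illustrative): an unselected successor with id 5 is stored
// as -(5 + 1) == -6, and getUninvalidatedNodeId(-6) recovers 5. Only nodes
// with a positive id are invalidated (see the UId > 0 check below), so an
// invalidated id is always <= -2 and can never collide with the -1 sentinel
// used for selected nodes.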
1050 void SelectionDAGISel::EnforceNodeIdInvariant(SDNode *Node) {
1051   SmallVector<SDNode *, 4> Nodes;
1052   Nodes.push_back(Node);
1053 
1054   while (!Nodes.empty()) {
1055     SDNode *N = Nodes.pop_back_val();
1056     for (auto *U : N->uses()) {
1057       auto UId = U->getNodeId();
1058       if (UId > 0) {
1059         InvalidateNodeId(U);
1060         Nodes.push_back(U);
1061       }
1062     }
1063   }
1064 }
1065 
1066 // InvalidateNodeId - As explained in EnforceNodeIdInvariant, mark a
1067 // NodeId with the equivalent node id which is invalid for topological
1068 // pruning.
1069 void SelectionDAGISel::InvalidateNodeId(SDNode *N) {
1070   int InvalidId = -(N->getNodeId() + 1);
1071   N->setNodeId(InvalidId);
1072 }
1073 
1074 // getUninvalidatedNodeId - get original uninvalidated node id.
1075 int SelectionDAGISel::getUninvalidatedNodeId(SDNode *N) {
1076   int Id = N->getNodeId();
1077   if (Id < -1)
1078     return -(Id + 1);
1079   return Id;
1080 }
1081 
1082 void SelectionDAGISel::DoInstructionSelection() {
1083   LLVM_DEBUG(dbgs() << "===== Instruction selection begins: "
1084                     << printMBBReference(*FuncInfo->MBB) << " '"
1085                     << FuncInfo->MBB->getName() << "'\n");
1086 
1087   PreprocessISelDAG();
1088 
1089   // Select target instructions for the DAG.
1090   {
1091     // Number all nodes with a topological order and set DAGSize.
1092     DAGSize = CurDAG->AssignTopologicalOrder();
1093 
1094     // Create a dummy node (which is not added to allnodes), that adds
1095     // a reference to the root node, preventing it from being deleted,
1096     // and tracking any changes of the root.
1097     HandleSDNode Dummy(CurDAG->getRoot());
1098     SelectionDAG::allnodes_iterator ISelPosition (CurDAG->getRoot().getNode());
1099     ++ISelPosition;
1100 
1101     // Make sure that ISelPosition gets properly updated when nodes are deleted
1102     // in calls made from this function. New nodes inherit relevant metadata.
1103     ISelUpdater ISU(*CurDAG, ISelPosition);
1104 
1105     // The AllNodes list is now topologically sorted. Visit the
1106     // nodes by starting at the end of the list (the root of the
1107     // graph) and proceeding back toward the beginning (the entry
1108     // node).
1109     while (ISelPosition != CurDAG->allnodes_begin()) {
1110       SDNode *Node = &*--ISelPosition;
1111       // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
1112       // but there are currently some corner cases that it misses. Also, this
1113       // makes it theoretically possible to disable the DAGCombiner.
1114       if (Node->use_empty())
1115         continue;
1116 
1117 #ifndef NDEBUG
1118       SmallVector<SDNode *, 4> Nodes;
1119       Nodes.push_back(Node);
1120 
1121       while (!Nodes.empty()) {
1122         auto N = Nodes.pop_back_val();
1123         if (N->getOpcode() == ISD::TokenFactor || N->getNodeId() < 0)
1124           continue;
1125         for (const SDValue &Op : N->op_values()) {
1126           if (Op->getOpcode() == ISD::TokenFactor)
1127             Nodes.push_back(Op.getNode());
1128           else {
1129             // cycles when fusing nodes during selection. All unselected
1130             // successors of an already selected node should have a negative id.
1131             // This assertion will catch such cases. If this assertion triggers,
1132             // it is likely you are using DAG-level Value/Node replacement functions
1133             // it is likely you using DAG-level Value/Node replacement functions
1134             // (versus equivalent ISEL replacement) in backend-specific
1135             // selections. See comment in EnforceNodeIdInvariant for more
1136             // details.
1137             assert(Op->getNodeId() != -1 &&
1138                    "Node has already selected predecessor node");
1139           }
1140         }
1141       }
1142 #endif
1143 
1144       // When we are using non-default rounding modes or FP exception behavior,
1145       // FP operations are represented by StrictFP pseudo-operations.  For
1146       // targets that do not (yet) understand strict FP operations directly,
1147       // we convert them to normal FP opcodes instead at this point.  This
1148       // will allow them to be handled by existing target-specific instruction
1149       // selectors.
1150       if (!TLI->isStrictFPEnabled() && Node->isStrictFPOpcode()) {
1151         // For some opcodes, we need to call TLI->getOperationAction using
1152         // the first operand type instead of the result type.  Note that this
1153         // must match what SelectionDAGLegalize::LegalizeOp is doing.
1154         EVT ActionVT;
1155         switch (Node->getOpcode()) {
1156         case ISD::STRICT_SINT_TO_FP:
1157         case ISD::STRICT_UINT_TO_FP:
1158         case ISD::STRICT_LRINT:
1159         case ISD::STRICT_LLRINT:
1160         case ISD::STRICT_LROUND:
1161         case ISD::STRICT_LLROUND:
1162         case ISD::STRICT_FSETCC:
1163         case ISD::STRICT_FSETCCS:
1164           ActionVT = Node->getOperand(1).getValueType();
1165           break;
1166         default:
1167           ActionVT = Node->getValueType(0);
1168           break;
1169         }
1170         if (TLI->getOperationAction(Node->getOpcode(), ActionVT)
1171             == TargetLowering::Expand)
1172           Node = CurDAG->mutateStrictFPToFP(Node);
1173       }
1174 
1175       LLVM_DEBUG(dbgs() << "\nISEL: Starting selection on root node: ";
1176                  Node->dump(CurDAG));
1177 
1178       Select(Node);
1179     }
1180 
1181     CurDAG->setRoot(Dummy.getValue());
1182   }
1183 
1184   LLVM_DEBUG(dbgs() << "\n===== Instruction selection ends:\n");
1185 
1186   PostprocessISelDAG();
1187 }
1188 
1189 static bool hasExceptionPointerOrCodeUser(const CatchPadInst *CPI) {
1190   for (const User *U : CPI->users()) {
1191     if (const IntrinsicInst *EHPtrCall = dyn_cast<IntrinsicInst>(U)) {
1192       Intrinsic::ID IID = EHPtrCall->getIntrinsicID();
1193       if (IID == Intrinsic::eh_exceptionpointer ||
1194           IID == Intrinsic::eh_exceptioncode)
1195         return true;
1196     }
1197   }
1198   return false;
1199 }
1200 
1201 // The wasm.landingpad.index intrinsic associates a landing pad index number
1202 // with a catchpad instruction. Retrieve the landing pad index from the
1203 // intrinsic and store the mapping in the function.
1204 static void mapWasmLandingPadIndex(MachineBasicBlock *MBB,
1205                                    const CatchPadInst *CPI) {
1206   MachineFunction *MF = MBB->getParent();
1207   // In case of single catch (...), we don't emit LSDA, so we don't need
1208   // this information.
1209   bool IsSingleCatchAllClause =
1210       CPI->arg_size() == 1 &&
1211       cast<Constant>(CPI->getArgOperand(0))->isNullValue();
1212   // Catchpads for longjmp use an empty type list, e.g. catchpad within %0 [],
1213   // and they don't need LSDA info.
1214   bool IsCatchLongjmp = CPI->arg_size() == 0;
1215   if (!IsSingleCatchAllClause && !IsCatchLongjmp) {
1216     // Create a mapping from landing pad label to landing pad index.
1217     bool IntrFound = false;
1218     for (const User *U : CPI->users()) {
1219       if (const auto *Call = dyn_cast<IntrinsicInst>(U)) {
1220         Intrinsic::ID IID = Call->getIntrinsicID();
1221         if (IID == Intrinsic::wasm_landingpad_index) {
1222           Value *IndexArg = Call->getArgOperand(1);
1223           int Index = cast<ConstantInt>(IndexArg)->getZExtValue();
1224           MF->setWasmLandingPadIndex(MBB, Index);
1225           IntrFound = true;
1226           break;
1227         }
1228       }
1229     }
1230     assert(IntrFound && "wasm.landingpad.index intrinsic not found!");
1231     (void)IntrFound;
1232   }
1233 }
1234 
1235 /// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
1236 /// do other setup for EH landing-pad blocks.
1237 bool SelectionDAGISel::PrepareEHLandingPad() {
1238   MachineBasicBlock *MBB = FuncInfo->MBB;
1239   const Constant *PersonalityFn = FuncInfo->Fn->getPersonalityFn();
1240   const BasicBlock *LLVMBB = MBB->getBasicBlock();
1241   const TargetRegisterClass *PtrRC =
1242       TLI->getRegClassFor(TLI->getPointerTy(CurDAG->getDataLayout()));
1243 
1244   auto Pers = classifyEHPersonality(PersonalityFn);
1245 
1246   // Catchpads have one live-in register, which typically holds the exception
1247   // pointer or code.
1248   if (isFuncletEHPersonality(Pers)) {
1249     if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI())) {
1250       if (hasExceptionPointerOrCodeUser(CPI)) {
1251         // Get or create the virtual register to hold the pointer or code.  Mark
1252         // the physreg as live-in and copy it into the vreg.
1253         MCPhysReg EHPhysReg = TLI->getExceptionPointerRegister(PersonalityFn);
1254         assert(EHPhysReg && "target lacks exception pointer register");
1255         MBB->addLiveIn(EHPhysReg);
1256         unsigned VReg = FuncInfo->getCatchPadExceptionPointerVReg(CPI, PtrRC);
1257         BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(),
1258                 TII->get(TargetOpcode::COPY), VReg)
1259             .addReg(EHPhysReg, RegState::Kill);
1260       }
1261     }
1262     return true;
1263   }
1264 
1265   // Add a label to mark the beginning of the landing pad.  Deletion of the
1266   // landing pad can thus be detected via the MachineModuleInfo.
1267   MCSymbol *Label = MF->addLandingPad(MBB);
1268 
1269   const MCInstrDesc &II = TII->get(TargetOpcode::EH_LABEL);
1270   BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
1271     .addSym(Label);
1272 
1273   // If the unwinder does not preserve all registers, ensure that the
1274   // function marks the clobbered registers as used.
1275   const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
1276   if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
1277     MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
1278 
1279   if (Pers == EHPersonality::Wasm_CXX) {
1280     if (const auto *CPI = dyn_cast<CatchPadInst>(LLVMBB->getFirstNonPHI()))
1281       mapWasmLandingPadIndex(MBB, CPI);
1282   } else {
1283     // Assign the call site to the landing pad's begin label.
1284     MF->setCallSiteLandingPad(Label, SDB->LPadToCallSiteMap[MBB]);
1285     // Mark exception register as live in.
1286     if (unsigned Reg = TLI->getExceptionPointerRegister(PersonalityFn))
1287       FuncInfo->ExceptionPointerVirtReg = MBB->addLiveIn(Reg, PtrRC);
1288     // Mark exception selector register as live in.
1289     if (unsigned Reg = TLI->getExceptionSelectorRegister(PersonalityFn))
1290       FuncInfo->ExceptionSelectorVirtReg = MBB->addLiveIn(Reg, PtrRC);
1291   }
1292 
1293   return true;
1294 }
1295 
1296 // Mark and report IPToState for each block under IsEHa (asynchronous EH).
1297 void SelectionDAGISel::reportIPToStateForBlocks(MachineFunction *MF) {
1298   MachineModuleInfo &MMI = MF->getMMI();
1299   llvm::WinEHFuncInfo *EHInfo = MF->getWinEHFuncInfo();
1300   if (!EHInfo)
1301     return;
1302   for (auto MBBI = MF->begin(), E = MF->end(); MBBI != E; ++MBBI) {
1303     MachineBasicBlock *MBB = &*MBBI;
1304     const BasicBlock *BB = MBB->getBasicBlock();
1305     int State = EHInfo->BlockToStateMap[BB];
1306     if (BB->getFirstMayFaultInst()) {
1307       // Report the IP range only for blocks containing a may-fault instruction.
1308       auto MBBb = MBB->getFirstNonPHI();
1309       MachineInstr *MIb = &*MBBb;
1310       if (MIb->isTerminator())
1311         continue;
1312 
1313       // Insert EH Labels
1314       MCSymbol *BeginLabel = MMI.getContext().createTempSymbol();
1315       MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
1316       EHInfo->addIPToStateRange(State, BeginLabel, EndLabel);
1317       BuildMI(*MBB, MBBb, SDB->getCurDebugLoc(),
1318               TII->get(TargetOpcode::EH_LABEL))
1319           .addSym(BeginLabel);
1320       auto MBBe = MBB->instr_end();
1321       MachineInstr *MIe = &*(--MBBe);
1322       // Insert before the (possibly multiple) terminators.
1323       while (MIe->isTerminator())
1324         MIe = &*(--MBBe);
1325       ++MBBe;
1326       BuildMI(*MBB, MBBe, SDB->getCurDebugLoc(),
1327               TII->get(TargetOpcode::EH_LABEL))
1328           .addSym(EndLabel);
1329     }
1330   }
1331 }
1332 
1333 /// isFoldedOrDeadInstruction - Return true if the specified instruction is
1334 /// side-effect free and is either dead or folded into a generated instruction.
1335 /// Return false if it needs to be emitted.
1336 static bool isFoldedOrDeadInstruction(const Instruction *I,
1337                                       const FunctionLoweringInfo &FuncInfo) {
1338   return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded.
1339          !I->isTerminator() &&     // Terminators aren't folded.
1340          !isa<DbgInfoIntrinsic>(I) && // Debug instructions aren't folded.
1341          !I->isEHPad() &&             // EH pad instructions aren't folded.
1342          !FuncInfo.isExportedInst(I); // Exported instrs must be computed.
1343 }
1344 
1345 static bool processIfEntryValueDbgDeclare(FunctionLoweringInfo &FuncInfo,
1346                                           const Value *Arg, DIExpression *Expr,
1347                                           DILocalVariable *Var,
1348                                           DebugLoc DbgLoc) {
1349   if (!Expr->isEntryValue() || !isa<Argument>(Arg))
1350     return false;
1351 
1352   auto ArgIt = FuncInfo.ValueMap.find(Arg);
1353   if (ArgIt == FuncInfo.ValueMap.end())
1354     return false;
1355   Register ArgVReg = ArgIt->getSecond();
1356 
1357   // Find the corresponding livein physical register to this argument.
1358   for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
1359     if (VirtReg == ArgVReg) {
1360       FuncInfo.MF->setVariableDbgInfo(Var, Expr, PhysReg, DbgLoc);
1361       LLVM_DEBUG(dbgs() << "processDbgDeclare: setVariableDbgInfo Var=" << *Var
1362                         << ", Expr=" << *Expr << ",  MCRegister=" << PhysReg
1363                         << ", DbgLoc=" << DbgLoc << "\n");
1364       return true;
1365     }
1366   return false;
1367 }
1368 
1369 static bool processDbgDeclare(FunctionLoweringInfo &FuncInfo,
1370                               const Value *Address, DIExpression *Expr,
1371                               DILocalVariable *Var, DebugLoc DbgLoc) {
1372   if (!Address) {
1373     LLVM_DEBUG(dbgs() << "processDbgDeclares skipping " << *Var
1374                       << " (bad address)\n");
1375     return false;
1376   }
1377 
1378   if (processIfEntryValueDbgDeclare(FuncInfo, Address, Expr, Var, DbgLoc))
1379     return true;
1380 
1381   MachineFunction *MF = FuncInfo.MF;
1382   const DataLayout &DL = MF->getDataLayout();
1383 
1384   assert(Var && "Missing variable");
1385   assert(DbgLoc && "Missing location");
1386 
1387   // Look through casts and constant offset GEPs. These mostly come from
1388   // inalloca.
1389   APInt Offset(DL.getTypeSizeInBits(Address->getType()), 0);
1390   Address = Address->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
1391 
1392   // Check if the variable is a static alloca or a byval or inalloca
1393   // argument passed in memory. If it is not, then we will ignore this
1394   // intrinsic and handle this during isel like dbg.value.
1395   int FI = std::numeric_limits<int>::max();
1396   if (const auto *AI = dyn_cast<AllocaInst>(Address)) {
1397     auto SI = FuncInfo.StaticAllocaMap.find(AI);
1398     if (SI != FuncInfo.StaticAllocaMap.end())
1399       FI = SI->second;
1400   } else if (const auto *Arg = dyn_cast<Argument>(Address))
1401     FI = FuncInfo.getArgumentFrameIndex(Arg);
1402 
1403   if (FI == std::numeric_limits<int>::max())
1404     return false;
1405 
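  // If stripping casts and GEPs above accumulated a constant offset, fold it
  // into the debug expression.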
1406   if (Offset.getBoolValue())
1407     Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset,
1408                                  Offset.getZExtValue());
1409 
1410   LLVM_DEBUG(dbgs() << "processDbgDeclare: setVariableDbgInfo Var=" << *Var
1411                     << ", Expr=" << *Expr << ",  FI=" << FI
1412                     << ", DbgLoc=" << DbgLoc << "\n");
1413   MF->setVariableDbgInfo(Var, Expr, FI, DbgLoc);
1414   return true;
1415 }
1416 
1417 /// Collect llvm.dbg.declare information. This is done after argument lowering
1418 /// in case the declarations refer to arguments.
1419 static void processDbgDeclares(FunctionLoweringInfo &FuncInfo) {
1420   for (const auto &I : instructions(*FuncInfo.Fn)) {
1421     const auto *DI = dyn_cast<DbgDeclareInst>(&I);
1422     if (DI && processDbgDeclare(FuncInfo, DI->getAddress(), DI->getExpression(),
1423                                 DI->getVariable(), DI->getDebugLoc()))
1424       FuncInfo.PreprocessedDbgDeclares.insert(DI);
1425   }
1426 }
1427 
1428 /// Collect single location variable information generated with assignment
1429 /// tracking. This is done after argument lowering in case the declarations
1430 /// refer to arguments.
1431 static void processSingleLocVars(FunctionLoweringInfo &FuncInfo,
1432                                  FunctionVarLocs const *FnVarLocs) {
1433   for (auto It = FnVarLocs->single_locs_begin(),
1434             End = FnVarLocs->single_locs_end();
1435        It != End; ++It) {
1436     assert(!It->Values.hasArgList() && "Single loc variadic ops not supported");
1437     processDbgDeclare(FuncInfo, It->Values.getVariableLocationOp(0), It->Expr,
1438                       FnVarLocs->getDILocalVariable(It->VariableID), It->DL);
1439   }
1440 }
1441 
1442 void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
1443   FastISelFailed = false;
1444   // Initialize the Fast-ISel state, if needed.
1445   FastISel *FastIS = nullptr;
1446   if (TM.Options.EnableFastISel) {
1447     LLVM_DEBUG(dbgs() << "Enabling fast-isel\n");
1448     FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
1449   }
1450 
1451   ReversePostOrderTraversal<const Function*> RPOT(&Fn);
1452 
1453   // Lower arguments up front. An RPO iteration always visits the entry block
1454   // first.
1455   assert(*RPOT.begin() == &Fn.getEntryBlock());
1456   ++NumEntryBlocks;
1457 
1458   // Set up FuncInfo for ISel. Entry blocks never have PHIs.
1459   FuncInfo->MBB = FuncInfo->MBBMap[&Fn.getEntryBlock()];
1460   FuncInfo->InsertPt = FuncInfo->MBB->begin();
1461 
1462   CurDAG->setFunctionLoweringInfo(FuncInfo.get());
1463 
1464   if (!FastIS) {
1465     LowerArguments(Fn);
1466   } else {
1467     // See if fast isel can lower the arguments.
1468     FastIS->startNewBlock();
1469     if (!FastIS->lowerArguments()) {
1470       FastISelFailed = true;
1471       // Fast isel failed to lower these arguments
1472       ++NumFastIselFailLowerArguments;
1473 
1474       OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1475                                  Fn.getSubprogram(),
1476                                  &Fn.getEntryBlock());
1477       R << "FastISel didn't lower all arguments: "
1478         << ore::NV("Prototype", Fn.getFunctionType());
1479       reportFastISelFailure(*MF, *ORE, R, EnableFastISelAbort > 1);
1480 
1481       // Use SelectionDAG argument lowering
1482       LowerArguments(Fn);
1483       CurDAG->setRoot(SDB->getControlRoot());
1484       SDB->clear();
1485       CodeGenAndEmitDAG();
1486     }
1487 
1488     // If we inserted any instructions at the beginning, make a note of
1489     // where they are, so we can be sure to emit subsequent instructions
1490     // after them.
1491     if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
1492       FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));
1493     else
1494       FastIS->setLastLocalValue(nullptr);
1495   }
1496 
1497   bool Inserted = SwiftError->createEntriesInEntryBlock(SDB->getCurDebugLoc());
1498 
1499   if (FastIS && Inserted)
1500     FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt));
1501 
1502   if (isAssignmentTrackingEnabled(*Fn.getParent())) {
1503     assert(CurDAG->getFunctionVarLocs() &&
1504            "expected AssignmentTrackingAnalysis pass results");
1505     processSingleLocVars(*FuncInfo, CurDAG->getFunctionVarLocs());
1506   } else {
1507     processDbgDeclares(*FuncInfo);
1508   }
1509 
1510   // Iterate over all basic blocks in the function.
1511   StackProtector &SP = getAnalysis<StackProtector>();
1512   for (const BasicBlock *LLVMBB : RPOT) {
1513     if (OptLevel != CodeGenOpt::None) {
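      // PHI live-out register information can be computed precisely only once
      // every predecessor has been visited; otherwise it is invalidated below.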
1514       bool AllPredsVisited = true;
1515       for (const BasicBlock *Pred : predecessors(LLVMBB)) {
1516         if (!FuncInfo->VisitedBBs.count(Pred)) {
1517           AllPredsVisited = false;
1518           break;
1519         }
1520       }
1521 
1522       if (AllPredsVisited) {
1523         for (const PHINode &PN : LLVMBB->phis())
1524           FuncInfo->ComputePHILiveOutRegInfo(&PN);
1525       } else {
1526         for (const PHINode &PN : LLVMBB->phis())
1527           FuncInfo->InvalidatePHILiveOutRegInfo(&PN);
1528       }
1529 
1530       FuncInfo->VisitedBBs.insert(LLVMBB);
1531     }
1532 
1533     BasicBlock::const_iterator const Begin =
1534         LLVMBB->getFirstNonPHI()->getIterator();
1535     BasicBlock::const_iterator const End = LLVMBB->end();
1536     BasicBlock::const_iterator BI = End;
1537 
1538     FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
1539     if (!FuncInfo->MBB)
1540       continue; // Some blocks like catchpads have no code or MBB.
1541 
1542     // Insert new instructions after any phi or argument setup code.
1543     FuncInfo->InsertPt = FuncInfo->MBB->end();
1544 
1545     // Setup an EH landing-pad block.
1546     FuncInfo->ExceptionPointerVirtReg = 0;
1547     FuncInfo->ExceptionSelectorVirtReg = 0;
1548     if (LLVMBB->isEHPad())
1549       if (!PrepareEHLandingPad())
1550         continue;
1551 
1552     // Before doing SelectionDAG ISel, see if FastISel has been requested.
1553     if (FastIS) {
1554       if (LLVMBB != &Fn.getEntryBlock())
1555         FastIS->startNewBlock();
1556 
1557       unsigned NumFastIselRemaining = std::distance(Begin, End);
1558 
1559       // Pre-assign swifterror vregs.
1560       SwiftError->preassignVRegs(FuncInfo->MBB, Begin, End);
1561 
1562       // Do FastISel on as many instructions as possible.
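      // Iteration is bottom-up: BI walks from End toward Begin, so each
      // instruction is visited after all of its in-block users.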
1563       for (; BI != Begin; --BI) {
1564         const Instruction *Inst = &*std::prev(BI);
1565 
1566         // If we no longer require this instruction, skip it.
1567         if (isFoldedOrDeadInstruction(Inst, *FuncInfo) ||
1568             ElidedArgCopyInstrs.count(Inst)) {
1569           --NumFastIselRemaining;
1570           continue;
1571         }
1572 
1573         // Bottom-up: reset the insert pos at the top, after any local-value
1574         // instructions.
1575         FastIS->recomputeInsertPt();
1576 
1577         // Try to select the instruction with FastISel.
1578         if (FastIS->selectInstruction(Inst)) {
1579           --NumFastIselRemaining;
1580           ++NumFastIselSuccess;
1581           // If fast isel succeeded, skip over all the folded instructions, and
1582           // then see if there is a load right before the selected instructions.
1583           // Try to fold the load if so.
1584           const Instruction *BeforeInst = Inst;
1585           while (BeforeInst != &*Begin) {
1586             BeforeInst = &*std::prev(BasicBlock::const_iterator(BeforeInst));
1587             if (!isFoldedOrDeadInstruction(BeforeInst, *FuncInfo))
1588               break;
1589           }
1590           if (BeforeInst != Inst && isa<LoadInst>(BeforeInst) &&
1591               BeforeInst->hasOneUse() &&
1592               FastIS->tryToFoldLoad(cast<LoadInst>(BeforeInst), Inst)) {
1593             // If we succeeded, don't re-select the load.
1594             LLVM_DEBUG(dbgs()
1595                        << "FastISel folded load: " << *BeforeInst << "\n");
1596             BI = std::next(BasicBlock::const_iterator(BeforeInst));
1597             --NumFastIselRemaining;
1598             ++NumFastIselSuccess;
1599           }
1600           continue;
1601         }
1602 
1603         FastISelFailed = true;
1604 
1605         // Then handle certain instructions as single-LLVM-Instruction blocks.
1606         // We cannot separate out GCrelocates to their own blocks since we need
1607         // to keep track of gc-relocates for a particular gc-statepoint. This is
1608         // done by SelectionDAGBuilder::LowerAsSTATEPOINT, called before
1609         // visitGCRelocate.
1610         if (isa<CallInst>(Inst) && !isa<GCStatepointInst>(Inst) &&
1611             !isa<GCRelocateInst>(Inst) && !isa<GCResultInst>(Inst)) {
1612           OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1613                                      Inst->getDebugLoc(), LLVMBB);
1614 
1615           R << "FastISel missed call";
1616 
1617           if (R.isEnabled() || EnableFastISelAbort) {
1618             std::string InstStrStorage;
1619             raw_string_ostream InstStr(InstStrStorage);
1620             InstStr << *Inst;
1621 
1622             R << ": " << InstStr.str();
1623           }
1624 
1625           reportFastISelFailure(*MF, *ORE, R, EnableFastISelAbort > 2);
1626 
1627           if (!Inst->getType()->isVoidTy() && !Inst->getType()->isTokenTy() &&
1628               !Inst->use_empty()) {
1629             Register &R = FuncInfo->ValueMap[Inst];
1630             if (!R)
1631               R = FuncInfo->CreateRegs(Inst);
1632           }
1633 
1634           bool HadTailCall = false;
1635           MachineBasicBlock::iterator SavedInsertPt = FuncInfo->InsertPt;
1636           SelectBasicBlock(Inst->getIterator(), BI, HadTailCall);
1637 
1638           // If the call was emitted as a tail call, we're done with the block.
1639           // We also need to delete any previously emitted instructions.
1640           if (HadTailCall) {
1641             FastIS->removeDeadCode(SavedInsertPt, FuncInfo->MBB->end());
1642             --BI;
1643             break;
1644           }
1645 
1646           // Recompute NumFastIselRemaining as Selection DAG instruction
1647           // selection may have handled the call, input args, etc.
1648           unsigned RemainingNow = std::distance(Begin, BI);
1649           NumFastIselFailures += NumFastIselRemaining - RemainingNow;
1650           NumFastIselRemaining = RemainingNow;
1651           continue;
1652         }
1653 
1654         OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1655                                    Inst->getDebugLoc(), LLVMBB);
1656 
1657         bool ShouldAbort = EnableFastISelAbort;
1658         if (Inst->isTerminator()) {
1659           // Use a different message for terminator misses.
1660           R << "FastISel missed terminator";
1661           // Don't abort for terminator unless the level is really high
1662           ShouldAbort = (EnableFastISelAbort > 2);
1663         } else {
1664           R << "FastISel missed";
1665         }
1666 
1667         if (R.isEnabled() || EnableFastISelAbort) {
1668           std::string InstStrStorage;
1669           raw_string_ostream InstStr(InstStrStorage);
1670           InstStr << *Inst;
1671           R << ": " << InstStr.str();
1672         }
1673 
1674         reportFastISelFailure(*MF, *ORE, R, ShouldAbort);
1675 
1676         NumFastIselFailures += NumFastIselRemaining;
1677         break;
1678       }
1679 
1680       FastIS->recomputeInsertPt();
1681     }
1682 
1683     if (SP.shouldEmitSDCheck(*LLVMBB)) {
1684       bool FunctionBasedInstrumentation =
1685           TLI->getSSPStackGuardCheck(*Fn.getParent());
1686       SDB->SPDescriptor.initialize(LLVMBB, FuncInfo->MBBMap[LLVMBB],
1687                                    FunctionBasedInstrumentation);
1688     }
1689 
1690     if (Begin != BI)
1691       ++NumDAGBlocks;
1692     else
1693       ++NumFastIselBlocks;
1694 
1695     if (Begin != BI) {
1696       // Run SelectionDAG instruction selection on the remainder of the block
1697       // not handled by FastISel. If FastISel is not run, this is the entire
1698       // block.
1699       bool HadTailCall;
1700       SelectBasicBlock(Begin, BI, HadTailCall);
1701 
1702       // But if FastISel was run, we already selected some of the block.
1703       // If we emitted a tail-call, we need to delete any previously emitted
1704       // instruction that follows it.
1705       if (FastIS && HadTailCall && FuncInfo->InsertPt != FuncInfo->MBB->end())
1706         FastIS->removeDeadCode(FuncInfo->InsertPt, FuncInfo->MBB->end());
1707     }
1708 
1709     if (FastIS)
1710       FastIS->finishBasicBlock();
1711     FinishBasicBlock();
1712     FuncInfo->PHINodesToUpdate.clear();
1713     ElidedArgCopyInstrs.clear();
1714   }
1715 
1716   // AsynchEH: Report Block State under -AsynchEH
1717   if (Fn.getParent()->getModuleFlag("eh-asynch"))
1718     reportIPToStateForBlocks(MF);
1719 
1720   SP.copyToMachineFrameInfo(MF->getFrameInfo());
1721 
1722   SwiftError->propagateVRegs();
1723 
1724   delete FastIS;
1725   SDB->clearDanglingDebugInfo();
1726   SDB->SPDescriptor.resetPerFunctionState();
1727 }
1728 
1729 void
1730 SelectionDAGISel::FinishBasicBlock() {
1731   LLVM_DEBUG(dbgs() << "Total amount of phi nodes to update: "
1732                     << FuncInfo->PHINodesToUpdate.size() << "\n";
1733              for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e;
1734                   ++i) dbgs()
1735              << "Node " << i << " : (" << FuncInfo->PHINodesToUpdate[i].first
1736              << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
1737 
1738   // Next, now that we know what the last MBB the LLVM BB expanded is, update
1739   // PHI nodes in successors.
1740   for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
1741     MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[i].first);
1742     assert(PHI->isPHI() &&
1743            "This is not a machine PHI node that we are updating!");
1744     if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
1745       continue;
1746     PHI.addReg(FuncInfo->PHINodesToUpdate[i].second).addMBB(FuncInfo->MBB);
1747   }
1748 
1749   // Handle stack protector.
1750   if (SDB->SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
1751     // The target provides a guard check function. There is no need to
1752     // generate error handling code or to split current basic block.
1753     MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1754 
1755     // Add the load and check to the basic block.
1756     FuncInfo->MBB = ParentMBB;
1757     FuncInfo->InsertPt =
1758         findSplitPointForStackProtector(ParentMBB, *TII);
1759     SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
1760     CurDAG->setRoot(SDB->getRoot());
1761     SDB->clear();
1762     CodeGenAndEmitDAG();
1763 
1764     // Clear the Per-BB State.
1765     SDB->SPDescriptor.resetPerBBState();
1766   } else if (SDB->SPDescriptor.shouldEmitStackProtector()) {
1767     MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1768     MachineBasicBlock *SuccessMBB = SDB->SPDescriptor.getSuccessMBB();
1769 
1770     // Find the split point to split the parent mbb. At the same time copy all
1771     // physical registers used in the tail of parent mbb into virtual registers
1772     // before the split point and back into physical registers after the split
1773     // point. This prevents us needing to deal with Live-ins and many other
1774     // register allocation issues caused by us splitting the parent mbb. The
1775     // register allocator will clean up said virtual copies later on.
1776     MachineBasicBlock::iterator SplitPoint =
1777         findSplitPointForStackProtector(ParentMBB, *TII);
1778 
1779     // Splice the terminator of ParentMBB into SuccessMBB.
1780     SuccessMBB->splice(SuccessMBB->end(), ParentMBB,
1781                        SplitPoint,
1782                        ParentMBB->end());
1783 
1784     // Add the compare, jump-on-not-equal, and final jump to the parent BB.
1785     FuncInfo->MBB = ParentMBB;
1786     FuncInfo->InsertPt = ParentMBB->end();
1787     SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
1788     CurDAG->setRoot(SDB->getRoot());
1789     SDB->clear();
1790     CodeGenAndEmitDAG();
1791 
1792     // CodeGen Failure MBB if we have not codegened it yet.
1793     MachineBasicBlock *FailureMBB = SDB->SPDescriptor.getFailureMBB();
1794     if (FailureMBB->empty()) {
1795       FuncInfo->MBB = FailureMBB;
1796       FuncInfo->InsertPt = FailureMBB->end();
1797       SDB->visitSPDescriptorFailure(SDB->SPDescriptor);
1798       CurDAG->setRoot(SDB->getRoot());
1799       SDB->clear();
1800       CodeGenAndEmitDAG();
1801     }
1802 
1803     // Clear the Per-BB State.
1804     SDB->SPDescriptor.resetPerBBState();
1805   }
1806 
1807   // Lower each BitTestBlock.
1808   for (auto &BTB : SDB->SL->BitTestCases) {
1809     // Lower header first, if it wasn't already lowered
1810     if (!BTB.Emitted) {
1811       // Set the current basic block to the mbb we wish to insert the code into
1812       FuncInfo->MBB = BTB.Parent;
1813       FuncInfo->InsertPt = FuncInfo->MBB->end();
1814       // Emit the code
1815       SDB->visitBitTestHeader(BTB, FuncInfo->MBB);
1816       CurDAG->setRoot(SDB->getRoot());
1817       SDB->clear();
1818       CodeGenAndEmitDAG();
1819     }
1820 
1821     BranchProbability UnhandledProb = BTB.Prob;
1822     for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
1823       UnhandledProb -= BTB.Cases[j].ExtraProb;
1824       // Set the current basic block to the mbb we wish to insert the code into
1825       FuncInfo->MBB = BTB.Cases[j].ThisBB;
1826       FuncInfo->InsertPt = FuncInfo->MBB->end();
1827       // Emit the code
1828 
1829       // If all cases cover a contiguous range, it is not necessary to jump to
1830       // the default block after the last bit test fails. This is because the
1831       // range check during bit test header creation has guaranteed that every
1832       // case here doesn't go outside the range. In this case, there is no need
1833       // to perform the last bit test, as it will always be true. Instead, make
1834       // the second-to-last bit-test fall through to the target of the last bit
1835       // test, and delete the last bit test.
1836 
1837       MachineBasicBlock *NextMBB;
1838       if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
1839         // Second-to-last bit-test with contiguous range or omitted range
1840         // check: fall through to the target of the final bit test.
1841         NextMBB = BTB.Cases[j + 1].TargetBB;
1842       } else if (j + 1 == ej) {
1843         // For the last bit test, fall through to Default.
1844         NextMBB = BTB.Default;
1845       } else {
1846         // Otherwise, fall through to the next bit test.
1847         NextMBB = BTB.Cases[j + 1].ThisBB;
1848       }
1849 
1850       SDB->visitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j],
1851                             FuncInfo->MBB);
1852 
1853       CurDAG->setRoot(SDB->getRoot());
1854       SDB->clear();
1855       CodeGenAndEmitDAG();
1856 
1857       if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
1858         // Since we're not going to use the final bit test, remove it.
1859         BTB.Cases.pop_back();
1860         break;
1861       }
1862     }
1863 
1864     // Update PHI Nodes
1865     for (const std::pair<MachineInstr *, unsigned> &P :
1866          FuncInfo->PHINodesToUpdate) {
1867       MachineInstrBuilder PHI(*MF, P.first);
1868       MachineBasicBlock *PHIBB = PHI->getParent();
1869       assert(PHI->isPHI() &&
1870              "This is not a machine PHI node that we are updating!");
1871       // This is the "default" BB. There are two jumps to it: from the "header"
1872       // BB and from the last "case" BB, unless the latter was skipped.
1873       if (PHIBB == BTB.Default) {
1874         PHI.addReg(P.second).addMBB(BTB.Parent);
1875         if (!BTB.ContiguousRange) {
1876           PHI.addReg(P.second).addMBB(BTB.Cases.back().ThisBB);
1877         }
1878       }
1879       // One of "cases" BB.
1880       for (const SwitchCG::BitTestCase &BT : BTB.Cases) {
1881         MachineBasicBlock* cBB = BT.ThisBB;
1882         if (cBB->isSuccessor(PHIBB))
1883           PHI.addReg(P.second).addMBB(cBB);
1884       }
1885     }
1886   }
1887   SDB->SL->BitTestCases.clear();
1888 
1889   // If the JumpTable record is filled in, then we need to emit a jump table.
1890   // Updating the PHI nodes is tricky in this case, since we need to determine
1891   // whether the PHI is a successor of the range check MBB or the jump table MBB
1892   for (unsigned i = 0, e = SDB->SL->JTCases.size(); i != e; ++i) {
1893     // Lower header first, if it wasn't already lowered
1894     if (!SDB->SL->JTCases[i].first.Emitted) {
1895       // Set the current basic block to the mbb we wish to insert the code into
1896       FuncInfo->MBB = SDB->SL->JTCases[i].first.HeaderBB;
1897       FuncInfo->InsertPt = FuncInfo->MBB->end();
1898       // Emit the code
1899       SDB->visitJumpTableHeader(SDB->SL->JTCases[i].second,
1900                                 SDB->SL->JTCases[i].first, FuncInfo->MBB);
1901       CurDAG->setRoot(SDB->getRoot());
1902       SDB->clear();
1903       CodeGenAndEmitDAG();
1904     }
1905 
1906     // Set the current basic block to the mbb we wish to insert the code into
1907     FuncInfo->MBB = SDB->SL->JTCases[i].second.MBB;
1908     FuncInfo->InsertPt = FuncInfo->MBB->end();
1909     // Emit the code
1910     SDB->visitJumpTable(SDB->SL->JTCases[i].second);
1911     CurDAG->setRoot(SDB->getRoot());
1912     SDB->clear();
1913     CodeGenAndEmitDAG();
1914 
1915     // Update PHI Nodes
1916     for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
1917          pi != pe; ++pi) {
1918       MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
1919       MachineBasicBlock *PHIBB = PHI->getParent();
1920       assert(PHI->isPHI() &&
1921              "This is not a machine PHI node that we are updating!");
1922       // "default" BB. We can go there only from header BB.
1923       if (PHIBB == SDB->SL->JTCases[i].second.Default)
1924         PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second)
1925            .addMBB(SDB->SL->JTCases[i].first.HeaderBB);
1926       // JT BB. Just iterate over successors here
1927       if (FuncInfo->MBB->isSuccessor(PHIBB))
1928         PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(FuncInfo->MBB);
1929     }
1930   }
1931   SDB->SL->JTCases.clear();
1932 
1933   // If we generated any switch lowering information, build and codegen any
1934   // additional DAGs necessary.
1935   for (unsigned i = 0, e = SDB->SL->SwitchCases.size(); i != e; ++i) {
1936     // Set the current basic block to the mbb we wish to insert the code into
1937     FuncInfo->MBB = SDB->SL->SwitchCases[i].ThisBB;
1938     FuncInfo->InsertPt = FuncInfo->MBB->end();
1939 
1940     // Determine the unique successors.
1941     SmallVector<MachineBasicBlock *, 2> Succs;
1942     Succs.push_back(SDB->SL->SwitchCases[i].TrueBB);
1943     if (SDB->SL->SwitchCases[i].TrueBB != SDB->SL->SwitchCases[i].FalseBB)
1944       Succs.push_back(SDB->SL->SwitchCases[i].FalseBB);
1945 
1946     // Emit the code. Note that this could result in FuncInfo->MBB being split.
1947     SDB->visitSwitchCase(SDB->SL->SwitchCases[i], FuncInfo->MBB);
1948     CurDAG->setRoot(SDB->getRoot());
1949     SDB->clear();
1950     CodeGenAndEmitDAG();
1951 
1952     // Remember the last block, now that any splitting is done, for use in
1953     // populating PHI nodes in successors.
1954     MachineBasicBlock *ThisBB = FuncInfo->MBB;
1955 
1956     // Handle any PHI nodes in successors of this chunk, as if we were coming
1957     // from the original BB before switch expansion.  Note that PHI nodes can
1958     // occur multiple times in PHINodesToUpdate.  We have to be very careful to
1959     // handle them the right number of times.
1960     for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
1961       FuncInfo->MBB = Succs[i];
1962       FuncInfo->InsertPt = FuncInfo->MBB->end();
1963       // FuncInfo->MBB may have been removed from the CFG if a branch was
1964       // constant folded.
1965       if (ThisBB->isSuccessor(FuncInfo->MBB)) {
1966         for (MachineBasicBlock::iterator
1967              MBBI = FuncInfo->MBB->begin(), MBBE = FuncInfo->MBB->end();
1968              MBBI != MBBE && MBBI->isPHI(); ++MBBI) {
1969           MachineInstrBuilder PHI(*MF, MBBI);
1970           // This value for this PHI node is recorded in PHINodesToUpdate.
1971           for (unsigned pn = 0; ; ++pn) {
1972             assert(pn != FuncInfo->PHINodesToUpdate.size() &&
1973                    "Didn't find PHI entry!");
1974             if (FuncInfo->PHINodesToUpdate[pn].first == PHI) {
1975               PHI.addReg(FuncInfo->PHINodesToUpdate[pn].second).addMBB(ThisBB);
1976               break;
1977             }
1978           }
1979         }
1980       }
1981     }
1982   }
1983   SDB->SL->SwitchCases.clear();
1984 }
1985 
1986 /// Create the scheduler. If a specific scheduler was specified
1987 /// via the SchedulerRegistry, use it, otherwise select the
1988 /// one preferred by the target.
1989 ///
1990 ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
1991   return ISHeuristic(this, OptLevel);
1992 }
1993 
1994 //===----------------------------------------------------------------------===//
1995 // Helper functions used by the generated instruction selector.
1996 //===----------------------------------------------------------------------===//
1997 // Calls to these methods are generated by tblgen.
1998 
1999 /// CheckAndMask - The isel is trying to match something like (and X, 255).  If
2000 /// the dag combiner simplified the 255, we still want to match.  RHS is the
2001 /// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
2002 /// specified in the .td file (e.g. 255).
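/// For example, if the pattern asks for (and X, 255) but the combiner shrank
/// the mask to 15 because the upper bits of X are already known to be zero,
/// the pattern should still match.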
2003 bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
2004                                     int64_t DesiredMaskS) const {
2005   const APInt &ActualMask = RHS->getAPIntValue();
2006   const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2007 
2008   // If the actual mask exactly matches, success!
2009   if (ActualMask == DesiredMask)
2010     return true;
2011 
2012   // If the actual AND mask allows bits the desired mask does not, this doesn't match.
2013   if (!ActualMask.isSubsetOf(DesiredMask))
2014     return false;
2015 
2016   // Otherwise, the DAG Combiner may have proven that the value coming in is
2017   // either already zero or is not demanded.  Check for known zero input bits.
2018   APInt NeededMask = DesiredMask & ~ActualMask;
2019   if (CurDAG->MaskedValueIsZero(LHS, NeededMask))
2020     return true;
2021 
2022   // TODO: check to see if missing bits are just not demanded.
2023 
2024   // Otherwise, this pattern doesn't match.
2025   return false;
2026 }
2027 
2028 /// CheckOrMask - The isel is trying to match something like (or X, 255).  If
2029 /// the dag combiner simplified the 255, we still want to match.  RHS is the
2030 /// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
2031 /// specified in the .td file (e.g. 255).
2032 bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
2033                                    int64_t DesiredMaskS) const {
2034   const APInt &ActualMask = RHS->getAPIntValue();
2035   const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2036 
2037   // If the actual mask exactly matches, success!
2038   if (ActualMask == DesiredMask)
2039     return true;
2040 
2041   // If the actual OR mask sets bits the desired mask does not, this doesn't match.
2042   if (!ActualMask.isSubsetOf(DesiredMask))
2043     return false;
2044 
2045   // Otherwise, the DAG Combiner may have proven that the bits missing from the
2046   // actual mask are already set in the input.  Check for known one input bits.
2047   APInt NeededMask = DesiredMask & ~ActualMask;
2048   KnownBits Known = CurDAG->computeKnownBits(LHS);
2049 
2050   // If all the missing bits in the or are already known to be set, match!
2051   if (NeededMask.isSubsetOf(Known.One))
2052     return true;
2053 
2054   // TODO: check to see if missing bits are just not demanded.
2055 
2056   // Otherwise, this pattern doesn't match.
2057   return false;
2058 }
2059 
2060 /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
2061 /// by tblgen.  Others should not call it.
2062 void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
2063                                                      const SDLoc &DL) {
2064   std::vector<SDValue> InOps;
2065   std::swap(InOps, Ops);
2066 
2067   Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
2068   Ops.push_back(InOps[InlineAsm::Op_AsmString]);  // 1
2069   Ops.push_back(InOps[InlineAsm::Op_MDNode]);     // 2, !srcloc
2070   Ops.push_back(InOps[InlineAsm::Op_ExtraInfo]);  // 3 (SideEffect, AlignStack)
2071 
2072   unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
2073   if (InOps[e-1].getValueType() == MVT::Glue)
2074     --e;  // Don't process a glue operand if it is here.
2075 
2076   while (i != e) {
2077     unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
2078     if (!InlineAsm::isMemKind(Flags) && !InlineAsm::isFuncKind(Flags)) {
2079       // Just skip over this operand, copying the operands verbatim.
2080       Ops.insert(Ops.end(), InOps.begin()+i,
2081                  InOps.begin()+i+InlineAsm::getNumOperandRegisters(Flags) + 1);
2082       i += InlineAsm::getNumOperandRegisters(Flags) + 1;
2083     } else {
2084       assert(InlineAsm::getNumOperandRegisters(Flags) == 1 &&
2085              "Memory operand with multiple values?");
2086 
2087       unsigned TiedToOperand;
2088       if (InlineAsm::isUseOperandTiedToDef(Flags, TiedToOperand)) {
2089         // We need the constraint ID from the operand this is tied to.
2090         unsigned CurOp = InlineAsm::Op_FirstOperand;
2091         Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
2092         for (; TiedToOperand; --TiedToOperand) {
2093           CurOp += InlineAsm::getNumOperandRegisters(Flags)+1;
2094           Flags = cast<ConstantSDNode>(InOps[CurOp])->getZExtValue();
2095         }
2096       }
2097 
2098       // Otherwise, this is a memory operand.  Ask the target to select it.
2099       std::vector<SDValue> SelOps;
2100       unsigned ConstraintID = InlineAsm::getMemoryConstraintID(Flags);
2101       if (SelectInlineAsmMemoryOperand(InOps[i+1], ConstraintID, SelOps))
2102         report_fatal_error("Could not match memory address.  Inline asm"
2103                            " failure!");
2104 
2105       // Add this to the output node.
2106       unsigned NewFlags =
2107           InlineAsm::isMemKind(Flags)
2108               ? InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size())
2109               : InlineAsm::getFlagWord(InlineAsm::Kind_Func, SelOps.size());
2110       NewFlags = InlineAsm::getFlagWordForMem(NewFlags, ConstraintID);
2111       Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32));
2112       llvm::append_range(Ops, SelOps);
2113       i += 2;
2114     }
2115   }
2116 
2117   // Add the glue input back if present.
2118   if (e != InOps.size())
2119     Ops.push_back(InOps.back());
2120 }
2121 
2122 /// findGlueUse - Return use of MVT::Glue value produced by the specified
2123 /// SDNode.
2124 ///
2125 static SDNode *findGlueUse(SDNode *N) {
2126   unsigned FlagResNo = N->getNumValues()-1;
2127   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
2128     SDUse &Use = I.getUse();
2129     if (Use.getResNo() == FlagResNo)
2130       return Use.getUser();
2131   }
2132   return nullptr;
2133 }
2134 
2135 /// findNonImmUse - Return true if "Def" is a predecessor of "Root" via a path
2136 /// beyond "ImmedUse".  We may ignore chains as they are checked separately.
2137 static bool findNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
2138                           bool IgnoreChains) {
2139   SmallPtrSet<const SDNode *, 16> Visited;
2140   SmallVector<const SDNode *, 16> WorkList;
2141   // Only check if we have non-immediate uses of Def.
2142   if (ImmedUse->isOnlyUserOf(Def))
2143     return false;
2144 
2145   // We don't care about paths to Def that go through ImmedUse, so mark it
2146   // visited and seed the worklist with its non-Def operands.
2147   Visited.insert(ImmedUse);
2148   for (const SDValue &Op : ImmedUse->op_values()) {
2149     SDNode *N = Op.getNode();
2150     // Ignore chain deps (they are validated by
2151     // HandleMergeInputChains) and immediate uses
2152     if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2153       continue;
2154     if (!Visited.insert(N).second)
2155       continue;
2156     WorkList.push_back(N);
2157   }
2158 
2159   // Initialize worklist to operands of Root.
2160   if (Root != ImmedUse) {
2161     for (const SDValue &Op : Root->op_values()) {
2162       SDNode *N = Op.getNode();
2163       // Ignore chains (they are validated by HandleMergeInputChains)
2164       if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2165         continue;
2166       if (!Visited.insert(N).second)
2167         continue;
2168       WorkList.push_back(N);
2169     }
2170   }
2171 
2172   return SDNode::hasPredecessorHelper(Def, Visited, WorkList, 0, true);
2173 }
2174 
2175 /// IsProfitableToFold - Returns true if it's profitable to fold the specific
2176 /// operand node N of U during instruction selection that starts at Root.
2177 bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
2178                                           SDNode *Root) const {
2179   if (OptLevel == CodeGenOpt::None) return false;
2180   return N.hasOneUse();
2181 }
2182 
2183 /// IsLegalToFold - Returns true if the specific operand node N of
2184 /// U can be folded during instruction selection that starts at Root.
2185 bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
2186                                      CodeGenOpt::Level OptLevel,
2187                                      bool IgnoreChains) {
2188   if (OptLevel == CodeGenOpt::None) return false;
2189 
2190   // If the Root use can somehow reach N through a path that doesn't contain
2191   // U, then folding N would create a cycle. E.g., in the following
2192   // diagram, Root can reach N through X. If N is folded into Root, then
2193   // X is both a predecessor and a successor of U.
2194   //
2195   //          [N*]           //
2196   //         ^   ^           //
2197   //        /     \          //
2198   //      [U*]    [X]?       //
2199   //        ^     ^          //
2200   //         \   /           //
2201   //          \ /            //
2202   //         [Root*]         //
2203   //
2204   // * indicates nodes to be folded together.
2205   //
2206   // If Root produces glue, then it gets (even more) interesting. Since it
2207   // will be "glued" together with its glue use in the scheduler, we need to
2208   // check if it might reach N.
2209   //
2210   //          [N*]           //
2211   //         ^   ^           //
2212   //        /     \          //
2213   //      [U*]    [X]?       //
2214   //        ^       ^        //
2215   //         \       \       //
2216   //          \      |       //
2217   //         [Root*] |       //
2218   //          ^      |       //
2219   //          f      |       //
2220   //          |      /       //
2221   //         [Y]    /        //
2222   //           ^   /         //
2223   //           f  /          //
2224   //           | /           //
2225   //          [GU]           //
2226   //
2227   // If GU (glue use) indirectly reaches N (the load), and Root folds N
2228   // (call it Fold), then X is a predecessor of GU and a successor of
2229   // Fold. But since Fold and GU are glued together, this will create
2230   // a cycle in the scheduling graph.
2231 
2232   // If the node has glue, walk down the graph to the "lowest" node in the
2233   // glued set.
2234   EVT VT = Root->getValueType(Root->getNumValues()-1);
2235   while (VT == MVT::Glue) {
2236     SDNode *GU = findGlueUse(Root);
2237     if (!GU)
2238       break;
2239     Root = GU;
2240     VT = Root->getValueType(Root->getNumValues()-1);
2241 
2242     // If our query node has a glue result with a use, we've walked up it.  If
2243     // the user (which has already been selected) has a chain or indirectly uses
2244     // the chain, HandleMergeInputChains will not consider it.  Because of
2245     // this, we cannot ignore chains in this predicate.
2246     IgnoreChains = false;
2247   }
2248 
2249   return !findNonImmUse(Root, N.getNode(), U, IgnoreChains);
2250 }
2251 
2252 void SelectionDAGISel::Select_INLINEASM(SDNode *N) {
2253   SDLoc DL(N);
2254 
2255   std::vector<SDValue> Ops(N->op_begin(), N->op_end());
2256   SelectInlineAsmMemoryOperands(Ops, DL);
2257 
2258   const EVT VTs[] = {MVT::Other, MVT::Glue};
2259   SDValue New = CurDAG->getNode(N->getOpcode(), DL, VTs, Ops);
2260   New->setNodeId(-1);
2261   ReplaceUses(N, New.getNode());
2262   CurDAG->RemoveDeadNode(N);
2263 }
2264 
2265 void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
2266   SDLoc dl(Op);
2267   MDNodeSDNode *MD = cast<MDNodeSDNode>(Op->getOperand(1));
2268   const MDString *RegStr = cast<MDString>(MD->getMD()->getOperand(0));
2269 
2270   EVT VT = Op->getValueType(0);
2271   LLT Ty = VT.isSimple() ? getLLTForMVT(VT.getSimpleVT()) : LLT();
2272   Register Reg =
2273       TLI->getRegisterByName(RegStr->getString().data(), Ty,
2274                              CurDAG->getMachineFunction());
2275   SDValue New = CurDAG->getCopyFromReg(
2276                         Op->getOperand(0), dl, Reg, Op->getValueType(0));
2277   New->setNodeId(-1);
2278   ReplaceUses(Op, New.getNode());
2279   CurDAG->RemoveDeadNode(Op);
2280 }
2281 
2282 void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
2283   SDLoc dl(Op);
2284   MDNodeSDNode *MD = cast<MDNodeSDNode>(Op->getOperand(1));
2285   const MDString *RegStr = cast<MDString>(MD->getMD()->getOperand(0));
2286 
2287   EVT VT = Op->getOperand(2).getValueType();
2288   LLT Ty = VT.isSimple() ? getLLTForMVT(VT.getSimpleVT()) : LLT();
2289 
2290   Register Reg = TLI->getRegisterByName(RegStr->getString().data(), Ty,
2291                                         CurDAG->getMachineFunction());
2292   SDValue New = CurDAG->getCopyToReg(
2293                         Op->getOperand(0), dl, Reg, Op->getOperand(2));
2294   New->setNodeId(-1);
2295   ReplaceUses(Op, New.getNode());
2296   CurDAG->RemoveDeadNode(Op);
2297 }
2298 
2299 void SelectionDAGISel::Select_UNDEF(SDNode *N) {
2300   CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
2301 }
2302 
2303 void SelectionDAGISel::Select_FREEZE(SDNode *N) {
2304   // TODO: There is no FREEZE pseudo-instruction at the MachineInstr level yet.
2305   // If one is added later, the code below must be changed as well.
2307   CurDAG->SelectNodeTo(N, TargetOpcode::COPY, N->getValueType(0),
2308                        N->getOperand(0));
2309 }
2310 
2311 void SelectionDAGISel::Select_ARITH_FENCE(SDNode *N) {
2312   CurDAG->SelectNodeTo(N, TargetOpcode::ARITH_FENCE, N->getValueType(0),
2313                        N->getOperand(0));
2314 }
2315 
2316 void SelectionDAGISel::Select_MEMBARRIER(SDNode *N) {
2317   CurDAG->SelectNodeTo(N, TargetOpcode::MEMBARRIER, N->getValueType(0),
2318                        N->getOperand(0));
2319 }
2320 
2321 void SelectionDAGISel::pushStackMapLiveVariable(SmallVectorImpl<SDValue> &Ops,
2322                                                 SDValue OpVal, SDLoc DL) {
2323   SDNode *OpNode = OpVal.getNode();
2324 
2325   // FrameIndex nodes should have been directly emitted to TargetFrameIndex
2326   // nodes at DAG-construction time.
2327   assert(OpNode->getOpcode() != ISD::FrameIndex);
2328 
2329   if (OpNode->getOpcode() == ISD::Constant) {
2330     Ops.push_back(
2331         CurDAG->getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
2332     Ops.push_back(
2333         CurDAG->getTargetConstant(cast<ConstantSDNode>(OpNode)->getZExtValue(),
2334                                   DL, OpVal.getValueType()));
2335   } else {
2336     Ops.push_back(OpVal);
2337   }
2338 }
2339 
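// Lower ISD::STACKMAP: the incoming chain and glue operands come first on the
// node but are moved to the end of the target node's operand list, after the
// <id>, <numShadowBytes> and live-variable operands.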
2340 void SelectionDAGISel::Select_STACKMAP(SDNode *N) {
2341   SmallVector<SDValue, 32> Ops;
2342   auto *It = N->op_begin();
2343   SDLoc DL(N);
2344 
2345   // Stash the chain and glue operands so we can move them to the end.
2346   SDValue Chain = *It++;
2347   SDValue InGlue = *It++;
2348 
2349   // <id> operand.
2350   SDValue ID = *It++;
2351   assert(ID.getValueType() == MVT::i64);
2352   Ops.push_back(ID);
2353 
2354   // <numShadowBytes> operand.
2355   SDValue Shad = *It++;
2356   assert(Shad.getValueType() == MVT::i32);
2357   Ops.push_back(Shad);
2358 
2359   // Live variable operands.
2360   for (; It != N->op_end(); It++)
2361     pushStackMapLiveVariable(Ops, *It, DL);
2362 
2363   Ops.push_back(Chain);
2364   Ops.push_back(InGlue);
2365 
2366   SDVTList NodeTys = CurDAG->getVTList(MVT::Other, MVT::Glue);
2367   CurDAG->SelectNodeTo(N, TargetOpcode::STACKMAP, NodeTys, Ops);
2368 }
2369 
2370 void SelectionDAGISel::Select_PATCHPOINT(SDNode *N) {
2371   SmallVector<SDValue, 32> Ops;
2372   auto *It = N->op_begin();
2373   SDLoc DL(N);
2374 
2375   // Cache arguments that will be moved to the end in the target node.
2376   SDValue Chain = *It++;
2377   std::optional<SDValue> Glue;
2378   if (It->getValueType() == MVT::Glue)
2379     Glue = *It++;
2380   SDValue RegMask = *It++;
2381 
2382   // <id> operand.
2383   SDValue ID = *It++;
2384   assert(ID.getValueType() == MVT::i64);
2385   Ops.push_back(ID);
2386 
2387   // <numShadowBytes> operand.
2388   SDValue Shad = *It++;
2389   assert(Shad.getValueType() == MVT::i32);
2390   Ops.push_back(Shad);
2391 
2392   // Add the callee.
2393   Ops.push_back(*It++);
2394 
2395   // Add <numArgs>.
2396   SDValue NumArgs = *It++;
2397   assert(NumArgs.getValueType() == MVT::i32);
2398   Ops.push_back(NumArgs);
2399 
2400   // Calling convention.
2401   Ops.push_back(*It++);
2402 
2403   // Push the args for the call.
2404   for (uint64_t I = cast<ConstantSDNode>(NumArgs)->getZExtValue(); I != 0; I--)
2405     Ops.push_back(*It++);
2406 
2407   // Now push the live variables.
2408   for (; It != N->op_end(); It++)
2409     pushStackMapLiveVariable(Ops, *It, DL);
2410 
2411   // Finally, the regmask, chain and (if present) glue are moved to the end.
2412   Ops.push_back(RegMask);
2413   Ops.push_back(Chain);
2414   if (Glue.has_value())
2415     Ops.push_back(*Glue);
2416 
2417   SDVTList NodeTys = N->getVTList();
2418   CurDAG->SelectNodeTo(N, TargetOpcode::PATCHPOINT, NodeTys, Ops);
2419 }
2420 
2421 /// GetVBR - decode a vbr encoding whose top bit is set.
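/// Each byte holds seven payload bits, and a set top bit marks a continuation
/// byte. For example, the bytes {0x82, 0x01} decode to 2 | (1 << 7) = 130.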
2422 LLVM_ATTRIBUTE_ALWAYS_INLINE static uint64_t
2423 GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
2424   assert(Val >= 128 && "Not a VBR");
2425   Val &= 127;  // Remove first vbr bit.
2426 
2427   unsigned Shift = 7;
2428   uint64_t NextBits;
2429   do {
2430     NextBits = MatcherTable[Idx++];
2431     Val |= (NextBits&127) << Shift;
2432     Shift += 7;
2433   } while (NextBits & 128);
2434 
2435   return Val;
2436 }
2437 
2438 /// When a match is complete, this method updates uses of interior chain results
2439 /// to use the new results.
2440 void SelectionDAGISel::UpdateChains(
2441     SDNode *NodeToMatch, SDValue InputChain,
2442     SmallVectorImpl<SDNode *> &ChainNodesMatched, bool isMorphNodeTo) {
2443   SmallVector<SDNode*, 4> NowDeadNodes;
2444 
2445   // Now that all the normal results are replaced, we replace the chain and
2446   // glue results if present.
2447   if (!ChainNodesMatched.empty()) {
2448     assert(InputChain.getNode() &&
2449            "Matched input chains but didn't produce a chain");
2450     // Loop over all of the nodes we matched that produced a chain result.
2451     // Replace all the chain results with the final chain we ended up with.
2452     for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
2453       SDNode *ChainNode = ChainNodesMatched[i];
2454       // If ChainNode is null, it's because we replaced it on a previous
2455       // iteration and we cleared it out of the map. Just skip it.
2456       if (!ChainNode)
2457         continue;
2458 
2459       assert(ChainNode->getOpcode() != ISD::DELETED_NODE &&
2460              "Deleted node left in chain");
2461 
2462       // Don't replace the results of the root node if we're doing a
2463       // MorphNodeTo.
2464       if (ChainNode == NodeToMatch && isMorphNodeTo)
2465         continue;
2466 
2467       SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
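      // If the node also produces glue, the chain result is the value just
      // before the glue result.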
2468       if (ChainVal.getValueType() == MVT::Glue)
2469         ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
2470       assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
2471       SelectionDAG::DAGNodeDeletedListener NDL(
2472           *CurDAG, [&](SDNode *N, SDNode *E) {
2473             std::replace(ChainNodesMatched.begin(), ChainNodesMatched.end(), N,
2474                          static_cast<SDNode *>(nullptr));
2475           });
2476       if (ChainNode->getOpcode() != ISD::TokenFactor)
2477         ReplaceUses(ChainVal, InputChain);
2478 
2479       // If the node became dead and we haven't already seen it, delete it.
2480       if (ChainNode != NodeToMatch && ChainNode->use_empty() &&
2481           !llvm::is_contained(NowDeadNodes, ChainNode))
2482         NowDeadNodes.push_back(ChainNode);
2483     }
2484   }
2485 
2486   if (!NowDeadNodes.empty())
2487     CurDAG->RemoveDeadNodes(NowDeadNodes);
2488 
2489   LLVM_DEBUG(dbgs() << "ISEL: Match complete!\n");
2490 }
2491 
2492 /// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
2493 /// operation for when the pattern matched at least one node with a chain.  The
2494 /// input vector contains a list of all of the chained nodes that we match.  We
2495 /// must determine if this is a valid thing to cover (i.e. matching it won't
2496 /// induce cycles in the DAG) and, if so, create a TokenFactor node that will
2497 /// be used as the input node chain for the generated nodes.
2498 static SDValue
2499 HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
2500                        SelectionDAG *CurDAG) {
2501 
2502   SmallPtrSet<const SDNode *, 16> Visited;
2503   SmallVector<const SDNode *, 8> Worklist;
2504   SmallVector<SDValue, 3> InputChains;
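  // Upper bound on how many nodes the predecessor search below may visit.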
2505   unsigned int Max = 8192;
2506 
2507   // Quick exit on trivial merge.
2508   if (ChainNodesMatched.size() == 1)
2509     return ChainNodesMatched[0]->getOperand(0);
2510 
2511   // Add chains that aren't already added (internal). Peek through
2512   // token factors.
2513   std::function<void(const SDValue)> AddChains = [&](const SDValue V) {
2514     if (V.getValueType() != MVT::Other)
2515       return;
2516     if (V->getOpcode() == ISD::EntryToken)
2517       return;
2518     if (!Visited.insert(V.getNode()).second)
2519       return;
2520     if (V->getOpcode() == ISD::TokenFactor) {
2521       for (const SDValue &Op : V->op_values())
2522         AddChains(Op);
2523     } else
2524       InputChains.push_back(V);
2525   };
2526 
2527   for (auto *N : ChainNodesMatched) {
2528     Worklist.push_back(N);
2529     Visited.insert(N);
2530   }
2531 
2532   while (!Worklist.empty())
2533     AddChains(Worklist.pop_back_val()->getOperand(0));
2534 
2535   // Skip the search if there are no chain dependencies.
2536   if (InputChains.size() == 0)
2537     return CurDAG->getEntryNode();
2538 
2539   // If one of these chains is a successor of input, we must have a
2540   // node that is both the predecessor and successor of the
2541   // to-be-merged nodes. Fail.
2542   Visited.clear();
2543   for (SDValue V : InputChains)
2544     Worklist.push_back(V.getNode());
2545 
2546   for (auto *N : ChainNodesMatched)
2547     if (SDNode::hasPredecessorHelper(N, Visited, Worklist, Max, true))
2548       return SDValue();
2549 
2550   // Return merged chain.
2551   if (InputChains.size() == 1)
2552     return InputChains[0];
2553   return CurDAG->getNode(ISD::TokenFactor, SDLoc(ChainNodesMatched[0]),
2554                          MVT::Other, InputChains);
2555 }
2556 
2557 /// MorphNode - Handle morphing a node in place for the selector.
2558 SDNode *SelectionDAGISel::
2559 MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
2560           ArrayRef<SDValue> Ops, unsigned EmitNodeInfo) {
2561   // It is possible we're using MorphNodeTo to replace a node with no
2562   // normal results with one that has a normal result (or we could be
2563   // adding a chain) and the input could have glue and chains as well.
2564   // In this case we need to shift the operands down.
2565   // FIXME: This is a horrible hack and broken in obscure cases, no worse
2566   // than the old isel though.
2567   int OldGlueResultNo = -1, OldChainResultNo = -1;
2568 
2569   unsigned NTMNumResults = Node->getNumValues();
2570   if (Node->getValueType(NTMNumResults-1) == MVT::Glue) {
2571     OldGlueResultNo = NTMNumResults-1;
2572     if (NTMNumResults != 1 &&
2573         Node->getValueType(NTMNumResults-2) == MVT::Other)
2574       OldChainResultNo = NTMNumResults-2;
2575   } else if (Node->getValueType(NTMNumResults-1) == MVT::Other)
2576     OldChainResultNo = NTMNumResults-1;
2577 
2578   // Call the underlying SelectionDAG routine to do the transmogrification. Note
2579   // that this deletes operands of the old node that become dead.
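  // Machine opcodes are stored bitwise-complemented (as negative node types) in
  // SDNodes, hence the ~TargetOpc below.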
2580   SDNode *Res = CurDAG->MorphNodeTo(Node, ~TargetOpc, VTList, Ops);
2581 
2582   // MorphNodeTo can operate in two ways: if an existing node with the
2583   // specified operands exists, it can just return it.  Otherwise, it
2584   // updates the node in place to have the requested operands.
2585   if (Res == Node) {
2586     // If we updated the node in place, reset the node ID.  To the isel,
2587     // this should be just like a newly allocated machine node.
2588     Res->setNodeId(-1);
2589   }
2590 
2591   unsigned ResNumResults = Res->getNumValues();
2592   // Move the glue if needed.
2593   if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
2594       (unsigned)OldGlueResultNo != ResNumResults-1)
2595     ReplaceUses(SDValue(Node, OldGlueResultNo),
2596                 SDValue(Res, ResNumResults - 1));
2597 
2598   if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
2599     --ResNumResults;
2600 
2601   // Move the chain reference if needed.
2602   if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
2603       (unsigned)OldChainResultNo != ResNumResults-1)
2604     ReplaceUses(SDValue(Node, OldChainResultNo),
2605                 SDValue(Res, ResNumResults - 1));
2606 
2607   // If MorphNodeTo returned a pre-existing node rather than updating in place,
2608   // replace uses of the old node with that node.
2609   if (Res != Node) {
2610     ReplaceNode(Node, Res);
2611   } else {
2612     EnforceNodeIdInvariant(Res);
2613   }
2614 
2615   return Res;
2616 }
2617 
2618 /// CheckSame - Implements OP_CheckSame.
2619 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2620 CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2621           const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes) {
2622   // Accept if it is exactly the same as a previously recorded node.
2623   unsigned RecNo = MatcherTable[MatcherIndex++];
2624   assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2625   return N == RecordedNodes[RecNo].first;
2626 }
2627 
2628 /// CheckChildSame - Implements OP_CheckChildXSame.
2629 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool CheckChildSame(
2630     const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2631     const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes,
2632     unsigned ChildNo) {
2633   if (ChildNo >= N.getNumOperands())
2634     return false;  // Match fails if out of range child #.
2635   return ::CheckSame(MatcherTable, MatcherIndex, N.getOperand(ChildNo),
2636                      RecordedNodes);
2637 }
2638 
2639 /// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
2640 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2641 CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2642                       const SelectionDAGISel &SDISel) {
2643   return SDISel.CheckPatternPredicate(MatcherTable[MatcherIndex++]);
2644 }
2645 
2646 /// CheckNodePredicate - Implements OP_CheckNodePredicate.
2647 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2648 CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2649                    const SelectionDAGISel &SDISel, SDNode *N) {
2650   return SDISel.CheckNodePredicate(N, MatcherTable[MatcherIndex++]);
2651 }
2652 
2653 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2654 CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2655             SDNode *N) {
2656   uint16_t Opc = MatcherTable[MatcherIndex++];
2657   Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
2658   return N->getOpcode() == Opc;
2659 }
2660 
2661 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2662 CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2663           const TargetLowering *TLI, const DataLayout &DL) {
2664   MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2665   if (N.getValueType() == VT) return true;
2666 
2667   // Handle the case when VT is iPTR.
2668   return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL);
2669 }
2670 
2671 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2672 CheckChildType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2673                SDValue N, const TargetLowering *TLI, const DataLayout &DL,
2674                unsigned ChildNo) {
2675   if (ChildNo >= N.getNumOperands())
2676     return false;  // Match fails if out of range child #.
2677   return ::CheckType(MatcherTable, MatcherIndex, N.getOperand(ChildNo), TLI,
2678                      DL);
2679 }
2680 
2681 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2682 CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2683               SDValue N) {
2684   return cast<CondCodeSDNode>(N)->get() ==
2685       (ISD::CondCode)MatcherTable[MatcherIndex++];
2686 }
2687 
2688 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2689 CheckChild2CondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2690                     SDValue N) {
2691   if (2 >= N.getNumOperands())
2692     return false;
2693   return ::CheckCondCode(MatcherTable, MatcherIndex, N.getOperand(2));
2694 }
2695 
2696 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2697 CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2698                SDValue N, const TargetLowering *TLI, const DataLayout &DL) {
2699   MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
2700   if (cast<VTSDNode>(N)->getVT() == VT)
2701     return true;
2702 
2703   // Handle the case when VT is iPTR.
2704   return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI->getPointerTy(DL);
2705 }
2706 
2707 // Bit 0 stores the sign of the immediate. The upper bits contain the magnitude
2708 // shifted left by 1.
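     // For example (illustrative values): 6 (0b110) decodes to +3, 7 (0b111)
     // decodes to -3, and the special encoding 1 decodes to INT64_MIN (see below).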
2709 static uint64_t decodeSignRotatedValue(uint64_t V) {
2710   if ((V & 1) == 0)
2711     return V >> 1;
2712   if (V != 1)
2713     return -(V >> 1);
2714   // There is no such thing as -0 with integers.  "-0" really means MININT.
2715   return 1ULL << 63;
2716 }
2717 
2718 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2719 CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2720              SDValue N) {
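       // The immediate is stored in a variable-width form: a first byte with
       // bit 7 clear holds the whole value; otherwise GetVBR (defined earlier
       // in this file) consumes continuation bytes to rebuild it.  The result
       // is then sign-rotated, so it still has to be decoded below.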
2721   int64_t Val = MatcherTable[MatcherIndex++];
2722   if (Val & 128)
2723     Val = GetVBR(Val, MatcherTable, MatcherIndex);
2724 
2725   Val = decodeSignRotatedValue(Val);
2726 
2727   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
2728   return C && C->getSExtValue() == Val;
2729 }
2730 
2731 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2732 CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2733                   SDValue N, unsigned ChildNo) {
2734   if (ChildNo >= N.getNumOperands())
2735     return false;  // Match fails if out of range child #.
2736   return ::CheckInteger(MatcherTable, MatcherIndex, N.getOperand(ChildNo));
2737 }
2738 
2739 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2740 CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2741             SDValue N, const SelectionDAGISel &SDISel) {
2742   int64_t Val = MatcherTable[MatcherIndex++];
2743   if (Val & 128)
2744     Val = GetVBR(Val, MatcherTable, MatcherIndex);
2745 
2746   if (N->getOpcode() != ISD::AND) return false;
2747 
2748   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
2749   return C && SDISel.CheckAndMask(N.getOperand(0), C, Val);
2750 }
2751 
2752 LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2753 CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2754            const SelectionDAGISel &SDISel) {
2755   int64_t Val = MatcherTable[MatcherIndex++];
2756   if (Val & 128)
2757     Val = GetVBR(Val, MatcherTable, MatcherIndex);
2758 
2759   if (N->getOpcode() != ISD::OR) return false;
2760 
2761   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
2762   return C && SDISel.CheckOrMask(N.getOperand(0), C, Val);
2763 }
2764 
2765 /// IsPredicateKnownToFail - If we can evaluate the current predicate without
2766 /// pushing a scope, do so.  If the predicate is known to fail, set Result=true;
2767 /// the returned index is ignored by the caller in that case.  If the predicate
2768 /// is known to pass, set Result=false and return the MatcherIndex to continue
2769 /// with.  If the predicate cannot be evaluated here, set Result=false and
2770 /// return the original MatcherIndex so the interpreter re-reads the opcode.
2771 static unsigned IsPredicateKnownToFail(const unsigned char *Table,
2772                                        unsigned Index, SDValue N,
2773                                        bool &Result,
2774                                        const SelectionDAGISel &SDISel,
2775                   SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
2776   switch (Table[Index++]) {
2777   default:
2778     Result = false;
2779     return Index-1;  // Could not evaluate this predicate.
2780   case SelectionDAGISel::OPC_CheckSame:
2781     Result = !::CheckSame(Table, Index, N, RecordedNodes);
2782     return Index;
2783   case SelectionDAGISel::OPC_CheckChild0Same:
2784   case SelectionDAGISel::OPC_CheckChild1Same:
2785   case SelectionDAGISel::OPC_CheckChild2Same:
2786   case SelectionDAGISel::OPC_CheckChild3Same:
2787     Result = !::CheckChildSame(Table, Index, N, RecordedNodes,
2788                         Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Same);
2789     return Index;
2790   case SelectionDAGISel::OPC_CheckPatternPredicate:
2791     Result = !::CheckPatternPredicate(Table, Index, SDISel);
2792     return Index;
2793   case SelectionDAGISel::OPC_CheckPredicate:
2794     Result = !::CheckNodePredicate(Table, Index, SDISel, N.getNode());
2795     return Index;
2796   case SelectionDAGISel::OPC_CheckOpcode:
2797     Result = !::CheckOpcode(Table, Index, N.getNode());
2798     return Index;
2799   case SelectionDAGISel::OPC_CheckType:
2800     Result = !::CheckType(Table, Index, N, SDISel.TLI,
2801                           SDISel.CurDAG->getDataLayout());
2802     return Index;
2803   case SelectionDAGISel::OPC_CheckTypeRes: {
2804     unsigned Res = Table[Index++];
2805     Result = !::CheckType(Table, Index, N.getValue(Res), SDISel.TLI,
2806                           SDISel.CurDAG->getDataLayout());
2807     return Index;
2808   }
2809   case SelectionDAGISel::OPC_CheckChild0Type:
2810   case SelectionDAGISel::OPC_CheckChild1Type:
2811   case SelectionDAGISel::OPC_CheckChild2Type:
2812   case SelectionDAGISel::OPC_CheckChild3Type:
2813   case SelectionDAGISel::OPC_CheckChild4Type:
2814   case SelectionDAGISel::OPC_CheckChild5Type:
2815   case SelectionDAGISel::OPC_CheckChild6Type:
2816   case SelectionDAGISel::OPC_CheckChild7Type:
2817     Result = !::CheckChildType(
2818                  Table, Index, N, SDISel.TLI, SDISel.CurDAG->getDataLayout(),
2819                  Table[Index - 1] - SelectionDAGISel::OPC_CheckChild0Type);
2820     return Index;
2821   case SelectionDAGISel::OPC_CheckCondCode:
2822     Result = !::CheckCondCode(Table, Index, N);
2823     return Index;
2824   case SelectionDAGISel::OPC_CheckChild2CondCode:
2825     Result = !::CheckChild2CondCode(Table, Index, N);
2826     return Index;
2827   case SelectionDAGISel::OPC_CheckValueType:
2828     Result = !::CheckValueType(Table, Index, N, SDISel.TLI,
2829                                SDISel.CurDAG->getDataLayout());
2830     return Index;
2831   case SelectionDAGISel::OPC_CheckInteger:
2832     Result = !::CheckInteger(Table, Index, N);
2833     return Index;
2834   case SelectionDAGISel::OPC_CheckChild0Integer:
2835   case SelectionDAGISel::OPC_CheckChild1Integer:
2836   case SelectionDAGISel::OPC_CheckChild2Integer:
2837   case SelectionDAGISel::OPC_CheckChild3Integer:
2838   case SelectionDAGISel::OPC_CheckChild4Integer:
2839     Result = !::CheckChildInteger(Table, Index, N,
2840                      Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Integer);
2841     return Index;
2842   case SelectionDAGISel::OPC_CheckAndImm:
2843     Result = !::CheckAndImm(Table, Index, N, SDISel);
2844     return Index;
2845   case SelectionDAGISel::OPC_CheckOrImm:
2846     Result = !::CheckOrImm(Table, Index, N, SDISel);
2847     return Index;
2848   }
2849 }
2850 
2851 namespace {
2852 
2853 struct MatchScope {
2854   /// FailIndex - If this match fails, this is the index to continue with.
2855   unsigned FailIndex;
2856 
2857   /// NodeStack - The node stack when the scope was formed.
2858   SmallVector<SDValue, 4> NodeStack;
2859 
2860   /// NumRecordedNodes - The number of recorded nodes when the scope was formed.
2861   unsigned NumRecordedNodes;
2862 
2863   /// NumMatchedMemRefs - The number of matched memref entries.
2864   unsigned NumMatchedMemRefs;
2865 
2866   /// InputChain/InputGlue - The current chain/glue
2867   SDValue InputChain, InputGlue;
2868 
2869   /// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
2870   bool HasChainNodesMatched;
2871 };
2872 
2873 /// A DAG update listener to keep the matching state
2874 /// (i.e. RecordedNodes and MatchScopes) up to date if the target is allowed to
2875 /// change the DAG while matching.  The X86 addressing-mode matcher is an
2876 /// example of this.
2877 class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
2878 {
2879   SDNode **NodeToMatch;
2880   SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes;
2881   SmallVectorImpl<MatchScope> &MatchScopes;
2882 
2883 public:
2884   MatchStateUpdater(SelectionDAG &DAG, SDNode **NodeToMatch,
2885                     SmallVectorImpl<std::pair<SDValue, SDNode *>> &RN,
2886                     SmallVectorImpl<MatchScope> &MS)
2887       : SelectionDAG::DAGUpdateListener(DAG), NodeToMatch(NodeToMatch),
2888         RecordedNodes(RN), MatchScopes(MS) {}
2889 
2890   void NodeDeleted(SDNode *N, SDNode *E) override {
2891     // Some early-returns here to avoid the search if we deleted the node or
2892     // if the update comes from MorphNodeTo (MorphNodeTo is the last thing we
2893     // do, so it's unnecessary to update matching state at that point).
2894     // Neither of these can occur currently because we only install this
2895     // update listener while matching a complex pattern.
2896     if (!E || E->isMachineOpcode())
2897       return;
2898     // Check if NodeToMatch was updated.
2899     if (N == *NodeToMatch)
2900       *NodeToMatch = E;
2901     // Performing linear search here does not matter because we almost never
2902     // run this code.  You'd have to have a CSE during complex pattern
2903     // matching.
2904     for (auto &I : RecordedNodes)
2905       if (I.first.getNode() == N)
2906         I.first.setNode(E);
2907 
2908     for (auto &I : MatchScopes)
2909       for (auto &J : I.NodeStack)
2910         if (J.getNode() == N)
2911           J.setNode(E);
2912   }
2913 };
2914 
2915 } // end anonymous namespace
2916 
2917 void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
2918                                         const unsigned char *MatcherTable,
2919                                         unsigned TableSize) {
2920   // FIXME: Should these even be selected?  Handle these cases in the caller?
2921   switch (NodeToMatch->getOpcode()) {
2922   default:
2923     break;
2924   case ISD::EntryToken:       // These nodes remain the same.
2925   case ISD::BasicBlock:
2926   case ISD::Register:
2927   case ISD::RegisterMask:
2928   case ISD::HANDLENODE:
2929   case ISD::MDNODE_SDNODE:
2930   case ISD::TargetConstant:
2931   case ISD::TargetConstantFP:
2932   case ISD::TargetConstantPool:
2933   case ISD::TargetFrameIndex:
2934   case ISD::TargetExternalSymbol:
2935   case ISD::MCSymbol:
2936   case ISD::TargetBlockAddress:
2937   case ISD::TargetJumpTable:
2938   case ISD::TargetGlobalTLSAddress:
2939   case ISD::TargetGlobalAddress:
2940   case ISD::TokenFactor:
2941   case ISD::CopyFromReg:
2942   case ISD::CopyToReg:
2943   case ISD::EH_LABEL:
2944   case ISD::ANNOTATION_LABEL:
2945   case ISD::LIFETIME_START:
2946   case ISD::LIFETIME_END:
2947   case ISD::PSEUDO_PROBE:
2948     NodeToMatch->setNodeId(-1); // Mark selected.
2949     return;
2950   case ISD::AssertSext:
2951   case ISD::AssertZext:
2952   case ISD::AssertAlign:
2953     ReplaceUses(SDValue(NodeToMatch, 0), NodeToMatch->getOperand(0));
2954     CurDAG->RemoveDeadNode(NodeToMatch);
2955     return;
2956   case ISD::INLINEASM:
2957   case ISD::INLINEASM_BR:
2958     Select_INLINEASM(NodeToMatch);
2959     return;
2960   case ISD::READ_REGISTER:
2961     Select_READ_REGISTER(NodeToMatch);
2962     return;
2963   case ISD::WRITE_REGISTER:
2964     Select_WRITE_REGISTER(NodeToMatch);
2965     return;
2966   case ISD::UNDEF:
2967     Select_UNDEF(NodeToMatch);
2968     return;
2969   case ISD::FREEZE:
2970     Select_FREEZE(NodeToMatch);
2971     return;
2972   case ISD::ARITH_FENCE:
2973     Select_ARITH_FENCE(NodeToMatch);
2974     return;
2975   case ISD::MEMBARRIER:
2976     Select_MEMBARRIER(NodeToMatch);
2977     return;
2978   case ISD::STACKMAP:
2979     Select_STACKMAP(NodeToMatch);
2980     return;
2981   case ISD::PATCHPOINT:
2982     Select_PATCHPOINT(NodeToMatch);
2983     return;
2984   }
2985 
2986   assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
2987 
2988   // Set up the node stack with NodeToMatch as the only node on the stack.
2989   SmallVector<SDValue, 8> NodeStack;
2990   SDValue N = SDValue(NodeToMatch, 0);
2991   NodeStack.push_back(N);
2992 
2993   // MatchScopes - Scopes used when matching, if a match failure happens, this
2994   // indicates where to continue checking.
2995   SmallVector<MatchScope, 8> MatchScopes;
2996 
2997   // RecordedNodes - This is the set of nodes that have been recorded by the
2998   // state machine.  The second value is the parent of the node, or null if the
2999   // root is recorded.
3000   SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes;
3001 
3002   // MatchedMemRefs - This is the set of MemRef's we've seen in the input
3003   // pattern.
3004   SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
3005 
3006   // These are the current input chain and glue for use when generating nodes.
3007   // Various Emit operations change these.  For example, emitting a copytoreg
3008   // uses and updates these.
3009   SDValue InputChain, InputGlue;
3010 
3011   // ChainNodesMatched - If a pattern matches nodes that have input/output
3012   // chains, the OPC_EmitMergeInputChains operation is emitted which indicates
3013   // which ones they are.  The result is captured into this list so that we can
3014   // update the chain results when the pattern is complete.
3015   SmallVector<SDNode*, 3> ChainNodesMatched;
3016 
3017   LLVM_DEBUG(dbgs() << "ISEL: Starting pattern match\n");
3018 
3019   // Determine where to start the interpreter.  Normally we start at opcode #0,
3020   // but if the state machine starts with an OPC_SwitchOpcode, then we
3021   // accelerate the first lookup (which is guaranteed to be hot) with the
3022   // OpcodeOffset table.
3023   unsigned MatcherIndex = 0;
3024 
3025   if (!OpcodeOffset.empty()) {
3026     // Already computed the OpcodeOffset table, just index into it.
3027     if (N.getOpcode() < OpcodeOffset.size())
3028       MatcherIndex = OpcodeOffset[N.getOpcode()];
3029     LLVM_DEBUG(dbgs() << "  Initial Opcode index to " << MatcherIndex << "\n");
3030 
3031   } else if (MatcherTable[0] == OPC_SwitchOpcode) {
3032     // Otherwise, the table isn't computed, but the state machine does start
3033     // with an OPC_SwitchOpcode instruction.  Populate the table now, since this
3034     // is the first time we're selecting an instruction.
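         // The table layout being decoded here is, roughly:
         //   OPC_SwitchOpcode, [CaseSize(VBR), OpcLo, OpcHi, <CaseSize bytes>]...,
         //   0
         // OpcodeOffset[Opc] records the index of the first byte of Opc's case,
         // so later selections can jump straight to it.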
3035     unsigned Idx = 1;
3036     while (true) {
3037       // Get the size of this case.
3038       unsigned CaseSize = MatcherTable[Idx++];
3039       if (CaseSize & 128)
3040         CaseSize = GetVBR(CaseSize, MatcherTable, Idx);
3041       if (CaseSize == 0) break;
3042 
3043       // Get the opcode, add the index to the table.
3044       uint16_t Opc = MatcherTable[Idx++];
3045       Opc |= (unsigned short)MatcherTable[Idx++] << 8;
3046       if (Opc >= OpcodeOffset.size())
3047         OpcodeOffset.resize((Opc+1)*2);
3048       OpcodeOffset[Opc] = Idx;
3049       Idx += CaseSize;
3050     }
3051 
3052     // Okay, do the lookup for the first opcode.
3053     if (N.getOpcode() < OpcodeOffset.size())
3054       MatcherIndex = OpcodeOffset[N.getOpcode()];
3055   }
3056 
3057   while (true) {
3058     assert(MatcherIndex < TableSize && "Invalid index");
3059 #ifndef NDEBUG
3060     unsigned CurrentOpcodeIndex = MatcherIndex;
3061 #endif
3062     BuiltinOpcodes Opcode = (BuiltinOpcodes)MatcherTable[MatcherIndex++];
3063     switch (Opcode) {
3064     case OPC_Scope: {
3065       // Okay, the semantics of this operation are that we should push a scope
3066       // then evaluate the first child.  However, pushing a scope only to have
3067       // the first check fail (which then pops it) is inefficient.  If we can
3068       // determine immediately that the first check (or first several) will
3069       // immediately fail, don't even bother pushing a scope for them.
3070       unsigned FailIndex;
3071 
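           // Each alternative in the Scope is prefixed by a VBR-encoded
           // NumToSkip giving the distance to the next alternative; a zero
           // NumToSkip terminates the scope.  Roughly:
           //   OPC_Scope, Skip1, <alt1>, Skip2, <alt2>, ..., 0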
3072       while (true) {
3073         unsigned NumToSkip = MatcherTable[MatcherIndex++];
3074         if (NumToSkip & 128)
3075           NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
3076         // Found the end of the scope with no match.
3077         if (NumToSkip == 0) {
3078           FailIndex = 0;
3079           break;
3080         }
3081 
3082         FailIndex = MatcherIndex+NumToSkip;
3083 
3084         unsigned MatcherIndexOfPredicate = MatcherIndex;
3085         (void)MatcherIndexOfPredicate; // silence warning.
3086 
3087         // If we can't evaluate this predicate without pushing a scope (e.g. if
3088         // it is a 'MoveParent') or if the predicate succeeds on this node, we
3089         // push the scope and evaluate the full predicate chain.
3090         bool Result;
3091         MatcherIndex = IsPredicateKnownToFail(MatcherTable, MatcherIndex, N,
3092                                               Result, *this, RecordedNodes);
3093         if (!Result)
3094           break;
3095 
3096         LLVM_DEBUG(
3097             dbgs() << "  Skipped scope entry (due to false predicate) at "
3098                    << "index " << MatcherIndexOfPredicate << ", continuing at "
3099                    << FailIndex << "\n");
3100         ++NumDAGIselRetries;
3101 
3102         // Otherwise, we know that this case of the Scope is guaranteed to fail,
3103         // move to the next case.
3104         MatcherIndex = FailIndex;
3105       }
3106 
3107       // If the whole scope failed to match, bail.
3108       if (FailIndex == 0) break;
3109 
3110       // Push a MatchScope which indicates where to go if the first child fails
3111       // to match.
3112       MatchScope NewEntry;
3113       NewEntry.FailIndex = FailIndex;
3114       NewEntry.NodeStack.append(NodeStack.begin(), NodeStack.end());
3115       NewEntry.NumRecordedNodes = RecordedNodes.size();
3116       NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
3117       NewEntry.InputChain = InputChain;
3118       NewEntry.InputGlue = InputGlue;
3119       NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
3120       MatchScopes.push_back(NewEntry);
3121       continue;
3122     }
3123     case OPC_RecordNode: {
3124       // Remember this node, it may end up being an operand in the pattern.
3125       SDNode *Parent = nullptr;
3126       if (NodeStack.size() > 1)
3127         Parent = NodeStack[NodeStack.size()-2].getNode();
3128       RecordedNodes.push_back(std::make_pair(N, Parent));
3129       continue;
3130     }
3131 
3132     case OPC_RecordChild0: case OPC_RecordChild1:
3133     case OPC_RecordChild2: case OPC_RecordChild3:
3134     case OPC_RecordChild4: case OPC_RecordChild5:
3135     case OPC_RecordChild6: case OPC_RecordChild7: {
3136       unsigned ChildNo = Opcode-OPC_RecordChild0;
3137       if (ChildNo >= N.getNumOperands())
3138         break;  // Match fails if out of range child #.
3139 
3140       RecordedNodes.push_back(std::make_pair(N->getOperand(ChildNo),
3141                                              N.getNode()));
3142       continue;
3143     }
3144     case OPC_RecordMemRef:
3145       if (auto *MN = dyn_cast<MemSDNode>(N))
3146         MatchedMemRefs.push_back(MN->getMemOperand());
3147       else {
3148         LLVM_DEBUG(dbgs() << "Expected MemSDNode "; N->dump(CurDAG);
3149                    dbgs() << '\n');
3150       }
3151 
3152       continue;
3153 
3154     case OPC_CaptureGlueInput:
3155       // If the current node has an input glue, capture it in InputGlue.
3156       if (N->getNumOperands() != 0 &&
3157           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue)
3158         InputGlue = N->getOperand(N->getNumOperands()-1);
3159       continue;
3160 
3161     case OPC_MoveChild: {
3162       unsigned ChildNo = MatcherTable[MatcherIndex++];
3163       if (ChildNo >= N.getNumOperands())
3164         break;  // Match fails if out of range child #.
3165       N = N.getOperand(ChildNo);
3166       NodeStack.push_back(N);
3167       continue;
3168     }
3169 
3170     case OPC_MoveChild0: case OPC_MoveChild1:
3171     case OPC_MoveChild2: case OPC_MoveChild3:
3172     case OPC_MoveChild4: case OPC_MoveChild5:
3173     case OPC_MoveChild6: case OPC_MoveChild7: {
3174       unsigned ChildNo = Opcode-OPC_MoveChild0;
3175       if (ChildNo >= N.getNumOperands())
3176         break;  // Match fails if out of range child #.
3177       N = N.getOperand(ChildNo);
3178       NodeStack.push_back(N);
3179       continue;
3180     }
3181 
3182     case OPC_MoveParent:
3183       // Pop the current node off the NodeStack.
3184       NodeStack.pop_back();
3185       assert(!NodeStack.empty() && "Node stack imbalance!");
3186       N = NodeStack.back();
3187       continue;
3188 
3189     case OPC_CheckSame:
3190       if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
3191       continue;
3192 
3193     case OPC_CheckChild0Same: case OPC_CheckChild1Same:
3194     case OPC_CheckChild2Same: case OPC_CheckChild3Same:
3195       if (!::CheckChildSame(MatcherTable, MatcherIndex, N, RecordedNodes,
3196                             Opcode-OPC_CheckChild0Same))
3197         break;
3198       continue;
3199 
3200     case OPC_CheckPatternPredicate:
3201       if (!::CheckPatternPredicate(MatcherTable, MatcherIndex, *this)) break;
3202       continue;
3203     case OPC_CheckPredicate:
3204       if (!::CheckNodePredicate(MatcherTable, MatcherIndex, *this,
3205                                 N.getNode()))
3206         break;
3207       continue;
3208     case OPC_CheckPredicateWithOperands: {
3209       unsigned OpNum = MatcherTable[MatcherIndex++];
3210       SmallVector<SDValue, 8> Operands;
3211 
3212       for (unsigned i = 0; i < OpNum; ++i)
3213         Operands.push_back(RecordedNodes[MatcherTable[MatcherIndex++]].first);
3214 
3215       unsigned PredNo = MatcherTable[MatcherIndex++];
3216       if (!CheckNodePredicateWithOperands(N.getNode(), PredNo, Operands))
3217         break;
3218       continue;
3219     }
3220     case OPC_CheckComplexPat: {
3221       unsigned CPNum = MatcherTable[MatcherIndex++];
3222       unsigned RecNo = MatcherTable[MatcherIndex++];
3223       assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
3224 
3225       // If target can modify DAG during matching, keep the matching state
3226       // consistent.
3227       std::unique_ptr<MatchStateUpdater> MSU;
3228       if (ComplexPatternFuncMutatesDAG())
3229         MSU.reset(new MatchStateUpdater(*CurDAG, &NodeToMatch, RecordedNodes,
3230                                         MatchScopes));
3231 
3232       if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo].second,
3233                                RecordedNodes[RecNo].first, CPNum,
3234                                RecordedNodes))
3235         break;
3236       continue;
3237     }
3238     case OPC_CheckOpcode:
3239       if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
3240       continue;
3241 
3242     case OPC_CheckType:
3243       if (!::CheckType(MatcherTable, MatcherIndex, N, TLI,
3244                        CurDAG->getDataLayout()))
3245         break;
3246       continue;
3247 
3248     case OPC_CheckTypeRes: {
3249       unsigned Res = MatcherTable[MatcherIndex++];
3250       if (!::CheckType(MatcherTable, MatcherIndex, N.getValue(Res), TLI,
3251                        CurDAG->getDataLayout()))
3252         break;
3253       continue;
3254     }
3255 
3256     case OPC_SwitchOpcode: {
3257       unsigned CurNodeOpcode = N.getOpcode();
3258       unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3259       unsigned CaseSize;
3260       while (true) {
3261         // Get the size of this case.
3262         CaseSize = MatcherTable[MatcherIndex++];
3263         if (CaseSize & 128)
3264           CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
3265         if (CaseSize == 0) break;
3266 
3267         uint16_t Opc = MatcherTable[MatcherIndex++];
3268         Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
3269 
3270         // If the opcode matches, then we will execute this case.
3271         if (CurNodeOpcode == Opc)
3272           break;
3273 
3274         // Otherwise, skip over this case.
3275         MatcherIndex += CaseSize;
3276       }
3277 
3278       // If no cases matched, bail out.
3279       if (CaseSize == 0) break;
3280 
3281       // Otherwise, execute the case we found.
3282       LLVM_DEBUG(dbgs() << "  OpcodeSwitch from " << SwitchStart << " to "
3283                         << MatcherIndex << "\n");
3284       continue;
3285     }
3286 
3287     case OPC_SwitchType: {
3288       MVT CurNodeVT = N.getSimpleValueType();
3289       unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3290       unsigned CaseSize;
3291       while (true) {
3292         // Get the size of this case.
3293         CaseSize = MatcherTable[MatcherIndex++];
3294         if (CaseSize & 128)
3295           CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
3296         if (CaseSize == 0) break;
3297 
3298         MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3299         if (CaseVT == MVT::iPTR)
3300           CaseVT = TLI->getPointerTy(CurDAG->getDataLayout());
3301 
3302         // If the VT matches, then we will execute this case.
3303         if (CurNodeVT == CaseVT)
3304           break;
3305 
3306         // Otherwise, skip over this case.
3307         MatcherIndex += CaseSize;
3308       }
3309 
3310       // If no cases matched, bail out.
3311       if (CaseSize == 0) break;
3312 
3313       // Otherwise, execute the case we found.
3314       LLVM_DEBUG(dbgs() << "  TypeSwitch[" << CurNodeVT
3315                         << "] from " << SwitchStart << " to " << MatcherIndex
3316                         << '\n');
3317       continue;
3318     }
3319     case OPC_CheckChild0Type: case OPC_CheckChild1Type:
3320     case OPC_CheckChild2Type: case OPC_CheckChild3Type:
3321     case OPC_CheckChild4Type: case OPC_CheckChild5Type:
3322     case OPC_CheckChild6Type: case OPC_CheckChild7Type:
3323       if (!::CheckChildType(MatcherTable, MatcherIndex, N, TLI,
3324                             CurDAG->getDataLayout(),
3325                             Opcode - OPC_CheckChild0Type))
3326         break;
3327       continue;
3328     case OPC_CheckCondCode:
3329       if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
3330       continue;
3331     case OPC_CheckChild2CondCode:
3332       if (!::CheckChild2CondCode(MatcherTable, MatcherIndex, N)) break;
3333       continue;
3334     case OPC_CheckValueType:
3335       if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI,
3336                             CurDAG->getDataLayout()))
3337         break;
3338       continue;
3339     case OPC_CheckInteger:
3340       if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
3341       continue;
3342     case OPC_CheckChild0Integer: case OPC_CheckChild1Integer:
3343     case OPC_CheckChild2Integer: case OPC_CheckChild3Integer:
3344     case OPC_CheckChild4Integer:
3345       if (!::CheckChildInteger(MatcherTable, MatcherIndex, N,
3346                                Opcode-OPC_CheckChild0Integer)) break;
3347       continue;
3348     case OPC_CheckAndImm:
3349       if (!::CheckAndImm(MatcherTable, MatcherIndex, N, *this)) break;
3350       continue;
3351     case OPC_CheckOrImm:
3352       if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
3353       continue;
3354     case OPC_CheckImmAllOnesV:
3355       if (!ISD::isConstantSplatVectorAllOnes(N.getNode()))
3356         break;
3357       continue;
3358     case OPC_CheckImmAllZerosV:
3359       if (!ISD::isConstantSplatVectorAllZeros(N.getNode()))
3360         break;
3361       continue;
3362 
3363     case OPC_CheckFoldableChainNode: {
3364       assert(NodeStack.size() != 1 && "No parent node");
3365       // Verify that all intermediate nodes between the root and this one have
3366       // a single use (ignoring chains, which are handled in UpdateChains).
3367       bool HasMultipleUses = false;
3368       for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i) {
3369         unsigned NNonChainUses = 0;
3370         SDNode *NS = NodeStack[i].getNode();
3371         for (auto UI = NS->use_begin(), UE = NS->use_end(); UI != UE; ++UI)
3372           if (UI.getUse().getValueType() != MVT::Other)
3373             if (++NNonChainUses > 1) {
3374               HasMultipleUses = true;
3375               break;
3376             }
3377         if (HasMultipleUses) break;
3378       }
3379       if (HasMultipleUses) break;
3380 
3381       // Check to see that the target thinks this is profitable to fold and that
3382       // we can fold it without inducing cycles in the graph.
3383       if (!IsProfitableToFold(N, NodeStack[NodeStack.size()-2].getNode(),
3384                               NodeToMatch) ||
3385           !IsLegalToFold(N, NodeStack[NodeStack.size()-2].getNode(),
3386                          NodeToMatch, OptLevel,
3387                          true/*We validate our own chains*/))
3388         break;
3389 
3390       continue;
3391     }
3392     case OPC_EmitInteger:
3393     case OPC_EmitStringInteger: {
3394       MVT::SimpleValueType VT =
3395         (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3396       int64_t Val = MatcherTable[MatcherIndex++];
3397       if (Val & 128)
3398         Val = GetVBR(Val, MatcherTable, MatcherIndex);
3399       if (Opcode == OPC_EmitInteger)
3400         Val = decodeSignRotatedValue(Val);
3401       RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
3402                               CurDAG->getTargetConstant(Val, SDLoc(NodeToMatch),
3403                                                         VT), nullptr));
3404       continue;
3405     }
3406     case OPC_EmitRegister: {
3407       MVT::SimpleValueType VT =
3408         (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3409       unsigned RegNo = MatcherTable[MatcherIndex++];
3410       RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
3411                               CurDAG->getRegister(RegNo, VT), nullptr));
3412       continue;
3413     }
3414     case OPC_EmitRegister2: {
3415       // For targets w/ more than 256 register names, the register enum
3416       // values are stored in two bytes in the matcher table (just like
3417       // opcodes).
3418       MVT::SimpleValueType VT =
3419         (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3420       unsigned RegNo = MatcherTable[MatcherIndex++];
3421       RegNo |= MatcherTable[MatcherIndex++] << 8;
3422       RecordedNodes.push_back(std::pair<SDValue, SDNode*>(
3423                               CurDAG->getRegister(RegNo, VT), nullptr));
3424       continue;
3425     }
3426 
3427     case OPC_EmitConvertToTarget:  {
3428       // Convert from IMM/FPIMM to target version.
3429       unsigned RecNo = MatcherTable[MatcherIndex++];
3430       assert(RecNo < RecordedNodes.size() && "Invalid EmitConvertToTarget");
3431       SDValue Imm = RecordedNodes[RecNo].first;
3432 
3433       if (Imm->getOpcode() == ISD::Constant) {
3434         const ConstantInt *Val=cast<ConstantSDNode>(Imm)->getConstantIntValue();
3435         Imm = CurDAG->getTargetConstant(*Val, SDLoc(NodeToMatch),
3436                                         Imm.getValueType());
3437       } else if (Imm->getOpcode() == ISD::ConstantFP) {
3438         const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
3439         Imm = CurDAG->getTargetConstantFP(*Val, SDLoc(NodeToMatch),
3440                                           Imm.getValueType());
3441       }
3442 
3443       RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second));
3444       continue;
3445     }
3446 
3447     case OPC_EmitMergeInputChains1_0:    // OPC_EmitMergeInputChains, 1, 0
3448     case OPC_EmitMergeInputChains1_1:    // OPC_EmitMergeInputChains, 1, 1
3449     case OPC_EmitMergeInputChains1_2: {  // OPC_EmitMergeInputChains, 1, 2
3450       // These are space-optimized forms of OPC_EmitMergeInputChains.
3451       assert(!InputChain.getNode() &&
3452              "EmitMergeInputChains should be the first chain producing node");
3453       assert(ChainNodesMatched.empty() &&
3454              "Should only have one EmitMergeInputChains per match");
3455 
3456       // Read all of the chained nodes.
3457       unsigned RecNo = Opcode - OPC_EmitMergeInputChains1_0;
3458       assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3459       ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
3460 
3461       // If the chained node is not the root, we can't fold it if it has
3462       // multiple uses.
3463       // FIXME: What if other value results of the node have uses not matched
3464       // by this pattern?
3465       if (ChainNodesMatched.back() != NodeToMatch &&
3466           !RecordedNodes[RecNo].first.hasOneUse()) {
3467         ChainNodesMatched.clear();
3468         break;
3469       }
3470 
3471       // Merge the input chains if they are not intra-pattern references.
3472       InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3473 
3474       if (!InputChain.getNode())
3475         break;  // Failed to merge.
3476       continue;
3477     }
3478 
3479     case OPC_EmitMergeInputChains: {
3480       assert(!InputChain.getNode() &&
3481              "EmitMergeInputChains should be the first chain producing node");
3482       // This node gets a list of nodes we matched in the input that have
3483       // chains.  We want to token factor all of the input chains to these nodes
3484       // together.  However, if any of the input chains is actually one of the
3485       // nodes matched in this pattern, then we have an intra-match reference.
3486       // Ignore these because the newly token factored chain should not refer to
3487       // the old nodes.
3488       unsigned NumChains = MatcherTable[MatcherIndex++];
3489       assert(NumChains != 0 && "Can't TF zero chains");
3490 
3491       assert(ChainNodesMatched.empty() &&
3492              "Should only have one EmitMergeInputChains per match");
3493 
3494       // Read all of the chained nodes.
3495       for (unsigned i = 0; i != NumChains; ++i) {
3496         unsigned RecNo = MatcherTable[MatcherIndex++];
3497         assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3498         ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
3499 
3500         // If the chained node is not the root, we can't fold it if it has
3501         // multiple uses.
3502         // FIXME: What if other value results of the node have uses not matched
3503         // by this pattern?
3504         if (ChainNodesMatched.back() != NodeToMatch &&
3505             !RecordedNodes[RecNo].first.hasOneUse()) {
3506           ChainNodesMatched.clear();
3507           break;
3508         }
3509       }
3510 
3511       // If the inner loop broke out, the match fails.
3512       if (ChainNodesMatched.empty())
3513         break;
3514 
3515       // Merge the input chains if they are not intra-pattern references.
3516       InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3517 
3518       if (!InputChain.getNode())
3519         break;  // Failed to merge.
3520 
3521       continue;
3522     }
3523 
3524     case OPC_EmitCopyToReg:
3525     case OPC_EmitCopyToReg2: {
3526       unsigned RecNo = MatcherTable[MatcherIndex++];
3527       assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
3528       unsigned DestPhysReg = MatcherTable[MatcherIndex++];
3529       if (Opcode == OPC_EmitCopyToReg2)
3530         DestPhysReg |= MatcherTable[MatcherIndex++] << 8;
3531 
3532       if (!InputChain.getNode())
3533         InputChain = CurDAG->getEntryNode();
3534 
3535       InputChain = CurDAG->getCopyToReg(InputChain, SDLoc(NodeToMatch),
3536                                         DestPhysReg, RecordedNodes[RecNo].first,
3537                                         InputGlue);
3538 
3539       InputGlue = InputChain.getValue(1);
3540       continue;
3541     }
3542 
3543     case OPC_EmitNodeXForm: {
3544       unsigned XFormNo = MatcherTable[MatcherIndex++];
3545       unsigned RecNo = MatcherTable[MatcherIndex++];
3546       assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
3547       SDValue Res = RunSDNodeXForm(RecordedNodes[RecNo].first, XFormNo);
3548       RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, nullptr));
3549       continue;
3550     }
3551     case OPC_Coverage: {
3552       // This is emitted right before MorphNode/EmitNode, so it should be safe
3553       // to assume that this node has been selected.
3554       unsigned index = MatcherTable[MatcherIndex++];
3555       index |= (MatcherTable[MatcherIndex++] << 8);
3556       dbgs() << "COVERED: " << getPatternForIndex(index) << "\n";
3557       dbgs() << "INCLUDED: " << getIncludePathForIndex(index) << "\n";
3558       continue;
3559     }
3560 
3561     case OPC_EmitNode:     case OPC_MorphNodeTo:
3562     case OPC_EmitNode0:    case OPC_EmitNode1:    case OPC_EmitNode2:
3563     case OPC_MorphNodeTo0: case OPC_MorphNodeTo1: case OPC_MorphNodeTo2: {
3564       uint16_t TargetOpc = MatcherTable[MatcherIndex++];
3565       TargetOpc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
3566       unsigned EmitNodeInfo = MatcherTable[MatcherIndex++];
3567       // Get the result VT list.
3568       unsigned NumVTs;
3569       // If this is one of the compressed forms, get the number of VTs based
3570       // on the Opcode. Otherwise read the next byte from the table.
3571       if (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2)
3572         NumVTs = Opcode - OPC_MorphNodeTo0;
3573       else if (Opcode >= OPC_EmitNode0 && Opcode <= OPC_EmitNode2)
3574         NumVTs = Opcode - OPC_EmitNode0;
3575       else
3576         NumVTs = MatcherTable[MatcherIndex++];
3577       SmallVector<EVT, 4> VTs;
3578       for (unsigned i = 0; i != NumVTs; ++i) {
3579         MVT::SimpleValueType VT =
3580           (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
3581         if (VT == MVT::iPTR)
3582           VT = TLI->getPointerTy(CurDAG->getDataLayout()).SimpleTy;
3583         VTs.push_back(VT);
3584       }
3585 
3586       if (EmitNodeInfo & OPFL_Chain)
3587         VTs.push_back(MVT::Other);
3588       if (EmitNodeInfo & OPFL_GlueOutput)
3589         VTs.push_back(MVT::Glue);
3590 
3591       // This is hot code, so optimize the two most common cases of 1 and 2
3592       // results.
3593       SDVTList VTList;
3594       if (VTs.size() == 1)
3595         VTList = CurDAG->getVTList(VTs[0]);
3596       else if (VTs.size() == 2)
3597         VTList = CurDAG->getVTList(VTs[0], VTs[1]);
3598       else
3599         VTList = CurDAG->getVTList(VTs);
3600 
3601       // Get the operand list.
3602       unsigned NumOps = MatcherTable[MatcherIndex++];
3603       SmallVector<SDValue, 8> Ops;
3604       for (unsigned i = 0; i != NumOps; ++i) {
3605         unsigned RecNo = MatcherTable[MatcherIndex++];
3606         if (RecNo & 128)
3607           RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
3608 
3609         assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
3610         Ops.push_back(RecordedNodes[RecNo].first);
3611       }
3612 
3613       // If there are variadic operands to add, handle them now.
3614       if (EmitNodeInfo & OPFL_VariadicInfo) {
3615         // Determine the start index to copy from.
3616         unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(EmitNodeInfo);
3617         FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
3618         assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
3619                "Invalid variadic node");
3620         // Copy all of the variadic operands, not including a potential glue
3621         // input.
3622         for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
3623              i != e; ++i) {
3624           SDValue V = NodeToMatch->getOperand(i);
3625           if (V.getValueType() == MVT::Glue) break;
3626           Ops.push_back(V);
3627         }
3628       }
3629 
3630       // If this has chain/glue inputs, add them.
3631       if (EmitNodeInfo & OPFL_Chain)
3632         Ops.push_back(InputChain);
3633       if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr)
3634         Ops.push_back(InputGlue);
3635 
3636       // Check whether any matched node could raise an FP exception.  Since all
3637       // such nodes must have a chain, it suffices to check ChainNodesMatched.
3638       // We need to perform this check before potentially modifying one of the
3639       // nodes via MorphNode.
3640       bool MayRaiseFPException =
3641           llvm::any_of(ChainNodesMatched, [this](SDNode *N) {
3642             return mayRaiseFPException(N) && !N->getFlags().hasNoFPExcept();
3643           });
3644 
3645       // Create the node.
3646       MachineSDNode *Res = nullptr;
3647       bool IsMorphNodeTo = Opcode == OPC_MorphNodeTo ||
3648                      (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2);
3649       if (!IsMorphNodeTo) {
3650         // If this is a normal EmitNode command, just create the new node and
3651         // add the results to the RecordedNodes list.
3652         Res = CurDAG->getMachineNode(TargetOpc, SDLoc(NodeToMatch),
3653                                      VTList, Ops);
3654 
3655         // Add all the non-glue/non-chain results to the RecordedNodes list.
3656         for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
3657           if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
3658           RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
3659                                                              nullptr));
3660         }
3661       } else {
3662         assert(NodeToMatch->getOpcode() != ISD::DELETED_NODE &&
3663                "NodeToMatch was removed partway through selection");
3664         SelectionDAG::DAGNodeDeletedListener NDL(*CurDAG, [&](SDNode *N,
3665                                                               SDNode *E) {
3666           CurDAG->salvageDebugInfo(*N);
3667           auto &Chain = ChainNodesMatched;
3668           assert((!E || !is_contained(Chain, N)) &&
3669                  "Chain node replaced during MorphNode");
3670           llvm::erase_value(Chain, N);
3671         });
3672         Res = cast<MachineSDNode>(MorphNode(NodeToMatch, TargetOpc, VTList,
3673                                             Ops, EmitNodeInfo));
3674       }
3675 
3676       // Set the NoFPExcept flag when no original matched node could
3677       // raise an FP exception, but the new node potentially might.
3678       if (!MayRaiseFPException && mayRaiseFPException(Res)) {
3679         SDNodeFlags Flags = Res->getFlags();
3680         Flags.setNoFPExcept(true);
3681         Res->setFlags(Flags);
3682       }
3683 
3684       // If the node had chain/glue results, update our notion of the current
3685       // chain and glue.
3686       if (EmitNodeInfo & OPFL_GlueOutput) {
3687         InputGlue = SDValue(Res, VTs.size()-1);
3688         if (EmitNodeInfo & OPFL_Chain)
3689           InputChain = SDValue(Res, VTs.size()-2);
3690       } else if (EmitNodeInfo & OPFL_Chain)
3691         InputChain = SDValue(Res, VTs.size()-1);
3692 
3693       // If the OPFL_MemRefs glue is set on this node, slap all of the
3694       // accumulated memrefs onto it.
3695       //
3696       // FIXME: This is vastly incorrect for patterns with multiple-output
3697       // instructions that access memory and for ComplexPatterns that match
3698       // loads.
3699       if (EmitNodeInfo & OPFL_MemRefs) {
3700         // Only attach load or store memory operands if the generated
3701         // instruction may load or store.
3702         const MCInstrDesc &MCID = TII->get(TargetOpc);
3703         bool mayLoad = MCID.mayLoad();
3704         bool mayStore = MCID.mayStore();
3705 
3706         // We expect to have relatively few of these so just filter them into a
3707         // temporary buffer so that we can easily add them to the instruction.
3708         SmallVector<MachineMemOperand *, 4> FilteredMemRefs;
3709         for (MachineMemOperand *MMO : MatchedMemRefs) {
3710           if (MMO->isLoad()) {
3711             if (mayLoad)
3712               FilteredMemRefs.push_back(MMO);
3713           } else if (MMO->isStore()) {
3714             if (mayStore)
3715               FilteredMemRefs.push_back(MMO);
3716           } else {
3717             FilteredMemRefs.push_back(MMO);
3718           }
3719         }
3720 
3721         CurDAG->setNodeMemRefs(Res, FilteredMemRefs);
3722       }
3723 
3724       LLVM_DEBUG(if (!MatchedMemRefs.empty() && Res->memoperands_empty()) dbgs()
3725                      << "  Dropping mem operands\n";
3726                  dbgs() << "  " << (IsMorphNodeTo ? "Morphed" : "Created")
3727                         << " node: ";
3728                  Res->dump(CurDAG););
3729 
3730       // If this was a MorphNodeTo then we're completely done!
3731       if (IsMorphNodeTo) {
3732         // Update chain uses.
3733         UpdateChains(Res, InputChain, ChainNodesMatched, true);
3734         return;
3735       }
3736       continue;
3737     }
3738 
3739     case OPC_CompleteMatch: {
3740       // The match has been completed, and any new nodes (if any) have been
3741       // created.  Patch up references to the matched dag to use the newly
3742       // created nodes.
3743       unsigned NumResults = MatcherTable[MatcherIndex++];
3744 
3745       for (unsigned i = 0; i != NumResults; ++i) {
3746         unsigned ResSlot = MatcherTable[MatcherIndex++];
3747         if (ResSlot & 128)
3748           ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);
3749 
3750         assert(ResSlot < RecordedNodes.size() && "Invalid CompleteMatch");
3751         SDValue Res = RecordedNodes[ResSlot].first;
3752 
3753         assert(i < NodeToMatch->getNumValues() &&
3754                NodeToMatch->getValueType(i) != MVT::Other &&
3755                NodeToMatch->getValueType(i) != MVT::Glue &&
3756                "Invalid number of results to complete!");
3757         assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
3758                 NodeToMatch->getValueType(i) == MVT::iPTR ||
3759                 Res.getValueType() == MVT::iPTR ||
3760                 NodeToMatch->getValueType(i).getSizeInBits() ==
3761                     Res.getValueSizeInBits()) &&
3762                "invalid replacement");
3763         ReplaceUses(SDValue(NodeToMatch, i), Res);
3764       }
3765 
3766       // Update chain uses.
3767       UpdateChains(NodeToMatch, InputChain, ChainNodesMatched, false);
3768 
3769       // If the root node defines glue, we need to update it to the glue result.
3770       // TODO: This never happens in our tests and I think it can be removed /
3771       // replaced with an assert, but if we do it this way the change is
3772       // NFC.
3773       if (NodeToMatch->getValueType(NodeToMatch->getNumValues() - 1) ==
3774               MVT::Glue &&
3775           InputGlue.getNode())
3776         ReplaceUses(SDValue(NodeToMatch, NodeToMatch->getNumValues() - 1),
3777                     InputGlue);
3778 
3779       assert(NodeToMatch->use_empty() &&
3780              "Didn't replace all uses of the node?");
3781       CurDAG->RemoveDeadNode(NodeToMatch);
3782 
3783       return;
3784     }
3785     }
3786 
3787     // If the code reached this point, then the match failed.  See if there is
3788     // another child to try in the current 'Scope', otherwise pop it until we
3789     // find a case to check.
3790     LLVM_DEBUG(dbgs() << "  Match failed at index " << CurrentOpcodeIndex
3791                       << "\n");
3792     ++NumDAGIselRetries;
3793     while (true) {
3794       if (MatchScopes.empty()) {
3795         CannotYetSelect(NodeToMatch);
3796         return;
3797       }
3798 
3799       // Restore the interpreter state back to the point where the scope was
3800       // formed.
3801       MatchScope &LastScope = MatchScopes.back();
3802       RecordedNodes.resize(LastScope.NumRecordedNodes);
3803       NodeStack.clear();
3804       NodeStack.append(LastScope.NodeStack.begin(), LastScope.NodeStack.end());
3805       N = NodeStack.back();
3806 
3807       if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
3808         MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
3809       MatcherIndex = LastScope.FailIndex;
3810 
3811       LLVM_DEBUG(dbgs() << "  Continuing at " << MatcherIndex << "\n");
3812 
3813       InputChain = LastScope.InputChain;
3814       InputGlue = LastScope.InputGlue;
3815       if (!LastScope.HasChainNodesMatched)
3816         ChainNodesMatched.clear();
3817 
3818       // Check to see what the offset is at the new MatcherIndex.  If it is zero
3819       // we have reached the end of this scope, otherwise we have another child
3820       // in the current scope to try.
3821       unsigned NumToSkip = MatcherTable[MatcherIndex++];
3822       if (NumToSkip & 128)
3823         NumToSkip = GetVBR(NumToSkip, MatcherTable, MatcherIndex);
3824 
3825       // If we have another child in this scope to match, update FailIndex and
3826       // try it.
3827       if (NumToSkip != 0) {
3828         LastScope.FailIndex = MatcherIndex+NumToSkip;
3829         break;
3830       }
3831 
3832       // End of this scope, pop it and try the next child in the containing
3833       // scope.
3834       MatchScopes.pop_back();
3835     }
3836   }
3837 }
3838 
3839 /// Return whether the node may raise an FP exception.
3840 bool SelectionDAGISel::mayRaiseFPException(SDNode *N) const {
3841   // For machine opcodes, consult the MCID flag.
3842   if (N->isMachineOpcode()) {
3843     const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
3844     return MCID.mayRaiseFPException();
3845   }
3846 
3847   // For ISD opcodes, only StrictFP opcodes may raise an FP
3848   // exception.
3849   if (N->isTargetOpcode())
3850     return N->isTargetStrictFPOpcode();
3851   return N->isStrictFPOpcode();
3852 }
3853 
3854 bool SelectionDAGISel::isOrEquivalentToAdd(const SDNode *N) const {
3855   assert(N->getOpcode() == ISD::OR && "Unexpected opcode");
3856   auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
3857   if (!C)
3858     return false;
3859 
3860   // Detect when "or" is used to add an offset to a stack object.
3861   if (auto *FN = dyn_cast<FrameIndexSDNode>(N->getOperand(0))) {
3862     MachineFrameInfo &MFI = MF->getFrameInfo();
3863     Align A = MFI.getObjectAlign(FN->getIndex());
3864     int32_t Off = C->getSExtValue();
3865     // If the alleged offset fits in the zero bits guaranteed by
3866     // the alignment, then this or is really an add.
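         // E.g. for a 16-byte aligned object the low four address bits are
         // known to be zero, so or'ing in any offset in [0, 15] cannot carry
         // and behaves exactly like adding that offset.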
3867     return (Off >= 0) && (((A.value() - 1) & Off) == unsigned(Off));
3868   }
3869   return false;
3870 }
3871 
3872 void SelectionDAGISel::CannotYetSelect(SDNode *N) {
3873   std::string msg;
3874   raw_string_ostream Msg(msg);
3875   Msg << "Cannot select: ";
3876 
3877   if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
3878       N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
3879       N->getOpcode() != ISD::INTRINSIC_VOID) {
3880     N->printrFull(Msg, CurDAG);
3881     Msg << "\nIn function: " << MF->getName();
3882   } else {
3883     bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
3884     unsigned iid =
3885       cast<ConstantSDNode>(N->getOperand(HasInputChain))->getZExtValue();
3886     if (iid < Intrinsic::num_intrinsics)
3887       Msg << "intrinsic %" << Intrinsic::getBaseName((Intrinsic::ID)iid);
3888     else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
3889       Msg << "target intrinsic %" << TII->getName(iid);
3890     else
3891       Msg << "unknown intrinsic #" << iid;
3892   }
3893   report_fatal_error(Twine(Msg.str()));
3894 }
3895