//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported.  It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time.  For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support.  In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated.  Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time.  Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators.  More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

void FastISel::finishBasicBlock() { flushLocalValueMap(); }

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fall back to the SDISel argument lowering code to deal with the sret
    // pointer parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}
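
// For illustration only: fastLowerArguments() is a target hook, and the loop
// above assumes a successful implementation has left an entry for every
// formal argument in LocalValueMap. A hypothetical subclass might look like:
//
//   bool MyTargetFastISel::fastLowerArguments() {
//     for (const Argument &Arg : FuncInfo.Fn->args()) {
//       if (/* Arg doesn't fit in a single integer register */)
//         return false; // bail; SelectionDAG handles the whole function
//       Register VReg = /* copy from the incoming physical register */;
//       updateValueMap(&Arg, VReg); // Arguments land in LocalValueMap.
//     }
//     return true;
//   }
//
// "MyTargetFastISel" is hypothetical; real targets follow this pattern with
// target-specific ABI checks.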

/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return 0.
static Register findLocalRegDef(MachineInstr &MI) {
  Register RegDef;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return Register();
      RegDef = MO.getReg();
    } else if (MO.getReg().isVirtual()) {
      // This is another use of a vreg. Don't delete it.
      return Register();
    }
  }
  return RegDef;
}

static bool isRegUsedByPhiNodes(Register DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}

void FastISel::flushLocalValueMap() {
  // If FastISel bails out, it could leave local value instructions behind
  // that aren't used for anything.  Detect and erase those.
  if (LastLocalValue != EmitStartPt) {
    // Save the first instruction after local values, for later.
    MachineBasicBlock::iterator FirstNonValue(LastLocalValue);
    ++FirstNonValue;

    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);
    for (MachineInstr &LocalMI :
         llvm::make_early_inc_range(llvm::make_range(RI, RE))) {
      Register DefReg = findLocalRegDef(LocalMI);
      if (!DefReg)
        continue;
      if (FuncInfo.RegsWithFixups.count(DefReg))
        continue;
      bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
      if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
        if (EmitStartPt == &LocalMI)
          EmitStartPt = EmitStartPt->getPrevNode();
        LLVM_DEBUG(dbgs() << "removing dead local value materialization "
                          << LocalMI);
        LocalMI.eraseFromParent();
      }
    }

    if (FirstNonValue != FuncInfo.MBB->end()) {
      // See if there are any local value instructions left.  If so, we want to
      // make sure the first one has a debug location; if it doesn't, use the
      // first non-value instruction's debug location.

      // If EmitStartPt is non-null, this block had copies at the top before
      // FastISel started doing anything; it points to the last one, so the
      // first local value instruction is the one after EmitStartPt.
      // If EmitStartPt is null, the first local value instruction is at the
      // top of the block.
      MachineBasicBlock::iterator FirstLocalValue =
          EmitStartPt ? ++MachineBasicBlock::iterator(EmitStartPt)
                      : FuncInfo.MBB->begin();
      if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
        FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}
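
// For illustration: if a failed selection left these local values behind
// (x86 opcodes, purely as an example)
//   %0 = MOV32ri 42            ; dead: no uses, no pending fixups
//   %1 = LEA64r %stack.0, ...  ; referenced by PHINodesToUpdate
// the sweep above erases the MOV but keeps the LEA, since defs that feed
// PHI nodes or are registered in RegsWithFixups must survive.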

Register FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return Register();

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return Register();
  }

  // Look up the value to see if we already have a register for it.
  Register Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}
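
// Example of the promotion path above: on a target where i8 is not legal,
// a value of IR type i8 gives RealVT == MVT::i8, so VT becomes
// getTypeToTransformTo(i8), typically MVT::i32, and the value is then
// tracked in a virtual register of that wider type.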

Register FastISel::materializeConstant(const Value *V, MVT VT) {
  Register Reg;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
        getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        Register IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return Register();
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}
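
// Worked example of the ConstantFP fallback above, assuming the target has
// no direct way to materialize the FP immediate: for the constant 2.0,
// convertToInteger() is exact, so we materialize the integer 2 and emit
// SINT_TO_FP on it; for 2.5 the conversion is inexact, isExact stays false,
// and no register is produced.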

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
  Register Reg;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

Register FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  Register &AssignedReg = FuncInfo.ValueMap[I];
  if (!AssignedReg)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}
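
// For illustration: if an IR value was already assigned vreg %5 (e.g.
// created up front by InitializeRegForValue) but selection produced its
// result in %7, we record RegFixups[%5] = %7 rather than emitting a copy;
// uses of %5 are rewritten to %7 when the fixups are resolved. Marking %7
// in RegsWithFixups keeps flushLocalValueMap() from erasing its def as
// "dead" before that rewrite happens.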

Register FastISel::getRegForGEPIndex(const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return Register();

  // If the index is narrower than intptr_t, sign-extend it; if wider,
  // truncate it.
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  }
  return IdxN;
}
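
// E.g. for "getelementptr ..., i32 %i" on a 64-bit target, PtrVT is i64 and
// IdxVT is i32, so the index is sign-extended (GEP indices are signed); an
// i128 index would instead be truncated to i64.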

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  SavePoint OldInsertPt = FuncInfo.InsertPt;
  recomputeInsertPt();
  return OldInsertPt;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri".  At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      Register Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;

      Register ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
                       VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
                                      VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  Register Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}
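
// Worked example of the immediate folds above: for "sdiv exact i32 %x, 8"
// we rewrite Imm = 8 to Log2_64(8) = 3 and select SRA, i.e. "%x >> 3"; for
// "urem i32 %x, 8" we rewrite Imm to 7 and select AND, i.e. "%x & 7". The
// shift form is only valid because the division is marked "exact"; the AND
// form holds for any power-of-two urem.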

bool FastISel::selectGetElementPtr(const User *I) {
  Register N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
  // and bail.
  if (isa<VectorType>(I->getType()))
    return false;

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      Register IdxN = getRegForGEPIndex(Idx);
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}
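
// For illustration, "getelementptr [4 x i32], ptr %p, i64 1, i64 2" has two
// constant indices contributing 1*16 + 2*4 = 24 bytes; since 24 < MaxOffs,
// both are folded into TotalOffs and emitted as a single "N = N + 24" at
// the end, rather than as two separate adds.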

bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      Register Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap doesn't
  // clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}
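
// A minimal IR use of the intrinsic handled above, for reference:
//   call void (i64, i32, ...)
//       @llvm.experimental.stackmap(i64 12345, i32 0, i32 %live)
// Here 12345 is the <id>, 0 requests no shadow bytes, and %live is recorded
// as a live variable via addStackMapLiveVars().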

/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee =
      I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
      cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      Register Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
                                            /*isImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, MIMD,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}
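
// A minimal IR use of the intrinsic handled above, for reference:
//   call void (i64, i32, i8*, i32, ...)
//       @llvm.experimental.patchpoint.void(i64 1, i32 15, i8* null, i32 0)
// This reserves 15 bytes of encoding space with a null target and no call
// arguments; any trailing operands would be recorded as stack map live
// variables.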

bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, which gets lowered properly.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Typed Event Call instruction, which gets lowered
  // properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  FunctionType *FTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = Arg.IndirectType;
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg, DL);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftAsync)
      Flags.setSwiftAsync();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsCFGuardTarget)
      Flags.setCFGuardTarget();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop.  If we port
      // inalloca to more targets, we'll have to add custom inalloca handling in
      // the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsPreallocated) {
      Flags.setPreallocated();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // preallocated. This way we can know how many bytes we should've
      // allocated and how many bytes a callee cleanup function will pop.  If we
      // port preallocated to more targets, we'll have to add custom
      // preallocated handling in the various CC lowering callbacks.
      Flags.setByVal();
    }
    MaybeAlign MemAlign = Arg.Alignment;
    if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
      unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);

      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      if (!MemAlign)
        MemAlign = Align(TLI.getByValTypeAlignment(Arg.IndirectType, DL));
      Flags.setByValSize(FrameSize);
    } else if (!MemAlign) {
      MemAlign = DL.getABITypeAlign(Arg.Ty);
    }
    Flags.setMemAlign(*MemAlign);
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CB)
    updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);

  // Set labels for heapallocsite call.
  if (CLI.CB)
    if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
      CLI.Call->setHeapAllocMarker(*MF, MD);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  FunctionType *FuncTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CI->arg_size());

  for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(CI, i - CI->arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(*CI, TM))
    IsTailCall = false;
  if (IsTailCall && !CI->isMustTailCall() &&
      MF->getFunction().getFnAttribute("disable-tail-calls").getValueAsBool())
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
      .setTailCall(IsTailCall);

  diagnoseDontCall(*CI);

  return lowerCallTo(CLI);
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;
    if (Call->isConvergent())
      ExtraInfo |= InlineAsm::Extra_IsConvergent;
    ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;

    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                      TII.get(TargetOpcode::INLINEASM));
    MIB.addExternalSymbol(IA->getAsmString().c_str());
    MIB.addImm(ExtraInfo);

    const MDNode *SrcLoc = Call->getMetadata("srcloc");
    if (SrcLoc)
      MIB.addMetadata(SrcLoc);

    return true;
  }

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  return lowerCall(Call);
}
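
// The "simple inline asm" path above covers calls such as
//   call void asm sideeffect "nop", ""()
// i.e. an asm string with an empty constraint list; anything with actual
// constraints is left for SelectionDAG to lower.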

bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the sideeffect intrinsic.
  case Intrinsic::sideeffect:
  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
  case Intrinsic::assume:
  // Neither does the llvm.experimental.noalias.scope.decl intrinsic.
  case Intrinsic::experimental_noalias_scope_decl:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (!hasDebugInfo)\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (bad/undef address)\n");
      return true;
    }

    // Byval arguments with frame indices were already handled after argument
    // lowering and before isel.
    const auto *Arg =
        dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
    if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
      return true;

    std::optional<MachineOperand> Op;
    if (Register Reg = lookUpRegForValue(Address))
      Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(MIMD.getDL()) &&
             "Expected inlined-at fields to agree");
      if (FuncInfo.MF->useDebugInstrRef() && Op->isReg()) {
        // If using instruction referencing, produce this as a DBG_INSTR_REF,
1253         // the expression, we don't have an "indirect" flag in DBG_INSTR_REF.
1254         SmallVector<uint64_t, 3> Ops(
1255             {dwarf::DW_OP_LLVM_arg, 0, dwarf::DW_OP_deref});
1256         auto *NewExpr = DIExpression::prependOpcodes(DI->getExpression(), Ops);
1257         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(),
1258                 TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, *Op,
1259                 DI->getVariable(), NewExpr);
1260       } else {
1261         // A dbg.declare describes the address of a source variable, so lower it
1262         // into an indirect DBG_VALUE.
1263         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(),
1264                 TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op,
1265                 DI->getVariable(), DI->getExpression());
1266       }
1267     } else {
1268       // We can't yet handle anything else here because it would require
1269       // generating code, thus altering codegen because of debug info.
1270       LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1271                         << " (no materialized reg for address)\n");
1272     }
1273     return true;
1274   }
1275   case Intrinsic::dbg_value: {
1276     // This form of DBG_VALUE is target-independent.
1277     const DbgValueInst *DI = cast<DbgValueInst>(II);
1278     const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1279     const Value *V = DI->getValue();
1280     assert(DI->getVariable()->isValidLocationForIntrinsic(MIMD.getDL()) &&
1281            "Expected inlined-at fields to agree");
1282     if (!V || isa<UndefValue>(V) || DI->hasArgList()) {
1283       // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
1284       // undef DBG_VALUE to terminate any prior location.
1285       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(), II, false, 0U,
1286               DI->getVariable(), DI->getExpression());
1287     } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1288       // See if there's an expression to constant-fold.
1289       DIExpression *Expr = DI->getExpression();
1290       if (Expr)
1291         std::tie(Expr, CI) = Expr->constantFold(CI);
1292       if (CI->getBitWidth() > 64)
1293         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
1294             .addCImm(CI)
1295             .addImm(0U)
1296             .addMetadata(DI->getVariable())
1297             .addMetadata(Expr);
1298       else
1299         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
1300             .addImm(CI->getZExtValue())
1301             .addImm(0U)
1302             .addMetadata(DI->getVariable())
1303             .addMetadata(Expr);
1304     } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1305       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
1306           .addFPImm(CF)
1307           .addImm(0U)
1308           .addMetadata(DI->getVariable())
1309           .addMetadata(DI->getExpression());
1310     } else if (Register Reg = lookUpRegForValue(V)) {
1311       // FIXME: This does not handle register-indirect values at offset 0.
1312       if (!FuncInfo.MF->useDebugInstrRef()) {
1313         bool IsIndirect = false;
1314         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(), II, IsIndirect,
1315                 Reg, DI->getVariable(), DI->getExpression());
1316       } else {
1317         // If using instruction referencing, produce this as a DBG_INSTR_REF,
1318         // to be later patched up by finalizeDebugInstrRefs.
1319         SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
1320             /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
1321             /* isKill */ false, /* isDead */ false,
1322             /* isUndef */ false, /* isEarlyClobber */ false,
1323             /* SubReg */ 0, /* isDebug */ true)});
1324         SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
1325         auto *NewExpr = DIExpression::prependOpcodes(DI->getExpression(), Ops);
1326         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD.getDL(),
1327                 TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, MOs,
1328                 DI->getVariable(), NewExpr);
1329       }
1330     } else {
1331       // We don't know how to handle other cases, so we drop.
1332       LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1333     }
1334     return true;
1335   }
1336   case Intrinsic::dbg_label: {
1337     const DbgLabelInst *DI = cast<DbgLabelInst>(II);
1338     assert(DI->getLabel() && "Missing label");
1339     if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1340       LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1341       return true;
1342     }
1343 
1344     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1345             TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
1346     return true;
1347   }
1348   case Intrinsic::objectsize:
1349     llvm_unreachable("llvm.objectsize.* should have been lowered already");
1350 
1351   case Intrinsic::is_constant:
1352     llvm_unreachable("llvm.is.constant.* should have been lowered already");
1353 
1354   case Intrinsic::launder_invariant_group:
1355   case Intrinsic::strip_invariant_group:
1356   case Intrinsic::expect: {
1357     Register ResultReg = getRegForValue(II->getArgOperand(0));
1358     if (!ResultReg)
1359       return false;
1360     updateValueMap(II, ResultReg);
1361     return true;
1362   }
1363   case Intrinsic::experimental_stackmap:
1364     return selectStackmap(II);
1365   case Intrinsic::experimental_patchpoint_void:
1366   case Intrinsic::experimental_patchpoint_i64:
1367     return selectPatchpoint(II);
1368 
1369   case Intrinsic::xray_customevent:
1370     return selectXRayCustomEvent(II);
1371   case Intrinsic::xray_typedevent:
1372     return selectXRayTypedEvent(II);
1373   }
1374 
1375   return fastLowerIntrinsicCall(II);
1376 }
1377 
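/// Select a cast (trunc/extend/fp conversion) by mapping it to the given ISD
/// opcode via fastEmit_r. Bails out when either type is not simple and legal.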
1378 bool FastISel::selectCast(const User *I, unsigned Opcode) {
1379   EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1380   EVT DstVT = TLI.getValueType(DL, I->getType());
1381 
1382   if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1383       !DstVT.isSimple())
1384     // Unhandled type. Halt "fast" selection and bail.
1385     return false;
1386 
1387   // Check if the destination type is legal.
1388   if (!TLI.isTypeLegal(DstVT))
1389     return false;
1390 
1391   // Check if the source operand is legal.
1392   if (!TLI.isTypeLegal(SrcVT))
1393     return false;
1394 
1395   Register InputReg = getRegForValue(I->getOperand(0));
1396   if (!InputReg)
1397     // Unhandled operand.  Halt "fast" selection and bail.
1398     return false;
1399 
1400   Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1401                                   Opcode, InputReg);
1402   if (!ResultReg)
1403     return false;
1404 
1405   updateValueMap(I, ResultReg);
1406   return true;
1407 }
1408 
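/// Select a bitcast. When source and destination map to the same MVT, the
/// operand register is reused as-is; otherwise an ISD::BITCAST is emitted.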
1409 bool FastISel::selectBitCast(const User *I) {
1410   EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1411   EVT DstEVT = TLI.getValueType(DL, I->getType());
1412   if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1413       !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1414     // Unhandled type. Halt "fast" selection and bail.
1415     return false;
1416 
1417   MVT SrcVT = SrcEVT.getSimpleVT();
1418   MVT DstVT = DstEVT.getSimpleVT();
1419   Register Op0 = getRegForValue(I->getOperand(0));
1420   if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1421     return false;
1422 
1423   // If the bitcast doesn't change the type, just use the operand value.
1424   if (SrcVT == DstVT) {
1425     updateValueMap(I, Op0);
1426     return true;
1427   }
1428 
1429   // Otherwise, select a BITCAST opcode.
1430   Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
1431   if (!ResultReg)
1432     return false;
1433 
1434   updateValueMap(I, ResultReg);
1435   return true;
1436 }
1437 
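/// Select a freeze by copying the already-materialized operand register into
/// a fresh register of the appropriate class.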
1438 bool FastISel::selectFreeze(const User *I) {
1439   Register Reg = getRegForValue(I->getOperand(0));
1440   if (!Reg)
1441     // Unhandled operand.
1442     return false;
1443 
1444   EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
1445   if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
1446     // Unhandled type, bail out.
1447     return false;
1448 
1449   MVT Ty = ETy.getSimpleVT();
1450   const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
1451   Register ResultReg = createResultReg(TyRegClass);
1452   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1453           TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
1454 
1455   updateValueMap(I, ResultReg);
1456   return true;
1457 }
1458 
1459 // Remove local value instructions, from the instruction after
1460 // SavedLastLocalValue up to the current function insert point.
1461 void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue) {
1463   MachineInstr *CurLastLocalValue = getLastLocalValue();
1464   if (CurLastLocalValue != SavedLastLocalValue) {
1465     // Find the first local value instruction to be deleted.
1466     // This is the instruction after SavedLastLocalValue if it is non-NULL.
1467     // Otherwise it's the first instruction in the block.
1468     MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1469     if (SavedLastLocalValue)
1470       ++FirstDeadInst;
1471     else
1472       FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1473     setLastLocalValue(SavedLastLocalValue);
1474     removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1475   }
1476 }
1477 
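/// Select a single IR instruction: flush the local value map, pre-handle PHI
/// inputs for terminators, try target-independent selection, then the
/// target-specific hook, removing any dead code that failed attempts leave
/// behind.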
1478 bool FastISel::selectInstruction(const Instruction *I) {
1479   // Flush the local value map before starting each instruction.
1480   // This improves locality and debugging, and can reduce spills.
1481   // Reuse of values across IR instructions is relatively uncommon.
1482   flushLocalValueMap();
1483 
1484   MachineInstr *SavedLastLocalValue = getLastLocalValue();
1485   // Just before the terminator instruction, insert instructions to
1486   // feed PHI nodes in successor blocks.
1487   if (I->isTerminator()) {
1488     if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1489       // PHI node handling may have generated local value instructions,
1490       // even though it failed to handle all PHI nodes.
1491       // We remove these instructions because SelectionDAGISel will generate
1492       // them again.
1493       removeDeadLocalValueCode(SavedLastLocalValue);
1494       return false;
1495     }
1496   }
1497 
1498   // FastISel does not handle any operand bundles except OB_funclet.
1499   if (auto *Call = dyn_cast<CallBase>(I))
1500     for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
1501       if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1502         return false;
1503 
1504   MIMD = MIMetadata(*I);
1505 
1506   SavedInsertPt = FuncInfo.InsertPt;
1507 
1508   if (const auto *Call = dyn_cast<CallInst>(I)) {
1509     const Function *F = Call->getCalledFunction();
1510     LibFunc Func;
1511 
1512     // As a special case, don't handle calls to builtin library functions that
1513     // may be translated directly to target instructions.
1514     if (F && !F->hasLocalLinkage() && F->hasName() &&
1515         LibInfo->getLibFunc(F->getName(), Func) &&
1516         LibInfo->hasOptimizedCodeGen(Func))
1517       return false;
1518 
1519     // Don't handle Intrinsic::trap if a trap function is specified.
1520     if (F && F->getIntrinsicID() == Intrinsic::trap &&
1521         Call->hasFnAttr("trap-func-name"))
1522       return false;
1523   }
1524 
1525   // First, try doing target-independent selection.
1526   if (!SkipTargetIndependentISel) {
1527     if (selectOperator(I, I->getOpcode())) {
1528       ++NumFastIselSuccessIndependent;
1529       MIMD = {};
1530       return true;
1531     }
1532     // Remove dead code.
1533     recomputeInsertPt();
1534     if (SavedInsertPt != FuncInfo.InsertPt)
1535       removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1536     SavedInsertPt = FuncInfo.InsertPt;
1537   }
1538   // Next, try calling the target to attempt to handle the instruction.
1539   if (fastSelectInstruction(I)) {
1540     ++NumFastIselSuccessTarget;
1541     MIMD = {};
1542     return true;
1543   }
1544   // Remove dead code.
1545   recomputeInsertPt();
1546   if (SavedInsertPt != FuncInfo.InsertPt)
1547     removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1548 
1549   MIMD = {};
1550   // Undo phi node updates, because they will be added again by SelectionDAG.
1551   if (I->isTerminator()) {
1552     // PHI node handling may have generated local value instructions.
1553     // We remove them because SelectionDAGISel will generate them again.
1554     removeDeadLocalValueCode(SavedLastLocalValue);
1555     FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
1556   }
1557   return false;
1558 }
1559 
1560 /// Emit an unconditional branch to the given block, unless it is the immediate
1561 /// (fall-through) successor, and update the CFG.
1562 void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
1563                               const DebugLoc &DbgLoc) {
1564   if (FuncInfo.MBB->getBasicBlock()->sizeWithoutDebug() > 1 &&
1565       FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1566     // Unconditional fall-through case: no branch instruction is needed.
1567     // Note: if the branch would be the only non-debug instruction in the
1568     // block, we emit it anyway (below) for more accurate line information.
1569   } else {
1570     // The unconditional branch case.
1571     TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
1572                      SmallVector<MachineOperand, 0>(), DbgLoc);
1573   }
1574   if (FuncInfo.BPI) {
1575     auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
1576         FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
1577     FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
1578   } else
1579     FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
1580 }
1581 
1582 void FastISel::finishCondBranch(const BasicBlock *BranchBB,
1583                                 MachineBasicBlock *TrueMBB,
1584                                 MachineBasicBlock *FalseMBB) {
1585   // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
1586   // happen in degenerate IR, and MachineIR forbids having a block appear
1587   // twice in the successor/predecessor lists.
1588   if (TrueMBB != FalseMBB) {
1589     if (FuncInfo.BPI) {
1590       auto BranchProbability =
1591           FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
1592       FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
1593     } else
1594       FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
1595   }
1596 
1597   fastEmitBranch(FalseMBB, MIMD.getDL());
1598 }
1599 
1600 /// Emit an FNeg operation.
1601 bool FastISel::selectFNeg(const User *I, const Value *In) {
1602   Register OpReg = getRegForValue(In);
1603   if (!OpReg)
1604     return false;
1605 
1606   // If the target has ISD::FNEG, use it.
1607   EVT VT = TLI.getValueType(DL, I->getType());
1608   Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1609                                   OpReg);
1610   if (ResultReg) {
1611     updateValueMap(I, ResultReg);
1612     return true;
1613   }
1614 
1615   // Bitcast the value to integer, twiddle the sign bit with xor,
1616   // and then bitcast it back to floating-point.
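  // For example, an f32 negation becomes an i32 XOR with 1 << 31 (the sign
  // bit), wrapped in the two bitcasts emitted below.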
1617   if (VT.getSizeInBits() > 64)
1618     return false;
1619   EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1620   if (!TLI.isTypeLegal(IntVT))
1621     return false;
1622 
1623   Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1624                                ISD::BITCAST, OpReg);
1625   if (!IntReg)
1626     return false;
1627 
1628   Register IntResultReg = fastEmit_ri_(
1629       IntVT.getSimpleVT(), ISD::XOR, IntReg,
1630       UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1631   if (!IntResultReg)
1632     return false;
1633 
1634   ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1635                          IntResultReg);
1636   if (!ResultReg)
1637     return false;
1638 
1639   updateValueMap(I, ResultReg);
1640   return true;
1641 }
1642 
1643 bool FastISel::selectExtractValue(const User *U) {
1644   const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1645   if (!EVI)
1646     return false;
1647 
1648   // Make sure we only try to handle extracts with a legal result.  But also
1649   // allow i1 because it's easy.
1650   EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1651   if (!RealVT.isSimple())
1652     return false;
1653   MVT VT = RealVT.getSimpleVT();
1654   if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1655     return false;
1656 
1657   const Value *Op0 = EVI->getOperand(0);
1658   Type *AggTy = Op0->getType();
1659 
1660   // Get the base result register.
1661   unsigned ResultReg;
1662   DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0);
1663   if (I != FuncInfo.ValueMap.end())
1664     ResultReg = I->second;
1665   else if (isa<Instruction>(Op0))
1666     ResultReg = FuncInfo.InitializeRegForValue(Op0);
1667   else
1668     return false; // fast-isel can't handle aggregate constants at the moment
1669 
1670   // Get the actual result register, which is an offset from the base register.
1671   unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1672 
1673   SmallVector<EVT, 4> AggValueVTs;
1674   ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1675 
1676   for (unsigned i = 0; i < VTIndex; i++)
1677     ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1678 
1679   updateValueMap(EVI, ResultReg);
1680   return true;
1681 }
1682 
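/// Dispatch an IR operator to the matching target-independent select* helper.
/// Returns false for anything FastISel cannot handle, so the caller can fall
/// back to SelectionDAG.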
1683 bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1684   switch (Opcode) {
1685   case Instruction::Add:
1686     return selectBinaryOp(I, ISD::ADD);
1687   case Instruction::FAdd:
1688     return selectBinaryOp(I, ISD::FADD);
1689   case Instruction::Sub:
1690     return selectBinaryOp(I, ISD::SUB);
1691   case Instruction::FSub:
1692     return selectBinaryOp(I, ISD::FSUB);
1693   case Instruction::Mul:
1694     return selectBinaryOp(I, ISD::MUL);
1695   case Instruction::FMul:
1696     return selectBinaryOp(I, ISD::FMUL);
1697   case Instruction::SDiv:
1698     return selectBinaryOp(I, ISD::SDIV);
1699   case Instruction::UDiv:
1700     return selectBinaryOp(I, ISD::UDIV);
1701   case Instruction::FDiv:
1702     return selectBinaryOp(I, ISD::FDIV);
1703   case Instruction::SRem:
1704     return selectBinaryOp(I, ISD::SREM);
1705   case Instruction::URem:
1706     return selectBinaryOp(I, ISD::UREM);
1707   case Instruction::FRem:
1708     return selectBinaryOp(I, ISD::FREM);
1709   case Instruction::Shl:
1710     return selectBinaryOp(I, ISD::SHL);
1711   case Instruction::LShr:
1712     return selectBinaryOp(I, ISD::SRL);
1713   case Instruction::AShr:
1714     return selectBinaryOp(I, ISD::SRA);
1715   case Instruction::And:
1716     return selectBinaryOp(I, ISD::AND);
1717   case Instruction::Or:
1718     return selectBinaryOp(I, ISD::OR);
1719   case Instruction::Xor:
1720     return selectBinaryOp(I, ISD::XOR);
1721 
1722   case Instruction::FNeg:
1723     return selectFNeg(I, I->getOperand(0));
1724 
1725   case Instruction::GetElementPtr:
1726     return selectGetElementPtr(I);
1727 
1728   case Instruction::Br: {
1729     const BranchInst *BI = cast<BranchInst>(I);
1730 
1731     if (BI->isUnconditional()) {
1732       const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1733       MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1734       fastEmitBranch(MSucc, BI->getDebugLoc());
1735       return true;
1736     }
1737 
1738     // Conditional branches are not handled yet.
1739     // Halt "fast" selection and bail.
1740     return false;
1741   }
1742 
1743   case Instruction::Unreachable:
1744     if (TM.Options.TrapUnreachable)
1745       return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1746     else
1747       return true;
1748 
1749   case Instruction::Alloca:
1750     // FunctionLowering has the static-sized case covered.
1751     if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1752       return true;
1753 
1754     // Dynamic-sized alloca is not handled yet.
1755     return false;
1756 
1757   case Instruction::Call:
1758     // On AIX, normal call lowering uses the DAG-ISEL path currently so that the
1759     // callee of the direct function call instruction will be mapped to the
1760     // symbol for the function's entry point, which is distinct from the
1761     // function descriptor symbol. The latter is the symbol whose XCOFF symbol
1762     // name is the C-linkage name of the source level function.
1763     // Fast-isel can still select intrinsics, however.
1764     if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
1765       return false;
1766     return selectCall(I);
1767 
1768   case Instruction::BitCast:
1769     return selectBitCast(I);
1770 
1771   case Instruction::FPToSI:
1772     return selectCast(I, ISD::FP_TO_SINT);
1773   case Instruction::ZExt:
1774     return selectCast(I, ISD::ZERO_EXTEND);
1775   case Instruction::SExt:
1776     return selectCast(I, ISD::SIGN_EXTEND);
1777   case Instruction::Trunc:
1778     return selectCast(I, ISD::TRUNCATE);
1779   case Instruction::SIToFP:
1780     return selectCast(I, ISD::SINT_TO_FP);
1781 
1782   case Instruction::IntToPtr: // Deliberate fall-through.
1783   case Instruction::PtrToInt: {
1784     EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1785     EVT DstVT = TLI.getValueType(DL, I->getType());
1786     if (DstVT.bitsGT(SrcVT))
1787       return selectCast(I, ISD::ZERO_EXTEND);
1788     if (DstVT.bitsLT(SrcVT))
1789       return selectCast(I, ISD::TRUNCATE);
1790     Register Reg = getRegForValue(I->getOperand(0));
1791     if (!Reg)
1792       return false;
1793     updateValueMap(I, Reg);
1794     return true;
1795   }
1796 
1797   case Instruction::ExtractValue:
1798     return selectExtractValue(I);
1799 
1800   case Instruction::Freeze:
1801     return selectFreeze(I);
1802 
1803   case Instruction::PHI:
1804     llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1805 
1806   default:
1807     // Unhandled instruction. Halt "fast" selection and bail.
1808     return false;
1809   }
1810 }
1811 
1812 FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1813                    const TargetLibraryInfo *LibInfo,
1814                    bool SkipTargetIndependentISel)
1815     : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1816       MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1817       TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1818       TII(*MF->getSubtarget().getInstrInfo()),
1819       TLI(*MF->getSubtarget().getTargetLowering()),
1820       TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1821       SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1822 
1823 FastISel::~FastISel() = default;
1824 
1825 bool FastISel::fastLowerArguments() { return false; }
1826 
1827 bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1828 
1829 bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1830   return false;
1831 }
1832 
1833 unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1834 
1835 unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
1836   return 0;
1837 }
1838 
1839 unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1840                                unsigned /*Op1*/) {
1841   return 0;
1842 }
1843 
1844 unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1845   return 0;
1846 }
1847 
1848 unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1849                               const ConstantFP * /*FPImm*/) {
1850   return 0;
1851 }
1852 
1853 unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1854                                uint64_t /*Imm*/) {
1855   return 0;
1856 }
1857 
1858 /// This method is a wrapper around fastEmit_ri. It first tries to emit an
1859 /// instruction with an immediate operand using fastEmit_ri.
1860 /// If that fails, it materializes the immediate into a register and tries
1861 /// fastEmit_rr instead.
1862 Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1863                                 uint64_t Imm, MVT ImmType) {
1864   // If this is a multiply by a power of two, emit this as a shift left.
1865   if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
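    // mul x, 8 -> shl x, 3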
1866     Opcode = ISD::SHL;
1867     Imm = Log2_64(Imm);
1868   } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1869     // div x, 8 -> srl x, 3
1870     Opcode = ISD::SRL;
1871     Imm = Log2_64(Imm);
1872   }
1873 
1874   // Horrible hack (to be removed): check to make sure shift amounts are
1875   // in range.
1876   if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1877       Imm >= VT.getSizeInBits())
1878     return 0;
1879 
1880   // First check if the immediate type is legal. If not, we can't use the ri form.
1881   Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
1882   if (ResultReg)
1883     return ResultReg;
1884   Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1885   if (!MaterialReg) {
1886     // This is a bit ugly/slow, but failing here means falling out of
1887     // fast-isel, which would be very slow.
1888     IntegerType *ITy =
1889         IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1890     MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1891     if (!MaterialReg)
1892       return 0;
1893   }
1894   return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
1895 }
1896 
1897 Register FastISel::createResultReg(const TargetRegisterClass *RC) {
1898   return MRI.createVirtualRegister(RC);
1899 }
1900 
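/// Constrain the virtual register Op to the register class required by
/// operand OpNum of instruction description II. If constraining in place
/// fails, emit a COPY into a fresh register of the required class instead.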
1901 Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
1902                                             unsigned OpNum) {
1903   if (Op.isVirtual()) {
1904     const TargetRegisterClass *RegClass =
1905         TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
1906     if (!MRI.constrainRegClass(Op, RegClass)) {
1907       // If it's not legal to COPY between the register classes, something
1908       // has gone very wrong before we got here.
1909       Register NewOp = createResultReg(RegClass);
1910       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1911               TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
1912       return NewOp;
1913     }
1914   }
1915   return Op;
1916 }
1917 
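/// The fastEmitInst_* helpers below each build one MachineInstr with the
/// given opcode and operand mix. When the instruction has no explicit
/// definitions, the result is copied out of its first implicit def instead.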
1918 Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1919                                  const TargetRegisterClass *RC) {
1920   Register ResultReg = createResultReg(RC);
1921   const MCInstrDesc &II = TII.get(MachineInstOpcode);
1922 
1923   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg);
1924   return ResultReg;
1925 }
1926 
1927 Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
1928                                   const TargetRegisterClass *RC, unsigned Op0) {
1929   const MCInstrDesc &II = TII.get(MachineInstOpcode);
1930 
1931   Register ResultReg = createResultReg(RC);
1932   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1933 
1934   if (II.getNumDefs() >= 1)
1935     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
1936         .addReg(Op0);
1937   else {
1938     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
1939         .addReg(Op0);
1940     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
1941             ResultReg)
1942         .addReg(II.implicit_defs()[0]);
1943   }
1944 
1945   return ResultReg;
1946 }
1947 
1948 Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
1949                                    const TargetRegisterClass *RC, unsigned Op0,
1950                                    unsigned Op1) {
1951   const MCInstrDesc &II = TII.get(MachineInstOpcode);
1952 
1953   Register ResultReg = createResultReg(RC);
1954   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1955   Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1956 
1957   if (II.getNumDefs() >= 1)
1958     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
1959         .addReg(Op0)
1960         .addReg(Op1);
1961   else {
1962     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
1963         .addReg(Op0)
1964         .addReg(Op1);
1965     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
1966             ResultReg)
1967         .addReg(II.implicit_defs()[0]);
1968   }
1969   return ResultReg;
1970 }
1971 
1972 Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
1973                                     const TargetRegisterClass *RC, unsigned Op0,
1974                                     unsigned Op1, unsigned Op2) {
1975   const MCInstrDesc &II = TII.get(MachineInstOpcode);
1976 
1977   Register ResultReg = createResultReg(RC);
1978   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1979   Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1980   Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
1981 
1982   if (II.getNumDefs() >= 1)
1983     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
1984         .addReg(Op0)
1985         .addReg(Op1)
1986         .addReg(Op2);
1987   else {
1988     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
1989         .addReg(Op0)
1990         .addReg(Op1)
1991         .addReg(Op2);
1992     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
1993             ResultReg)
1994         .addReg(II.implicit_defs()[0]);
1995   }
1996   return ResultReg;
1997 }
1998 
1999 Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
2000                                    const TargetRegisterClass *RC, unsigned Op0,
2001                                    uint64_t Imm) {
2002   const MCInstrDesc &II = TII.get(MachineInstOpcode);
2003 
2004   Register ResultReg = createResultReg(RC);
2005   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2006 
2007   if (II.getNumDefs() >= 1)
2008     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2009         .addReg(Op0)
2010         .addImm(Imm);
2011   else {
2012     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2013         .addReg(Op0)
2014         .addImm(Imm);
2015     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2016             ResultReg)
2017         .addReg(II.implicit_defs()[0]);
2018   }
2019   return ResultReg;
2020 }
2021 
2022 Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
2023                                     const TargetRegisterClass *RC, unsigned Op0,
2024                                     uint64_t Imm1, uint64_t Imm2) {
2025   const MCInstrDesc &II = TII.get(MachineInstOpcode);
2026 
2027   Register ResultReg = createResultReg(RC);
2028   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2029 
2030   if (II.getNumDefs() >= 1)
2031     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2032         .addReg(Op0)
2033         .addImm(Imm1)
2034         .addImm(Imm2);
2035   else {
2036     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2037         .addReg(Op0)
2038         .addImm(Imm1)
2039         .addImm(Imm2);
2040     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2041             ResultReg)
2042         .addReg(II.implicit_defs()[0]);
2043   }
2044   return ResultReg;
2045 }
2046 
2047 Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
2048                                   const TargetRegisterClass *RC,
2049                                   const ConstantFP *FPImm) {
2050   const MCInstrDesc &II = TII.get(MachineInstOpcode);
2051 
2052   Register ResultReg = createResultReg(RC);
2053 
2054   if (II.getNumDefs() >= 1)
2055     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2056         .addFPImm(FPImm);
2057   else {
2058     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2059         .addFPImm(FPImm);
2060     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2061             ResultReg)
2062         .addReg(II.implicit_defs()[0]);
2063   }
2064   return ResultReg;
2065 }
2066 
2067 Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
2068                                     const TargetRegisterClass *RC, unsigned Op0,
2069                                     unsigned Op1, uint64_t Imm) {
2070   const MCInstrDesc &II = TII.get(MachineInstOpcode);
2071 
2072   Register ResultReg = createResultReg(RC);
2073   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2074   Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2075 
2076   if (II.getNumDefs() >= 1)
2077     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2078         .addReg(Op0)
2079         .addReg(Op1)
2080         .addImm(Imm);
2081   else {
2082     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2083         .addReg(Op0)
2084         .addReg(Op1)
2085         .addImm(Imm);
2086     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2087             ResultReg)
2088         .addReg(II.implicit_defs()[0]);
2089   }
2090   return ResultReg;
2091 }
2092 
2093 Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
2094                                   const TargetRegisterClass *RC, uint64_t Imm) {
2095   Register ResultReg = createResultReg(RC);
2096   const MCInstrDesc &II = TII.get(MachineInstOpcode);
2097 
2098   if (II.getNumDefs() >= 1)
2099     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
2100         .addImm(Imm);
2101   else {
2102     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addImm(Imm);
2103     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2104             ResultReg)
2105         .addReg(II.implicit_defs()[0]);
2106   }
2107   return ResultReg;
2108 }
2109 
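/// Emit a subregister extract as a COPY from subregister Idx of Op0 into a
/// fresh register of the class for RetVT.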
2110 Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
2111                                               uint32_t Idx) {
2112   Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2113   assert(Register::isVirtualRegister(Op0) &&
2114          "Cannot yet extract from physregs");
2115   const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2116   MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
2117   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
2118           ResultReg).addReg(Op0, 0, Idx);
2119   return ResultReg;
2120 }
2121 
2122 /// Emit MachineInstrs to compute the value of Op0 with all but the least
2123 /// significant bit set to zero.
2124 Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0) {
2125   return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
2126 }
2127 
2128 /// Handle PHI nodes in successor blocks.
2129 /// Emit code to ensure constants are copied into registers when needed.
2130 /// Remember the virtual registers that need to be added to the Machine PHI
2131 /// nodes as input.  We cannot just directly add them, because expansion
2132 /// might result in multiple MBB's for one BB.  As such, the start of the
2133 /// BB might correspond to a different MBB than the end.
2134 bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2135   const Instruction *TI = LLVMBB->getTerminator();
2136 
2137   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
2138   FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
2139 
2140   // Check successor nodes' PHI nodes that expect a constant to be available
2141   // from this block.
2142   for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2143     const BasicBlock *SuccBB = TI->getSuccessor(succ);
2144     if (!isa<PHINode>(SuccBB->begin()))
2145       continue;
2146     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2147 
2148     // If this terminator has multiple identical successors (common for
2149     // switches), only handle each succ once.
2150     if (!SuccsHandled.insert(SuccMBB).second)
2151       continue;
2152 
2153     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2154 
2155     // At this point we know that there is a 1-1 correspondence between LLVM PHI
2156     // nodes and Machine PHI nodes, but the incoming operands have not been
2157     // emitted yet.
2158     for (const PHINode &PN : SuccBB->phis()) {
2159       // Ignore dead PHIs.
2160       if (PN.use_empty())
2161         continue;
2162 
2163       // Only handle legal types. Two interesting things to note here. First,
2164       // by bailing out early, we may leave behind some dead instructions,
2165       // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2166       // own moves. Second, this check is necessary because FastISel doesn't
2167       // use CreateRegs to create registers, so it always creates
2168       // exactly one register for each non-void instruction.
2169       EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
2170       if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2171         // Handle integer promotions, though, because they're common and easy.
2172         if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2173           FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2174           return false;
2175         }
2176       }
2177 
2178       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
2179 
2180       // Set the DebugLoc for the copy. Use the operand's location if there
2181       // is one; otherwise use no location and flushLocalValueMap will fix it.
2182       MIMD = {};
2183       if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2184         MIMD = MIMetadata(*Inst);
2185 
2186       Register Reg = getRegForValue(PHIOp);
2187       if (!Reg) {
2188         FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2189         return false;
2190       }
2191       FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
2192       MIMD = {};
2193     }
2194   }
2195 
2196   return true;
2197 }
2198 
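/// Try to fold the single-use load LI into FoldInst, the instruction that
/// ultimately consumes its value, via the target's tryToFoldLoadIntoMI hook.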
2199 bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2200   assert(LI->hasOneUse() &&
2201          "tryToFoldLoad expected a LoadInst with a single use");
2202   // We know that the load has a single use, but don't know what it is.  If it
2203   // isn't one of the folded instructions, then we can't succeed here.  Handle
2204   // this by scanning the single-use users of the load until we get to FoldInst.
2205   unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2206 
2207   const Instruction *TheUser = LI->user_back();
2208   while (TheUser != FoldInst && // Scan uses until we find FoldInst.
2209          // Stay in the right block.
2210          TheUser->getParent() == FoldInst->getParent() &&
2211          --MaxUsers) { // Don't scan too far.
2212     // If there are multiple or no uses of this instruction, then bail out.
2213     if (!TheUser->hasOneUse())
2214       return false;
2215 
2216     TheUser = TheUser->user_back();
2217   }
2218 
2219   // If we didn't find the fold instruction, then we failed to collapse the
2220   // sequence.
2221   if (TheUser != FoldInst)
2222     return false;
2223 
2224   // Don't try to fold volatile loads.  Target has to deal with alignment
2225   // constraints.
2226   if (LI->isVolatile())
2227     return false;
2228 
2229   // Figure out which vreg this is going into.  If there is no assigned vreg yet
2230   // then there actually was no reference to it.  Perhaps the load is referenced
2231   // by a dead instruction.
2232   Register LoadReg = getRegForValue(LI);
2233   if (!LoadReg)
2234     return false;
2235 
2236   // We can't fold if this vreg has no uses or more than one use.  Multiple uses
2237   // may mean that the instruction got lowered to multiple MIs, or the use of
2238   // the loaded value ended up being multiple operands of the result.
2239   if (!MRI.hasOneUse(LoadReg))
2240     return false;
2241 
2242   // If the register has fixups, there may be additional uses through a
2243   // different alias of the register.
2244   if (FuncInfo.RegsWithFixups.contains(LoadReg))
2245     return false;
2246 
2247   MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2248   MachineInstr *User = RI->getParent();
2249 
2250   // Set the insertion point properly.  Folding the load can cause generation of
2251   // other random instructions (like sign extends) for addressing modes; make
2252   // sure they get inserted in a logical place before the new instruction.
2253   FuncInfo.InsertPt = User;
2254   FuncInfo.MBB = User->getParent();
2255 
2256   // Ask the target to try folding the load.
2257   return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2258 }
2259 
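/// Return true if Add is an add-with-constant in the current block whose
/// result is the same width as the GEP, so the add can be folded into the
/// GEP's addressing.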
2260 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
2261   // Must be an add.
2262   if (!isa<AddOperator>(Add))
2263     return false;
2264   // Type size needs to match.
2265   if (DL.getTypeSizeInBits(GEP->getType()) !=
2266       DL.getTypeSizeInBits(Add->getType()))
2267     return false;
2268   // Must be in the same basic block.
2269   if (isa<Instruction>(Add) &&
2270       FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2271     return false;
2272   // Must have a constant operand.
2273   return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2274 }
2275 
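/// Create a MachineMemOperand describing the load or store I, carrying over
/// its alignment (defaulting to the ABI type alignment), volatility, and any
/// nontemporal/invariant/dereferenceable/range/AA metadata.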
2276 MachineMemOperand *
2277 FastISel::createMachineMemOperandFor(const Instruction *I) const {
2278   const Value *Ptr;
2279   Type *ValTy;
2280   MaybeAlign Alignment;
2281   MachineMemOperand::Flags Flags;
2282   bool IsVolatile;
2283 
2284   if (const auto *LI = dyn_cast<LoadInst>(I)) {
2285     Alignment = LI->getAlign();
2286     IsVolatile = LI->isVolatile();
2287     Flags = MachineMemOperand::MOLoad;
2288     Ptr = LI->getPointerOperand();
2289     ValTy = LI->getType();
2290   } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2291     Alignment = SI->getAlign();
2292     IsVolatile = SI->isVolatile();
2293     Flags = MachineMemOperand::MOStore;
2294     Ptr = SI->getPointerOperand();
2295     ValTy = SI->getValueOperand()->getType();
2296   } else
2297     return nullptr;
2298 
2299   bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
2300   bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
2301   bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
2302   const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2303 
2304   AAMDNodes AAInfo = I->getAAMetadata();
2305 
2306   if (!Alignment) // Ensure that codegen never sees alignment 0.
2307     Alignment = DL.getABITypeAlign(ValTy);
2308 
2309   unsigned Size = DL.getTypeStoreSize(ValTy);
2310 
2311   if (IsVolatile)
2312     Flags |= MachineMemOperand::MOVolatile;
2313   if (IsNonTemporal)
2314     Flags |= MachineMemOperand::MONonTemporal;
2315   if (IsDereferenceable)
2316     Flags |= MachineMemOperand::MODereferenceable;
2317   if (IsInvariant)
2318     Flags |= MachineMemOperand::MOInvariant;
2319 
2320   return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2321                                            *Alignment, AAInfo, Ranges);
2322 }
2323 
2324 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2325   // If both operands are the same, then try to optimize or fold the cmp.
2326   CmpInst::Predicate Predicate = CI->getPredicate();
2327   if (CI->getOperand(0) != CI->getOperand(1))
2328     return Predicate;
2329 
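  // With identical operands, the result of "x pred x" depends only on whether
  // x is ordered: reflexive ordered predicates become FCMP_ORD, non-reflexive
  // ordered ones become false, reflexive unordered ones become true, and
  // non-reflexive unordered ones become FCMP_UNO. Integer predicates fold to
  // constant true/false, expressed as FCMP_TRUE/FCMP_FALSE.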
2330   switch (Predicate) {
2331   default: llvm_unreachable("Invalid predicate!");
2332   case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2333   case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
2334   case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
2335   case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
2336   case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
2337   case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
2338   case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
2339   case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
2340   case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
2341   case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
2342   case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
2343   case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
2344   case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
2345   case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
2346   case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
2347   case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;
2348 
2349   case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
2350   case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
2351   case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
2352   case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
2353   case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
2354   case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
2355   case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
2356   case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
2357   case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
2358   case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;
2359   }
2360 
2361   return Predicate;
2362 }
2363