xref: /freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Utils.cpp (revision 4f5890a0fb086324a657f3cd7ba1abc57274e0db)
1 //===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file This file implements the utility functions used by the GlobalISel
9 /// pipeline.
10 //===----------------------------------------------------------------------===//
11 
12 #include "llvm/CodeGen/GlobalISel/Utils.h"
13 #include "llvm/ADT/APFloat.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/Optional.h"
16 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
17 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
18 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
19 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
20 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
21 #include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
22 #include "llvm/CodeGen/MachineInstr.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
25 #include "llvm/CodeGen/MachineSizeOpts.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/StackProtector.h"
28 #include "llvm/CodeGen/TargetInstrInfo.h"
29 #include "llvm/CodeGen/TargetLowering.h"
30 #include "llvm/CodeGen/TargetPassConfig.h"
31 #include "llvm/CodeGen/TargetRegisterInfo.h"
32 #include "llvm/IR/Constants.h"
33 #include "llvm/Target/TargetMachine.h"
34 
35 #define DEBUG_TYPE "globalisel-utils"
36 
37 using namespace llvm;
38 using namespace MIPatternMatch;
39 
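// Try to constrain Reg to RegClass. If the constraint cannot be satisfied,
// hand back a fresh virtual register of RegClass instead; the caller is then
// expected to insert a COPY between the old and new registers (as
// constrainOperandRegClass below does).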
40 Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
41                                    const TargetInstrInfo &TII,
42                                    const RegisterBankInfo &RBI, Register Reg,
43                                    const TargetRegisterClass &RegClass) {
44   if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
45     return MRI.createVirtualRegister(&RegClass);
46 
47   return Reg;
48 }
49 
50 Register llvm::constrainOperandRegClass(
51     const MachineFunction &MF, const TargetRegisterInfo &TRI,
52     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
53     const RegisterBankInfo &RBI, MachineInstr &InsertPt,
54     const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
55   Register Reg = RegMO.getReg();
56   // Assume physical registers are properly constrained.
57   assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
58 
59   Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
60   // If we created a new virtual register because the class is not compatible
61   // then create a copy between the new and the old register.
62   if (ConstrainedReg != Reg) {
63     MachineBasicBlock::iterator InsertIt(&InsertPt);
64     MachineBasicBlock &MBB = *InsertPt.getParent();
65     // FIXME: The copy needs to have the classes constrained for its operands.
66     // Use operand's regbank to get the class for old register (Reg).
67     if (RegMO.isUse()) {
68       BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
69               TII.get(TargetOpcode::COPY), ConstrainedReg)
70           .addReg(Reg);
71     } else {
72       assert(RegMO.isDef() && "Must be a definition");
73       BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
74               TII.get(TargetOpcode::COPY), Reg)
75           .addReg(ConstrainedReg);
76     }
77     if (GISelChangeObserver *Observer = MF.getObserver()) {
78       Observer->changingInstr(*RegMO.getParent());
79     }
80     RegMO.setReg(ConstrainedReg);
81     if (GISelChangeObserver *Observer = MF.getObserver()) {
82       Observer->changedInstr(*RegMO.getParent());
83     }
84   } else {
85     if (GISelChangeObserver *Observer = MF.getObserver()) {
86       if (!RegMO.isDef()) {
87         MachineInstr *RegDef = MRI.getVRegDef(Reg);
88         Observer->changedInstr(*RegDef);
89       }
90       Observer->changingAllUsesOfReg(MRI, Reg);
91       Observer->finishedChangingAllUsesOfReg();
92     }
93   }
94   return ConstrainedReg;
95 }
96 
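// Overload that computes the required register class from the instruction
// description: take the class the MCInstrDesc demands for operand OpIdx,
// narrow it by the class already implied by the operand (e.g. from its
// register bank), and restrict it to an allocatable class before deferring
// to the overload above. If no class constraint can be determined, Reg is
// returned unchanged.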
97 Register llvm::constrainOperandRegClass(
98     const MachineFunction &MF, const TargetRegisterInfo &TRI,
99     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
100     const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
101     MachineOperand &RegMO, unsigned OpIdx) {
102   Register Reg = RegMO.getReg();
103   // Assume physical registers are properly constrained.
104   assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
105 
106   const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
107   // Some of the target-independent instructions, like COPY, may not impose any
108   // register class constraints on some of their operands: if it's a use, we can
109   // skip constraining as the instruction defining the register would constrain
110   // it.
111 
112   if (OpRC) {
113     // Obtain the RC from the incoming regbank if it is a proper subclass. Operands
114     // can have multiple regbanks for a superclass that combines different
115     // register types (e.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
116     // resolved by targets during regbankselect should not be overridden.
117     if (const auto *SubRC = TRI.getCommonSubClass(
118             OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
119       OpRC = SubRC;
120 
121     OpRC = TRI.getAllocatableClass(OpRC);
122   }
123 
124   if (!OpRC) {
125     assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
126            "Register class constraint is required unless either the "
127            "instruction is target independent or the operand is a use");
128     // FIXME: Just bailing out like this here may not be enough, unless we
129     // expect the users of this function to do the right thing for PHIs and
130     // COPYs:
131     //   v1 = COPY v0
132     //   v2 = COPY v1
133     // v1 here may end up not being constrained at all. Note that to reproduce
134     // the issue we likely need a destination pattern of a selection rule
135     // producing such extra copies, not just an input GMIR with them: every
136     // existing target using selectImpl handles copies before calling it, so
137     // they never reach this function.
138     return Reg;
139   }
140   return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
141                                   RegMO);
142 }
143 
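// Constrain every explicit virtual-register operand of an already selected
// (non-generic) instruction to the class required by its MCInstrDesc,
// inserting COPYs where the existing class is incompatible, and tie uses to
// defs according to any TIED_TO operand constraints.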
144 bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
145                                             const TargetInstrInfo &TII,
146                                             const TargetRegisterInfo &TRI,
147                                             const RegisterBankInfo &RBI) {
148   assert(!isPreISelGenericOpcode(I.getOpcode()) &&
149          "A selected instruction is expected");
150   MachineBasicBlock &MBB = *I.getParent();
151   MachineFunction &MF = *MBB.getParent();
152   MachineRegisterInfo &MRI = MF.getRegInfo();
153 
154   for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
155     MachineOperand &MO = I.getOperand(OpI);
156 
157     // There's nothing to be done on non-register operands.
158     if (!MO.isReg())
159       continue;
160 
161     LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
162     assert(MO.isReg() && "Unsupported non-reg operand");
163 
164     Register Reg = MO.getReg();
165     // Physical registers don't need to be constrained.
166     if (Register::isPhysicalRegister(Reg))
167       continue;
168 
169     // Register operands with a value of 0 (e.g. predicate operands) don't need
170     // to be constrained.
171     if (Reg == 0)
172       continue;
173 
174     // If the operand is a vreg, we should constrain its regclass, and only
175     // insert COPYs if that's impossible.
176     // constrainOperandRegClass does that for us.
177     constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
178 
179     // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
180     // done.
181     if (MO.isUse()) {
182       int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
183       if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
184         I.tieOperands(DefIdx, OpI);
185     }
186   }
187   return true;
188 }
189 
190 bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
191                          MachineRegisterInfo &MRI) {
192   // Give up if either DstReg or SrcReg is a physical register.
193   if (DstReg.isPhysical() || SrcReg.isPhysical())
194     return false;
195   // Give up if the types don't match.
196   if (MRI.getType(DstReg) != MRI.getType(SrcReg))
197     return false;
198   // Replace if either DstReg has no constraints or the register
199   // constraints match.
200   return !MRI.getRegClassOrRegBank(DstReg) ||
201          MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
202 }
203 
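// An instruction is considered trivially dead when it has no side effects
// (it is safe to move, or is a PHI), it is not a LOCAL_ESCAPE or LIFETIME
// marker, and every register it defines is a virtual register with no
// remaining non-debug uses.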
204 bool llvm::isTriviallyDead(const MachineInstr &MI,
205                            const MachineRegisterInfo &MRI) {
206   // FIXME: This logic is mostly duplicated with
207   // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
208   // MachineInstr::isLabel?
209 
210   // Don't delete frame allocation labels.
211   if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
212     return false;
213   // LIFETIME markers should be preserved even if they seem dead.
214   if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
215       MI.getOpcode() == TargetOpcode::LIFETIME_END)
216     return false;
217 
218   // If we can move an instruction, we can remove it.  Otherwise, it has
219   // a side-effect of some sort.
220   bool SawStore = false;
221   if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
222     return false;
223 
224   // Instructions without side-effects are dead iff they only define dead vregs.
225   for (auto &MO : MI.operands()) {
226     if (!MO.isReg() || !MO.isDef())
227       continue;
228 
229     Register Reg = MO.getReg();
230     if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
231       return false;
232   }
233   return true;
234 }
235 
236 static void reportGISelDiagnostic(DiagnosticSeverity Severity,
237                                   MachineFunction &MF,
238                                   const TargetPassConfig &TPC,
239                                   MachineOptimizationRemarkEmitter &MORE,
240                                   MachineOptimizationRemarkMissed &R) {
241   bool IsFatal = Severity == DS_Error &&
242                  TPC.isGlobalISelAbortEnabled();
243   // Print the function name explicitly if we don't have a debug location (which
244   // makes the diagnostic less useful) or if we're going to emit a raw error.
245   if (!R.getLocation().isValid() || IsFatal)
246     R << (" (in function: " + MF.getName() + ")").str();
247 
248   if (IsFatal)
249     report_fatal_error(Twine(R.getMsg()));
250   else
251     MORE.emit(R);
252 }
253 
254 void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
255                               MachineOptimizationRemarkEmitter &MORE,
256                               MachineOptimizationRemarkMissed &R) {
257   reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
258 }
259 
260 void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
261                               MachineOptimizationRemarkEmitter &MORE,
262                               MachineOptimizationRemarkMissed &R) {
263   MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
264   reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
265 }
266 
267 void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
268                               MachineOptimizationRemarkEmitter &MORE,
269                               const char *PassName, StringRef Msg,
270                               const MachineInstr &MI) {
271   MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
272                                     MI.getDebugLoc(), MI.getParent());
273   R << Msg;
274   // Printing MI is expensive; only do it if expensive remarks are enabled.
275   if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
276     R << ": " << ore::MNV("Inst", MI);
277   reportGISelFailure(MF, TPC, MORE, R);
278 }
279 
280 Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
281                                           const MachineRegisterInfo &MRI) {
282   Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
283       VReg, MRI, /*LookThroughInstrs*/ false);
284   assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
285          "Value found while looking through instrs");
286   if (!ValAndVReg)
287     return None;
288   return ValAndVReg->Value;
289 }
290 
291 Optional<int64_t>
292 llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
293   Optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
294   if (Val && Val->getBitWidth() <= 64)
295     return Val->getSExtValue();
296   return None;
297 }
298 
299 namespace {
300 
301 typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
302 typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
303 
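// Shared worker for the look-through helpers below: walk up the def chain
// through COPY, G_INTTOPTR and (optionally) extension/truncation
// instructions, recording each ext/trunc together with its destination
// width. Once a constant-producing instruction is reached, extract its value
// via getAPCstValue and replay the recorded operations so the returned APInt
// has the width of the original VReg.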
304 Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
305     Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
306     GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
307     bool LookThroughAnyExt = false) {
308   SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
309   MachineInstr *MI;
310 
311   while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
312          LookThroughInstrs) {
313     switch (MI->getOpcode()) {
314     case TargetOpcode::G_ANYEXT:
315       if (!LookThroughAnyExt)
316         return None;
317       LLVM_FALLTHROUGH;
318     case TargetOpcode::G_TRUNC:
319     case TargetOpcode::G_SEXT:
320     case TargetOpcode::G_ZEXT:
321       SeenOpcodes.push_back(std::make_pair(
322           MI->getOpcode(),
323           MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
324       VReg = MI->getOperand(1).getReg();
325       break;
326     case TargetOpcode::COPY:
327       VReg = MI->getOperand(1).getReg();
328       if (Register::isPhysicalRegister(VReg))
329         return None;
330       break;
331     case TargetOpcode::G_INTTOPTR:
332       VReg = MI->getOperand(1).getReg();
333       break;
334     default:
335       return None;
336     }
337   }
338   if (!MI || !IsConstantOpcode(MI))
339     return None;
340 
341   Optional<APInt> MaybeVal = getAPCstValue(MI);
342   if (!MaybeVal)
343     return None;
344   APInt &Val = *MaybeVal;
345   while (!SeenOpcodes.empty()) {
346     std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
347     switch (OpcodeAndSize.first) {
348     case TargetOpcode::G_TRUNC:
349       Val = Val.trunc(OpcodeAndSize.second);
350       break;
351     case TargetOpcode::G_ANYEXT:
352     case TargetOpcode::G_SEXT:
353       Val = Val.sext(OpcodeAndSize.second);
354       break;
355     case TargetOpcode::G_ZEXT:
356       Val = Val.zext(OpcodeAndSize.second);
357       break;
358     }
359   }
360 
361   return ValueAndVReg{Val, VReg};
362 }
363 
364 bool isIConstant(const MachineInstr *MI) {
365   if (!MI)
366     return false;
367   return MI->getOpcode() == TargetOpcode::G_CONSTANT;
368 }
369 
370 bool isFConstant(const MachineInstr *MI) {
371   if (!MI)
372     return false;
373   return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
374 }
375 
376 bool isAnyConstant(const MachineInstr *MI) {
377   if (!MI)
378     return false;
379   unsigned Opc = MI->getOpcode();
380   return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
381 }
382 
383 Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
384   const MachineOperand &CstVal = MI->getOperand(1);
385   if (CstVal.isCImm())
386     return CstVal.getCImm()->getValue();
387   return None;
388 }
389 
390 Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
391   const MachineOperand &CstVal = MI->getOperand(1);
392   if (CstVal.isCImm())
393     return CstVal.getCImm()->getValue();
394   if (CstVal.isFPImm())
395     return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
396   return None;
397 }
398 
399 } // end anonymous namespace
400 
401 Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
402     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
403   return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
404                                            getCImmAsAPInt, LookThroughInstrs);
405 }
406 
407 Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
408     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
409     bool LookThroughAnyExt) {
410   return getConstantVRegValWithLookThrough(
411       VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
412       LookThroughAnyExt);
413 }
414 
415 Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
416     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
417   auto Reg = getConstantVRegValWithLookThrough(
418       VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
419   if (!Reg)
420     return None;
421   return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
422                         Reg->VReg};
423 }
424 
425 const ConstantFP *
426 llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
427   MachineInstr *MI = MRI.getVRegDef(VReg);
428   if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
429     return nullptr;
430   return MI->getOperand(1).getFPImm();
431 }
432 
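// Walk up through COPY (and pre-ISel generic optimization-hint) instructions
// to find the instruction that really defines Reg, returning both that
// instruction and the underlying source register. Returns None if no valid
// LLT is attached to the definition.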
433 Optional<DefinitionAndSourceRegister>
434 llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
435   Register DefSrcReg = Reg;
436   auto *DefMI = MRI.getVRegDef(Reg);
437   auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
438   if (!DstTy.isValid())
439     return None;
440   unsigned Opc = DefMI->getOpcode();
441   while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
442     Register SrcReg = DefMI->getOperand(1).getReg();
443     auto SrcTy = MRI.getType(SrcReg);
444     if (!SrcTy.isValid())
445       break;
446     DefMI = MRI.getVRegDef(SrcReg);
447     DefSrcReg = SrcReg;
448     Opc = DefMI->getOpcode();
449   }
450   return DefinitionAndSourceRegister{DefMI, DefSrcReg};
451 }
452 
453 MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
454                                          const MachineRegisterInfo &MRI) {
455   Optional<DefinitionAndSourceRegister> DefSrcReg =
456       getDefSrcRegIgnoringCopies(Reg, MRI);
457   return DefSrcReg ? DefSrcReg->MI : nullptr;
458 }
459 
460 Register llvm::getSrcRegIgnoringCopies(Register Reg,
461                                        const MachineRegisterInfo &MRI) {
462   Optional<DefinitionAndSourceRegister> DefSrcReg =
463       getDefSrcRegIgnoringCopies(Reg, MRI);
464   return DefSrcReg ? DefSrcReg->Reg : Register();
465 }
466 
467 MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
468                                  const MachineRegisterInfo &MRI) {
469   MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
470   return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
471 }
472 
473 APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
474   if (Size == 32)
475     return APFloat(float(Val));
476   if (Size == 64)
477     return APFloat(Val);
478   if (Size != 16)
479     llvm_unreachable("Unsupported FPConstant size");
480   bool Ignored;
481   APFloat APF(Val);
482   APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
483   return APF;
484 }
485 
486 Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
487                                         const Register Op2,
488                                         const MachineRegisterInfo &MRI) {
489   auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
490   if (!MaybeOp2Cst)
491     return None;
492 
493   auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
494   if (!MaybeOp1Cst)
495     return None;
496 
497   const APInt &C1 = MaybeOp1Cst->Value;
498   const APInt &C2 = MaybeOp2Cst->Value;
499   switch (Opcode) {
500   default:
501     break;
502   case TargetOpcode::G_ADD:
503     return C1 + C2;
504   case TargetOpcode::G_AND:
505     return C1 & C2;
506   case TargetOpcode::G_ASHR:
507     return C1.ashr(C2);
508   case TargetOpcode::G_LSHR:
509     return C1.lshr(C2);
510   case TargetOpcode::G_MUL:
511     return C1 * C2;
512   case TargetOpcode::G_OR:
513     return C1 | C2;
514   case TargetOpcode::G_SHL:
515     return C1 << C2;
516   case TargetOpcode::G_SUB:
517     return C1 - C2;
518   case TargetOpcode::G_XOR:
519     return C1 ^ C2;
520   case TargetOpcode::G_UDIV:
521     if (!C2.getBoolValue())
522       break;
523     return C1.udiv(C2);
524   case TargetOpcode::G_SDIV:
525     if (!C2.getBoolValue())
526       break;
527     return C1.sdiv(C2);
528   case TargetOpcode::G_UREM:
529     if (!C2.getBoolValue())
530       break;
531     return C1.urem(C2);
532   case TargetOpcode::G_SREM:
533     if (!C2.getBoolValue())
534       break;
535     return C1.srem(C2);
536   }
537 
538   return None;
539 }
540 
541 Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
542                                             const Register Op2,
543                                             const MachineRegisterInfo &MRI) {
544   const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
545   if (!Op2Cst)
546     return None;
547 
548   const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
549   if (!Op1Cst)
550     return None;
551 
552   APFloat C1 = Op1Cst->getValueAPF();
553   const APFloat &C2 = Op2Cst->getValueAPF();
554   switch (Opcode) {
555   case TargetOpcode::G_FADD:
556     C1.add(C2, APFloat::rmNearestTiesToEven);
557     return C1;
558   case TargetOpcode::G_FSUB:
559     C1.subtract(C2, APFloat::rmNearestTiesToEven);
560     return C1;
561   case TargetOpcode::G_FMUL:
562     C1.multiply(C2, APFloat::rmNearestTiesToEven);
563     return C1;
564   case TargetOpcode::G_FDIV:
565     C1.divide(C2, APFloat::rmNearestTiesToEven);
566     return C1;
567   case TargetOpcode::G_FREM:
568     C1.mod(C2);
569     return C1;
570   case TargetOpcode::G_FCOPYSIGN:
571     C1.copySign(C2);
572     return C1;
573   case TargetOpcode::G_FMINNUM:
574     return minnum(C1, C2);
575   case TargetOpcode::G_FMAXNUM:
576     return maxnum(C1, C2);
577   case TargetOpcode::G_FMINIMUM:
578     return minimum(C1, C2);
579   case TargetOpcode::G_FMAXIMUM:
580     return maximum(C1, C2);
581   case TargetOpcode::G_FMINNUM_IEEE:
582   case TargetOpcode::G_FMAXNUM_IEEE:
583     // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
584     // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
585     // and currently there isn't a nice wrapper in APFloat for the version with
586     // correct snan handling.
587     break;
588   default:
589     break;
590   }
591 
592   return None;
593 }
594 
595 Register llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
596                                        const Register Op2,
597                                        const MachineRegisterInfo &MRI,
598                                        MachineIRBuilder &MIB) {
599   auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
600   if (!SrcVec2)
601     return Register();
602 
603   auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
604   if (!SrcVec1)
605     return Register();
606 
607   const LLT EltTy = MRI.getType(SrcVec1->getSourceReg(0));
608 
609   SmallVector<Register, 16> FoldedElements;
610   for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
611     auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
612                                       SrcVec2->getSourceReg(Idx), MRI);
613     if (!MaybeCst)
614       return Register();
615     auto FoldedCstReg = MIB.buildConstant(EltTy, *MaybeCst).getReg(0);
616     FoldedElements.emplace_back(FoldedCstReg);
617   }
618   // Create the new vector constant.
619   auto CstVec =
620       MIB.buildBuildVector(MRI.getType(SrcVec1->getReg(0)), FoldedElements);
621   return CstVec.getReg(0);
622 }
623 
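// Conservatively decide whether Val can never be a NaN (or, when SNaN is
// set, never a signaling NaN). Trusts the FmNoNans flag and the target's
// NoNaNsFPMath option, inspects FP constants and G_BUILD_VECTORs of them,
// models the NaN-propagation rules of the fmin/fmax opcodes, and for the
// SNaN query accepts operations known to quiet their input (G_FPEXT,
// G_FPTRUNC, G_FCANONICALIZE).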
624 bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
625                            bool SNaN) {
626   const MachineInstr *DefMI = MRI.getVRegDef(Val);
627   if (!DefMI)
628     return false;
629 
630   const TargetMachine& TM = DefMI->getMF()->getTarget();
631   if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
632     return true;
633 
634   // If the value is a constant, we can obviously see if it is a NaN or not.
635   if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
636     return !FPVal->getValueAPF().isNaN() ||
637            (SNaN && !FPVal->getValueAPF().isSignaling());
638   }
639 
640   if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
641     for (const auto &Op : DefMI->uses())
642       if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
643         return false;
644     return true;
645   }
646 
647   switch (DefMI->getOpcode()) {
648   default:
649     break;
650   case TargetOpcode::G_FMINNUM_IEEE:
651   case TargetOpcode::G_FMAXNUM_IEEE: {
652     if (SNaN)
653       return true;
654     // This can return a NaN if either operand is an sNaN, or if both operands
655     // are NaN.
656     return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
657             isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
658            (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
659             isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
660   }
661   case TargetOpcode::G_FMINNUM:
662   case TargetOpcode::G_FMAXNUM: {
663     // Only one operand needs to be known not to be a NaN, since it will be
664     // returned if the other ends up being one.
665     return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
666            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
667   }
668   }
669 
670   if (SNaN) {
671     // FP operations quiet signaling NaNs. For now, just handle the ones
672     // inserted during legalization.
673     switch (DefMI->getOpcode()) {
674     case TargetOpcode::G_FPEXT:
675     case TargetOpcode::G_FPTRUNC:
676     case TargetOpcode::G_FCANONICALIZE:
677       return true;
678     default:
679       return false;
680     }
681   }
682 
683   return false;
684 }
685 
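// Derive the best known alignment for the access described by MPO: the frame
// object's alignment (adjusted by the access offset) for fixed-stack
// accesses, the IR value's pointer alignment otherwise, and a conservative
// alignment of 1 if neither is available.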
686 Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
687                                   const MachinePointerInfo &MPO) {
688   auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
689   if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
690     MachineFrameInfo &MFI = MF.getFrameInfo();
691     return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
692                            MPO.Offset);
693   }
694 
695   if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
696     const Module *M = MF.getFunction().getParent();
697     return V->getPointerAlignment(M->getDataLayout());
698   }
699 
700   return Align(1);
701 }
702 
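// Return the virtual register holding the incoming value of PhysReg in the
// entry block, creating the live-in and its vreg if needed. If the live-in
// vreg exists but its defining COPY from PhysReg was deleted (e.g. because
// it became dead during lowering), the COPY is re-inserted at the start of
// the entry block.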
703 Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
704                                         const TargetInstrInfo &TII,
705                                         MCRegister PhysReg,
706                                         const TargetRegisterClass &RC,
707                                         const DebugLoc &DL, LLT RegTy) {
708   MachineBasicBlock &EntryMBB = MF.front();
709   MachineRegisterInfo &MRI = MF.getRegInfo();
710   Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
711   if (LiveIn) {
712     MachineInstr *Def = MRI.getVRegDef(LiveIn);
713     if (Def) {
714       // FIXME: Should the verifier check this is in the entry block?
715       assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
716       return LiveIn;
717     }
718 
719     // It's possible the incoming argument register and copy were added during
720     // lowering, but later deleted due to being/becoming dead. If this happens,
721     // re-insert the copy.
722   } else {
723     // The live in register was not present, so add it.
724     LiveIn = MF.addLiveIn(PhysReg, &RC);
725     if (RegTy.isValid())
726       MRI.setType(LiveIn, RegTy);
727   }
728 
729   BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
730     .addReg(PhysReg);
731   if (!EntryMBB.isLiveIn(PhysReg))
732     EntryMBB.addLiveIn(PhysReg);
733   return LiveIn;
734 }
735 
736 Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
737                                         uint64_t Imm,
738                                         const MachineRegisterInfo &MRI) {
739   auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
740   if (MaybeOp1Cst) {
741     switch (Opcode) {
742     default:
743       break;
744     case TargetOpcode::G_SEXT_INREG: {
745       LLT Ty = MRI.getType(Op1);
746       return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
747     }
748     }
749   }
750   return None;
751 }
752 
753 Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
754                                                Register Src,
755                                                const MachineRegisterInfo &MRI) {
756   assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
757   if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
758     APFloat DstVal(getFltSemanticForLLT(DstTy));
759     DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
760                             APFloat::rmNearestTiesToEven);
761     return DstVal;
762   }
763   return None;
764 }
765 
766 Optional<SmallVector<unsigned>>
767 llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
768   LLT Ty = MRI.getType(Src);
769   SmallVector<unsigned> FoldedCTLZs;
770   auto tryFoldScalar = [&](Register R) -> Optional<unsigned> {
771     auto MaybeCst = getIConstantVRegVal(R, MRI);
772     if (!MaybeCst)
773       return None;
774     return MaybeCst->countLeadingZeros();
775   };
776   if (Ty.isVector()) {
777     // Try to constant fold each element.
778     auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
779     if (!BV)
780       return None;
781     for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
782       if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
783         FoldedCTLZs.emplace_back(*MaybeFold);
784         continue;
785       }
786       return None;
787     }
788     return FoldedCTLZs;
789   }
790   if (auto MaybeCst = tryFoldScalar(Src)) {
791     FoldedCTLZs.emplace_back(*MaybeCst);
792     return FoldedCTLZs;
793   }
794   return None;
795 }
796 
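// Return true if Reg is known to hold a power of two (exactly one bit set).
// A few common patterns are matched directly (a G_CONSTANT power of two, a
// constant one shifted left, a sign mask shifted right, and build vectors of
// such values) before falling back to known-bits analysis when a
// GISelKnownBits instance is available.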
797 bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
798                                   GISelKnownBits *KB) {
799   Optional<DefinitionAndSourceRegister> DefSrcReg =
800       getDefSrcRegIgnoringCopies(Reg, MRI);
801   if (!DefSrcReg)
802     return false;
803 
804   const MachineInstr &MI = *DefSrcReg->MI;
805   const LLT Ty = MRI.getType(Reg);
806 
807   switch (MI.getOpcode()) {
808   case TargetOpcode::G_CONSTANT: {
809     unsigned BitWidth = Ty.getScalarSizeInBits();
810     const ConstantInt *CI = MI.getOperand(1).getCImm();
811     return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
812   }
813   case TargetOpcode::G_SHL: {
814     // A left-shift of a constant one will have exactly one bit set because
815     // shifting the bit off the end is undefined.
816 
817     // TODO: Constant splat
818     if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
819       if (*ConstLHS == 1)
820         return true;
821     }
822 
823     break;
824   }
825   case TargetOpcode::G_LSHR: {
826     if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
827       if (ConstLHS->isSignMask())
828         return true;
829     }
830 
831     break;
832   }
833   case TargetOpcode::G_BUILD_VECTOR: {
834     // TODO: Probably should have a recursion depth guard since you could have
835     // bitcasted vector elements.
836     for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
837       if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
838         return false;
839 
840     return true;
841   }
842   case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
843     // Only handle constants since we would need to know whether the number of
844     // leading zeros is greater than the truncation amount.
845     const unsigned BitWidth = Ty.getScalarSizeInBits();
846     for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
847       auto Const = getIConstantVRegVal(MO.getReg(), MRI);
848       if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
849         return false;
850     }
851 
852     return true;
853   }
854   default:
855     break;
856   }
857 
858   if (!KB)
859     return false;
860 
861   // More could be done here, though the above checks are enough
862   // to handle some common cases.
863 
864   // Fall back to computeKnownBits to catch other known cases.
865   KnownBits Known = KB->getKnownBits(Reg);
866   return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
867 }
868 
869 void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
870   AU.addPreserved<StackProtector>();
871 }
872 
873 static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
874   unsigned Mul = OrigSize * TargetSize;
875   unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
876   return Mul / GCDSize;
877 }
878 
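// Compute a type whose total size is the least common multiple of the sizes
// of OrigTy and TargetTy, preferring to keep OrigTy's element type (and
// pointer types) when possible. For instance, per the logic below,
// getLCMType(<2 x s32>, <3 x s32>) yields <6 x s32>, and
// getLCMType(s32, s48) yields s96.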
879 LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
880   const unsigned OrigSize = OrigTy.getSizeInBits();
881   const unsigned TargetSize = TargetTy.getSizeInBits();
882 
883   if (OrigSize == TargetSize)
884     return OrigTy;
885 
886   if (OrigTy.isVector()) {
887     const LLT OrigElt = OrigTy.getElementType();
888 
889     if (TargetTy.isVector()) {
890       const LLT TargetElt = TargetTy.getElementType();
891 
892       if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
893         int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
894                                             TargetTy.getNumElements());
895         // Prefer the original element type.
896         ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
897         return LLT::vector(Mul.divideCoefficientBy(GCDElts),
898                            OrigTy.getElementType());
899       }
900     } else {
901       if (OrigElt.getSizeInBits() == TargetSize)
902         return OrigTy;
903     }
904 
905     unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
906     return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
907   }
908 
909   if (TargetTy.isVector()) {
910     unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
911     return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
912   }
913 
914   unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
915 
916   // Preserve pointer types.
917   if (LCMSize == OrigSize)
918     return OrigTy;
919   if (LCMSize == TargetSize)
920     return TargetTy;
921 
922   return LLT::scalar(LCMSize);
923 }
924 
925 LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
926   if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
927       (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
928     return getLCMType(OrigTy, TargetTy);
929 
930   unsigned OrigTyNumElts = OrigTy.getNumElements();
931   unsigned TargetTyNumElts = TargetTy.getNumElements();
932   if (OrigTyNumElts % TargetTyNumElts == 0)
933     return OrigTy;
934 
935   unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
936   return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
937                              OrigTy.getElementType());
938 }
939 
940 LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
941   const unsigned OrigSize = OrigTy.getSizeInBits();
942   const unsigned TargetSize = TargetTy.getSizeInBits();
943 
944   if (OrigSize == TargetSize)
945     return OrigTy;
946 
947   if (OrigTy.isVector()) {
948     LLT OrigElt = OrigTy.getElementType();
949     if (TargetTy.isVector()) {
950       LLT TargetElt = TargetTy.getElementType();
951       if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
952         int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
953                                         TargetTy.getNumElements());
954         return LLT::scalarOrVector(ElementCount::getFixed(GCD), OrigElt);
955       }
956     } else {
957       // If the source is a vector of pointers, return a pointer element.
958       if (OrigElt.getSizeInBits() == TargetSize)
959         return OrigElt;
960     }
961 
962     unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
963     if (GCD == OrigElt.getSizeInBits())
964       return OrigElt;
965 
966     // If we can't produce the original element type, we have to use a smaller
967     // scalar.
968     if (GCD < OrigElt.getSizeInBits())
969       return LLT::scalar(GCD);
970     return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
971   }
972 
973   if (TargetTy.isVector()) {
974     // Try to preserve the original element type.
975     LLT TargetElt = TargetTy.getElementType();
976     if (TargetElt.getSizeInBits() == OrigSize)
977       return OrigTy;
978   }
979 
980   unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
981   return LLT::scalar(GCD);
982 }
983 
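// For a G_SHUFFLE_VECTOR, return the common index that every defined mask
// element refers to if the mask is a splat (undef elements are ignored, and
// an all-undef mask is treated as a splat of index 0); otherwise return None.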
984 Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
985   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
986          "Only G_SHUFFLE_VECTOR can have a splat index!");
987   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
988   auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
989 
990   // If all elements are undefined, this shuffle can be considered a splat.
991   // Return 0 to give callers a better chance to simplify.
992   if (FirstDefinedIdx == Mask.end())
993     return 0;
994 
995   // Make sure all remaining elements are either undef or the same
996   // as the first non-undef value.
997   int SplatValue = *FirstDefinedIdx;
998   if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
999              [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1000     return None;
1001 
1002   return SplatValue;
1003 }
1004 
1005 static bool isBuildVectorOp(unsigned Opcode) {
1006   return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1007          Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1008 }
1009 
1010 namespace {
1011 
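// Look through copies to a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC and, if
// every source element is the same constant (optionally letting undef
// elements match anything), return that constant value together with the
// vreg that holds it.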
1012 Optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1013                                            const MachineRegisterInfo &MRI,
1014                                            bool AllowUndef) {
1015   MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
1016   if (!MI)
1017     return None;
1018 
1019   if (!isBuildVectorOp(MI->getOpcode()))
1020     return None;
1021 
1022   Optional<ValueAndVReg> SplatValAndReg = None;
1023   for (MachineOperand &Op : MI->uses()) {
1024     Register Element = Op.getReg();
1025     auto ElementValAndReg =
1026         getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);
1027 
1028     // If AllowUndef, treat undef as a value that will result in a constant splat.
1029     if (!ElementValAndReg) {
1030       if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1031         continue;
1032       return None;
1033     }
1034 
1035     // Record splat value
1036     if (!SplatValAndReg)
1037       SplatValAndReg = ElementValAndReg;
1038 
1039     // Different constant than the one already recorded; not a constant splat.
1040     if (SplatValAndReg->Value != ElementValAndReg->Value)
1041       return None;
1042   }
1043 
1044   return SplatValAndReg;
1045 }
1046 
1047 } // end anonymous namespace
1048 
1049 bool llvm::isBuildVectorConstantSplat(const Register Reg,
1050                                       const MachineRegisterInfo &MRI,
1051                                       int64_t SplatValue, bool AllowUndef) {
1052   if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1053     return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
1054   return false;
1055 }
1056 
1057 bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
1058                                       const MachineRegisterInfo &MRI,
1059                                       int64_t SplatValue, bool AllowUndef) {
1060   return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1061                                     AllowUndef);
1062 }
1063 
1064 Optional<int64_t>
1065 llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
1066                                   const MachineRegisterInfo &MRI) {
1067   if (auto SplatValAndReg =
1068           getAnyConstantSplat(MI.getOperand(0).getReg(), MRI, false))
1069     return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1070   return None;
1071 }
1072 
1073 Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg,
1074                                                  const MachineRegisterInfo &MRI,
1075                                                  bool AllowUndef) {
1076   if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1077     return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1078   return None;
1079 }
1080 
1081 bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
1082                                  const MachineRegisterInfo &MRI,
1083                                  bool AllowUndef) {
1084   return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1085 }
1086 
1087 bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
1088                                 const MachineRegisterInfo &MRI,
1089                                 bool AllowUndef) {
1090   return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1091 }
1092 
1093 Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
1094                                              const MachineRegisterInfo &MRI) {
1095   unsigned Opc = MI.getOpcode();
1096   if (!isBuildVectorOp(Opc))
1097     return None;
1098   if (auto Splat = getBuildVectorConstantSplat(MI, MRI))
1099     return RegOrConstant(*Splat);
1100   auto Reg = MI.getOperand(1).getReg();
1101   if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
1102              [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1103     return None;
1104   return RegOrConstant(Reg);
1105 }
1106 
1107 bool llvm::isConstantOrConstantVector(MachineInstr &MI,
1108                                       const MachineRegisterInfo &MRI) {
1109   Register Def = MI.getOperand(0).getReg();
1110   if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1111     return true;
1112   GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
1113   if (!BV)
1114     return false;
1115   for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1116     if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
1117         getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1118       continue;
1119     return false;
1120   }
1121   return true;
1122 }
1123 
1124 Optional<APInt>
1125 llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
1126                                       const MachineRegisterInfo &MRI) {
1127   Register Def = MI.getOperand(0).getReg();
1128   if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1129     return C->Value;
1130   auto MaybeCst = getBuildVectorConstantSplat(MI, MRI);
1131   if (!MaybeCst)
1132     return None;
1133   const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1134   return APInt(ScalarSize, *MaybeCst, true);
1135 }
1136 
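// Apply Match to a scalar G_CONSTANT, or to every element of a constant
// G_BUILD_VECTOR, looking through copies along the way. When AllowUndefs is
// set, G_IMPLICIT_DEF is accepted and Match is invoked with a null Constant
// pointer for it.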
1137 bool llvm::matchUnaryPredicate(
1138     const MachineRegisterInfo &MRI, Register Reg,
1139     std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1140 
1141   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1142   if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1143     return Match(nullptr);
1144 
1145   // TODO: Also handle fconstant
1146   if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1147     return Match(Def->getOperand(1).getCImm());
1148 
1149   if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1150     return false;
1151 
1152   for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1153     Register SrcElt = Def->getOperand(I).getReg();
1154     const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1155     if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1156       if (!Match(nullptr))
1157         return false;
1158       continue;
1159     }
1160 
1161     if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1162         !Match(SrcDef->getOperand(1).getCImm()))
1163       return false;
1164   }
1165 
1166   return true;
1167 }
1168 
1169 bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1170                           bool IsFP) {
1171   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1172   case TargetLowering::UndefinedBooleanContent:
1173     return Val & 0x1;
1174   case TargetLowering::ZeroOrOneBooleanContent:
1175     return Val == 1;
1176   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1177     return Val == -1;
1178   }
1179   llvm_unreachable("Invalid boolean contents");
1180 }
1181 
1182 int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1183                              bool IsFP) {
1184   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1185   case TargetLowering::UndefinedBooleanContent:
1186   case TargetLowering::ZeroOrOneBooleanContent:
1187     return 1;
1188   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1189     return -1;
1190   }
1191   llvm_unreachable("Invalid boolean contents");
1192 }
1193 
1194 bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
1195                             ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
1196   const auto &F = MBB.getParent()->getFunction();
1197   return F.hasOptSize() || F.hasMinSize() ||
1198          llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
1199 }
1200 
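// Erase MI, but first queue the defining instructions of its virtual
// register uses in DeadInstChain so they can be revisited (and removed by
// eraseInstrs below if they in turn become dead). A LostDebugLocObserver, if
// provided, is checkpointed after the erasure.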
1201 void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
1202                             LostDebugLocObserver *LocObserver,
1203                             SmallInstListTy &DeadInstChain) {
1204   for (MachineOperand &Op : MI.uses()) {
1205     if (Op.isReg() && Op.getReg().isVirtual())
1206       DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1207   }
1208   LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1209   DeadInstChain.remove(&MI);
1210   MI.eraseFromParent();
1211   if (LocObserver)
1212     LocObserver->checkpoint(false);
1213 }
1214 
1215 void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
1216                        MachineRegisterInfo &MRI,
1217                        LostDebugLocObserver *LocObserver) {
1218   SmallInstListTy DeadInstChain;
1219   for (MachineInstr *MI : DeadInstrs)
1220     saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1221 
1222   while (!DeadInstChain.empty()) {
1223     MachineInstr *Inst = DeadInstChain.pop_back_val();
1224     if (!isTriviallyDead(*Inst, MRI))
1225       continue;
1226     saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1227   }
1228 }
1229 
1230 void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
1231                       LostDebugLocObserver *LocObserver) {
1232   return eraseInstrs({&MI}, MRI, LocObserver);
1233 }
1234