1 //===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file This file implements the utility functions used by the GlobalISel
9 /// pipeline.
10 //===----------------------------------------------------------------------===//
11 
12 #include "llvm/CodeGen/GlobalISel/Utils.h"
13 #include "llvm/ADT/APFloat.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/Optional.h"
16 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
17 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
18 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
19 #include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
20 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
21 #include "llvm/CodeGen/MachineInstr.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/MachineSizeOpts.h"
26 #include "llvm/CodeGen/RegisterBankInfo.h"
27 #include "llvm/CodeGen/StackProtector.h"
28 #include "llvm/CodeGen/TargetInstrInfo.h"
29 #include "llvm/CodeGen/TargetLowering.h"
30 #include "llvm/CodeGen/TargetPassConfig.h"
31 #include "llvm/CodeGen/TargetRegisterInfo.h"
32 #include "llvm/IR/Constants.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Transforms/Utils/SizeOpts.h"
35 
36 #define DEBUG_TYPE "globalisel-utils"
37 
38 using namespace llvm;
39 using namespace MIPatternMatch;
40 
41 Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
42                                    const TargetInstrInfo &TII,
43                                    const RegisterBankInfo &RBI, Register Reg,
44                                    const TargetRegisterClass &RegClass) {
45   if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
46     return MRI.createVirtualRegister(&RegClass);
47 
48   return Reg;
49 }
50 
51 Register llvm::constrainOperandRegClass(
52     const MachineFunction &MF, const TargetRegisterInfo &TRI,
53     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
54     const RegisterBankInfo &RBI, MachineInstr &InsertPt,
55     const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
56   Register Reg = RegMO.getReg();
57   // Assume physical registers are properly constrained.
58   assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
59 
60   // Save the old register class to check whether the change notifications
61   // will be required.
62   // TODO: A better approach would be to pass the observers to
63   // constrainRegToClass().
64   auto *OldRegClass = MRI.getRegClassOrNull(Reg);
65   Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
66   // If we created a new virtual register because the class is not compatible
67   // then create a copy between the new and the old register.
68   if (ConstrainedReg != Reg) {
69     MachineBasicBlock::iterator InsertIt(&InsertPt);
70     MachineBasicBlock &MBB = *InsertPt.getParent();
71     // FIXME: The copy needs to have the classes constrained for its operands.
72     // Use operand's regbank to get the class for old register (Reg).
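    // Illustratively (hypothetical vregs), the repair copy is placed so the
    // constrained register feeds or consumes the original one:
    //   use operand:  %constrained:rc = COPY %old   (inserted before InsertPt)
    //   def operand:  %old = COPY %constrained:rc   (inserted after InsertPt)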
73     if (RegMO.isUse()) {
74       BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
75               TII.get(TargetOpcode::COPY), ConstrainedReg)
76           .addReg(Reg);
77     } else {
78       assert(RegMO.isDef() && "Must be a definition");
79       BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
80               TII.get(TargetOpcode::COPY), Reg)
81           .addReg(ConstrainedReg);
82     }
83     if (GISelChangeObserver *Observer = MF.getObserver()) {
84       Observer->changingInstr(*RegMO.getParent());
85     }
86     RegMO.setReg(ConstrainedReg);
87     if (GISelChangeObserver *Observer = MF.getObserver()) {
88       Observer->changedInstr(*RegMO.getParent());
89     }
90   } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
91     if (GISelChangeObserver *Observer = MF.getObserver()) {
92       if (!RegMO.isDef()) {
93         MachineInstr *RegDef = MRI.getVRegDef(Reg);
94         Observer->changedInstr(*RegDef);
95       }
96       Observer->changingAllUsesOfReg(MRI, Reg);
97       Observer->finishedChangingAllUsesOfReg();
98     }
99   }
100   return ConstrainedReg;
101 }
102 
103 Register llvm::constrainOperandRegClass(
104     const MachineFunction &MF, const TargetRegisterInfo &TRI,
105     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
106     const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
107     MachineOperand &RegMO, unsigned OpIdx) {
108   Register Reg = RegMO.getReg();
109   // Assume physical registers are properly constrained.
110   assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
111 
112   const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
113   // Some target-independent instructions, like COPY, may not impose any
114   // register class constraints on some of their operands: if the operand is a
115   // use, we can skip constraining it, as the instruction defining the register
116   // will constrain it.
117 
118   if (OpRC) {
119     // Obtain the RC from the incoming regbank if it is a proper sub-class.
120     // Operands can have multiple regbanks for a superclass that combines
121     // different register types (e.g., AMDGPU's VGPR and AGPR). The regbank
122     // ambiguity resolved by targets during regbankselect should not be overridden.
123     if (const auto *SubRC = TRI.getCommonSubClass(
124             OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
125       OpRC = SubRC;
126 
127     OpRC = TRI.getAllocatableClass(OpRC);
128   }
129 
130   if (!OpRC) {
131     assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
132            "Register class constraint is required unless either the "
133            "instruction is target independent or the operand is a use");
134     // FIXME: Just bailing out like this here might not be enough, unless we
135     // expect the users of this function to do the right thing for PHIs and
136     // COPY:
137     //   v1 = COPY v0
138     //   v2 = COPY v1
139     // v1 here may end up not being constrained at all. Please notice that to
140     // reproduce the issue we likely need a destination pattern of a selection
141     // rule producing such extra copies, not just an input GMIR with them as
142     // every existing target using selectImpl handles copies before calling it
143     // and they never reach this function.
144     return Reg;
145   }
146   return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
147                                   RegMO);
148 }
149 
150 bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
151                                             const TargetInstrInfo &TII,
152                                             const TargetRegisterInfo &TRI,
153                                             const RegisterBankInfo &RBI) {
154   assert(!isPreISelGenericOpcode(I.getOpcode()) &&
155          "A selected instruction is expected");
156   MachineBasicBlock &MBB = *I.getParent();
157   MachineFunction &MF = *MBB.getParent();
158   MachineRegisterInfo &MRI = MF.getRegInfo();
159 
160   for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
161     MachineOperand &MO = I.getOperand(OpI);
162 
163     // There's nothing to be done on non-register operands.
164     if (!MO.isReg())
165       continue;
166 
167     LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
168     assert(MO.isReg() && "Unsupported non-reg operand");
169 
170     Register Reg = MO.getReg();
171     // Physical registers don't need to be constrained.
172     if (Register::isPhysicalRegister(Reg))
173       continue;
174 
175     // Register operands with a value of 0 (e.g. predicate operands) don't need
176     // to be constrained.
177     if (Reg == 0)
178       continue;
179 
180     // If the operand is a vreg, we should constrain its regclass, and only
181     // insert COPYs if that's impossible.
182     // constrainOperandRegClass does that for us.
183     constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
184 
185     // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
186     // done.
187     if (MO.isUse()) {
188       int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
189       if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
190         I.tieOperands(DefIdx, OpI);
191     }
192   }
193   return true;
194 }
195 
196 bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
197                          MachineRegisterInfo &MRI) {
198   // Give up if either DstReg or SrcReg is a physical register.
199   if (DstReg.isPhysical() || SrcReg.isPhysical())
200     return false;
201   // Give up if the types don't match.
202   if (MRI.getType(DstReg) != MRI.getType(SrcReg))
203     return false;
204   // Replace if either DstReg has no constraints or the register
205   // constraints match.
206   return !MRI.getRegClassOrRegBank(DstReg) ||
207          MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
208 }
209 
210 bool llvm::isTriviallyDead(const MachineInstr &MI,
211                            const MachineRegisterInfo &MRI) {
212   // FIXME: This logic is mostly duplicated with
213   // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
214   // MachineInstr::isLabel?
215 
216   // Don't delete frame allocation labels.
217   if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
218     return false;
219   // LIFETIME markers should be preserved even if they seem dead.
220   if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
221       MI.getOpcode() == TargetOpcode::LIFETIME_END)
222     return false;
223 
224   // If we can move an instruction, we can remove it.  Otherwise, it has
225   // a side-effect of some sort.
226   bool SawStore = false;
227   if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
228     return false;
229 
230   // Instructions without side-effects are dead iff they only define dead vregs.
231   for (const auto &MO : MI.operands()) {
232     if (!MO.isReg() || !MO.isDef())
233       continue;
234 
235     Register Reg = MO.getReg();
236     if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
237       return false;
238   }
239   return true;
240 }
241 
242 static void reportGISelDiagnostic(DiagnosticSeverity Severity,
243                                   MachineFunction &MF,
244                                   const TargetPassConfig &TPC,
245                                   MachineOptimizationRemarkEmitter &MORE,
246                                   MachineOptimizationRemarkMissed &R) {
247   bool IsFatal = Severity == DS_Error &&
248                  TPC.isGlobalISelAbortEnabled();
249   // Print the function name explicitly if we don't have a debug location (which
250   // makes the diagnostic less useful) or if we're going to emit a raw error.
251   if (!R.getLocation().isValid() || IsFatal)
252     R << (" (in function: " + MF.getName() + ")").str();
253 
254   if (IsFatal)
255     report_fatal_error(Twine(R.getMsg()));
256   else
257     MORE.emit(R);
258 }
259 
260 void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
261                               MachineOptimizationRemarkEmitter &MORE,
262                               MachineOptimizationRemarkMissed &R) {
263   reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
264 }
265 
266 void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
267                               MachineOptimizationRemarkEmitter &MORE,
268                               MachineOptimizationRemarkMissed &R) {
269   MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
270   reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
271 }
272 
273 void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
274                               MachineOptimizationRemarkEmitter &MORE,
275                               const char *PassName, StringRef Msg,
276                               const MachineInstr &MI) {
277   MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
278                                     MI.getDebugLoc(), MI.getParent());
279   R << Msg;
280   // Printing MI is expensive; only do it if expensive remarks are enabled.
281   if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
282     R << ": " << ore::MNV("Inst", MI);
283   reportGISelFailure(MF, TPC, MORE, R);
284 }
285 
286 Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
287                                           const MachineRegisterInfo &MRI) {
288   Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
289       VReg, MRI, /*LookThroughInstrs*/ false);
290   assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
291          "Value found while looking through instrs");
292   if (!ValAndVReg)
293     return None;
294   return ValAndVReg->Value;
295 }
296 
297 Optional<int64_t>
298 llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
299   Optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
300   if (Val && Val->getBitWidth() <= 64)
301     return Val->getSExtValue();
302   return None;
303 }
304 
305 namespace {
306 
307 typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
308 typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
309 
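// Shared implementation for the look-through helpers below: walk from VReg
// back to a constant-producing def, looking through copies and remembering any
// extension or truncation seen on the way, then replay those size changes on
// the constant value. A rough example with hypothetical vregs:
//   %c:_(s64) = G_CONSTANT i64 255
//   %t:_(s32) = G_TRUNC %c
// Looking through %t yields 255 truncated to 32 bits, reported against %c.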
310 Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
311     Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
312     GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
313     bool LookThroughAnyExt = false) {
314   SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
315   MachineInstr *MI;
316 
317   while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
318          LookThroughInstrs) {
319     switch (MI->getOpcode()) {
320     case TargetOpcode::G_ANYEXT:
321       if (!LookThroughAnyExt)
322         return None;
323       LLVM_FALLTHROUGH;
324     case TargetOpcode::G_TRUNC:
325     case TargetOpcode::G_SEXT:
326     case TargetOpcode::G_ZEXT:
327       SeenOpcodes.push_back(std::make_pair(
328           MI->getOpcode(),
329           MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
330       VReg = MI->getOperand(1).getReg();
331       break;
332     case TargetOpcode::COPY:
333       VReg = MI->getOperand(1).getReg();
334       if (Register::isPhysicalRegister(VReg))
335         return None;
336       break;
337     case TargetOpcode::G_INTTOPTR:
338       VReg = MI->getOperand(1).getReg();
339       break;
340     default:
341       return None;
342     }
343   }
344   if (!MI || !IsConstantOpcode(MI))
345     return None;
346 
347   Optional<APInt> MaybeVal = getAPCstValue(MI);
348   if (!MaybeVal)
349     return None;
350   APInt &Val = *MaybeVal;
351   while (!SeenOpcodes.empty()) {
352     std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
353     switch (OpcodeAndSize.first) {
354     case TargetOpcode::G_TRUNC:
355       Val = Val.trunc(OpcodeAndSize.second);
356       break;
357     case TargetOpcode::G_ANYEXT:
358     case TargetOpcode::G_SEXT:
359       Val = Val.sext(OpcodeAndSize.second);
360       break;
361     case TargetOpcode::G_ZEXT:
362       Val = Val.zext(OpcodeAndSize.second);
363       break;
364     }
365   }
366 
367   return ValueAndVReg{Val, VReg};
368 }
369 
370 bool isIConstant(const MachineInstr *MI) {
371   if (!MI)
372     return false;
373   return MI->getOpcode() == TargetOpcode::G_CONSTANT;
374 }
375 
376 bool isFConstant(const MachineInstr *MI) {
377   if (!MI)
378     return false;
379   return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
380 }
381 
382 bool isAnyConstant(const MachineInstr *MI) {
383   if (!MI)
384     return false;
385   unsigned Opc = MI->getOpcode();
386   return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
387 }
388 
389 Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
390   const MachineOperand &CstVal = MI->getOperand(1);
391   if (CstVal.isCImm())
392     return CstVal.getCImm()->getValue();
393   return None;
394 }
395 
396 Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
397   const MachineOperand &CstVal = MI->getOperand(1);
398   if (CstVal.isCImm())
399     return CstVal.getCImm()->getValue();
400   if (CstVal.isFPImm())
401     return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
402   return None;
403 }
404 
405 } // end anonymous namespace
406 
407 Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
408     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
409   return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
410                                            getCImmAsAPInt, LookThroughInstrs);
411 }
412 
413 Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
414     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
415     bool LookThroughAnyExt) {
416   return getConstantVRegValWithLookThrough(
417       VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
418       LookThroughAnyExt);
419 }
420 
421 Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
422     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
423   auto Reg = getConstantVRegValWithLookThrough(
424       VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
425   if (!Reg)
426     return None;
427   return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
428                         Reg->VReg};
429 }
430 
431 const ConstantFP *
432 llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
433   MachineInstr *MI = MRI.getVRegDef(VReg);
434   if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
435     return nullptr;
436   return MI->getOperand(1).getFPImm();
437 }
438 
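// For example (hypothetical vregs), given:
//   %0:_(s32) = G_ADD %a, %b
//   %1:_(s32) = COPY %0
//   %2:_(s32) = COPY %1
// looking through the copies from %2 reaches the G_ADD, with %0 as the
// underlying source register.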
439 Optional<DefinitionAndSourceRegister>
440 llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
441   Register DefSrcReg = Reg;
442   auto *DefMI = MRI.getVRegDef(Reg);
443   auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
444   if (!DstTy.isValid())
445     return None;
446   unsigned Opc = DefMI->getOpcode();
447   while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
448     Register SrcReg = DefMI->getOperand(1).getReg();
449     auto SrcTy = MRI.getType(SrcReg);
450     if (!SrcTy.isValid())
451       break;
452     DefMI = MRI.getVRegDef(SrcReg);
453     DefSrcReg = SrcReg;
454     Opc = DefMI->getOpcode();
455   }
456   return DefinitionAndSourceRegister{DefMI, DefSrcReg};
457 }
458 
459 MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
460                                          const MachineRegisterInfo &MRI) {
461   Optional<DefinitionAndSourceRegister> DefSrcReg =
462       getDefSrcRegIgnoringCopies(Reg, MRI);
463   return DefSrcReg ? DefSrcReg->MI : nullptr;
464 }
465 
466 Register llvm::getSrcRegIgnoringCopies(Register Reg,
467                                        const MachineRegisterInfo &MRI) {
468   Optional<DefinitionAndSourceRegister> DefSrcReg =
469       getDefSrcRegIgnoringCopies(Reg, MRI);
470   return DefSrcReg ? DefSrcReg->Reg : Register();
471 }
472 
473 MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
474                                  const MachineRegisterInfo &MRI) {
475   MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
476   return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
477 }
478 
479 APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
480   if (Size == 32)
481     return APFloat(float(Val));
482   if (Size == 64)
483     return APFloat(Val);
484   if (Size != 16)
485     llvm_unreachable("Unsupported FPConstant size");
486   bool Ignored;
487   APFloat APF(Val);
488   APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
489   return APF;
490 }
491 
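// Illustrative folding, assuming hypothetical vregs:
//   %0:_(s32) = G_CONSTANT i32 7
//   %1:_(s32) = G_CONSTANT i32 5
// ConstantFoldBinOp(TargetOpcode::G_ADD, %0, %1, MRI) returns APInt(32, 12),
// and the division/remainder opcodes return None when the divisor is zero.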
492 Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
493                                         const Register Op2,
494                                         const MachineRegisterInfo &MRI) {
495   auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
496   if (!MaybeOp2Cst)
497     return None;
498 
499   auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
500   if (!MaybeOp1Cst)
501     return None;
502 
503   const APInt &C1 = MaybeOp1Cst->Value;
504   const APInt &C2 = MaybeOp2Cst->Value;
505   switch (Opcode) {
506   default:
507     break;
508   case TargetOpcode::G_ADD:
509   case TargetOpcode::G_PTR_ADD:
510     return C1 + C2;
511   case TargetOpcode::G_AND:
512     return C1 & C2;
513   case TargetOpcode::G_ASHR:
514     return C1.ashr(C2);
515   case TargetOpcode::G_LSHR:
516     return C1.lshr(C2);
517   case TargetOpcode::G_MUL:
518     return C1 * C2;
519   case TargetOpcode::G_OR:
520     return C1 | C2;
521   case TargetOpcode::G_SHL:
522     return C1 << C2;
523   case TargetOpcode::G_SUB:
524     return C1 - C2;
525   case TargetOpcode::G_XOR:
526     return C1 ^ C2;
527   case TargetOpcode::G_UDIV:
528     if (!C2.getBoolValue())
529       break;
530     return C1.udiv(C2);
531   case TargetOpcode::G_SDIV:
532     if (!C2.getBoolValue())
533       break;
534     return C1.sdiv(C2);
535   case TargetOpcode::G_UREM:
536     if (!C2.getBoolValue())
537       break;
538     return C1.urem(C2);
539   case TargetOpcode::G_SREM:
540     if (!C2.getBoolValue())
541       break;
542     return C1.srem(C2);
543   case TargetOpcode::G_SMIN:
544     return APIntOps::smin(C1, C2);
545   case TargetOpcode::G_SMAX:
546     return APIntOps::smax(C1, C2);
547   case TargetOpcode::G_UMIN:
548     return APIntOps::umin(C1, C2);
549   case TargetOpcode::G_UMAX:
550     return APIntOps::umax(C1, C2);
551   }
552 
553   return None;
554 }
555 
556 Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
557                                             const Register Op2,
558                                             const MachineRegisterInfo &MRI) {
559   const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
560   if (!Op2Cst)
561     return None;
562 
563   const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
564   if (!Op1Cst)
565     return None;
566 
567   APFloat C1 = Op1Cst->getValueAPF();
568   const APFloat &C2 = Op2Cst->getValueAPF();
569   switch (Opcode) {
570   case TargetOpcode::G_FADD:
571     C1.add(C2, APFloat::rmNearestTiesToEven);
572     return C1;
573   case TargetOpcode::G_FSUB:
574     C1.subtract(C2, APFloat::rmNearestTiesToEven);
575     return C1;
576   case TargetOpcode::G_FMUL:
577     C1.multiply(C2, APFloat::rmNearestTiesToEven);
578     return C1;
579   case TargetOpcode::G_FDIV:
580     C1.divide(C2, APFloat::rmNearestTiesToEven);
581     return C1;
582   case TargetOpcode::G_FREM:
583     C1.mod(C2);
584     return C1;
585   case TargetOpcode::G_FCOPYSIGN:
586     C1.copySign(C2);
587     return C1;
588   case TargetOpcode::G_FMINNUM:
589     return minnum(C1, C2);
590   case TargetOpcode::G_FMAXNUM:
591     return maxnum(C1, C2);
592   case TargetOpcode::G_FMINIMUM:
593     return minimum(C1, C2);
594   case TargetOpcode::G_FMAXIMUM:
595     return maximum(C1, C2);
596   case TargetOpcode::G_FMINNUM_IEEE:
597   case TargetOpcode::G_FMAXNUM_IEEE:
598     // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
599     // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
600     // and currently there isn't a nice wrapper in APFloat for the version with
601     // correct snan handling.
602     break;
603   default:
604     break;
605   }
606 
607   return None;
608 }
609 
610 SmallVector<APInt>
611 llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
612                               const Register Op2,
613                               const MachineRegisterInfo &MRI) {
614   auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
615   if (!SrcVec2)
616     return SmallVector<APInt>();
617 
618   auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
619   if (!SrcVec1)
620     return SmallVector<APInt>();
621 
622   SmallVector<APInt> FoldedElements;
623   for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
624     auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
625                                       SrcVec2->getSourceReg(Idx), MRI);
626     if (!MaybeCst)
627       return SmallVector<APInt>();
628     FoldedElements.push_back(*MaybeCst);
629   }
630   return FoldedElements;
631 }
632 
633 bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
634                            bool SNaN) {
635   const MachineInstr *DefMI = MRI.getVRegDef(Val);
636   if (!DefMI)
637     return false;
638 
639   const TargetMachine& TM = DefMI->getMF()->getTarget();
640   if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
641     return true;
642 
643   // If the value is a constant, we can obviously see if it is a NaN or not.
644   if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
645     return !FPVal->getValueAPF().isNaN() ||
646            (SNaN && !FPVal->getValueAPF().isSignaling());
647   }
648 
649   if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
650     for (const auto &Op : DefMI->uses())
651       if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
652         return false;
653     return true;
654   }
655 
656   switch (DefMI->getOpcode()) {
657   default:
658     break;
659   case TargetOpcode::G_FMINNUM_IEEE:
660   case TargetOpcode::G_FMAXNUM_IEEE: {
661     if (SNaN)
662       return true;
663     // This can return a NaN if either operand is an sNaN, or if both operands
664     // are NaN.
665     return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
666             isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
667            (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
668             isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
669   }
670   case TargetOpcode::G_FMINNUM:
671   case TargetOpcode::G_FMAXNUM: {
672     // Only one operand needs to be known not to be a NaN, since it will be
673     // returned if the other ends up being one.
674     return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
675            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
676   }
677   }
678 
679   if (SNaN) {
680     // FP operations quiet. For now, just handle the ones inserted during
681     // legalization.
682     switch (DefMI->getOpcode()) {
683     case TargetOpcode::G_FPEXT:
684     case TargetOpcode::G_FPTRUNC:
685     case TargetOpcode::G_FCANONICALIZE:
686       return true;
687     default:
688       return false;
689     }
690   }
691 
692   return false;
693 }
694 
695 Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
696                                   const MachinePointerInfo &MPO) {
697   auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
698   if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
699     MachineFrameInfo &MFI = MF.getFrameInfo();
700     return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
701                            MPO.Offset);
702   }
703 
704   if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
705     const Module *M = MF.getFunction().getParent();
706     return V->getPointerAlignment(M->getDataLayout());
707   }
708 
709   return Align(1);
710 }
711 
712 Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
713                                         const TargetInstrInfo &TII,
714                                         MCRegister PhysReg,
715                                         const TargetRegisterClass &RC,
716                                         const DebugLoc &DL, LLT RegTy) {
717   MachineBasicBlock &EntryMBB = MF.front();
718   MachineRegisterInfo &MRI = MF.getRegInfo();
719   Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
720   if (LiveIn) {
721     MachineInstr *Def = MRI.getVRegDef(LiveIn);
722     if (Def) {
723       // FIXME: Should the verifier check this is in the entry block?
724       assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
725       return LiveIn;
726     }
727 
728     // It's possible the incoming argument register and copy were added during
729     // lowering, but later deleted due to being/becoming dead. If this happens,
730     // re-insert the copy.
731   } else {
732     // The live in register was not present, so add it.
733     LiveIn = MF.addLiveIn(PhysReg, &RC);
734     if (RegTy.isValid())
735       MRI.setType(LiveIn, RegTy);
736   }
737 
738   BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
739     .addReg(PhysReg);
740   if (!EntryMBB.isLiveIn(PhysReg))
741     EntryMBB.addLiveIn(PhysReg);
742   return LiveIn;
743 }
744 
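// Currently only folds G_SEXT_INREG. Illustratively (hypothetical vreg), with
//   %x:_(s32) = G_CONSTANT i32 255
// and Imm == 8, the value is truncated to 8 bits and sign-extended back to the
// scalar width, giving APInt(32, 0xffffffff).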
745 Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
746                                         uint64_t Imm,
747                                         const MachineRegisterInfo &MRI) {
748   auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
749   if (MaybeOp1Cst) {
750     switch (Opcode) {
751     default:
752       break;
753     case TargetOpcode::G_SEXT_INREG: {
754       LLT Ty = MRI.getType(Op1);
755       return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
756     }
757     }
758   }
759   return None;
760 }
761 
762 Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
763                                                Register Src,
764                                                const MachineRegisterInfo &MRI) {
765   assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
766   if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
767     APFloat DstVal(getFltSemanticForLLT(DstTy));
768     DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
769                             APFloat::rmNearestTiesToEven);
770     return DstVal;
771   }
772   return None;
773 }
774 
775 Optional<SmallVector<unsigned>>
776 llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
777   LLT Ty = MRI.getType(Src);
778   SmallVector<unsigned> FoldedCTLZs;
779   auto tryFoldScalar = [&](Register R) -> Optional<unsigned> {
780     auto MaybeCst = getIConstantVRegVal(R, MRI);
781     if (!MaybeCst)
782       return None;
783     return MaybeCst->countLeadingZeros();
784   };
785   if (Ty.isVector()) {
786     // Try to constant fold each element.
787     auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
788     if (!BV)
789       return None;
790     for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
791       if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
792         FoldedCTLZs.emplace_back(*MaybeFold);
793         continue;
794       }
795       return None;
796     }
797     return FoldedCTLZs;
798   }
799   if (auto MaybeCst = tryFoldScalar(Src)) {
800     FoldedCTLZs.emplace_back(*MaybeCst);
801     return FoldedCTLZs;
802   }
803   return None;
804 }
805 
806 bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
807                                   GISelKnownBits *KB) {
808   Optional<DefinitionAndSourceRegister> DefSrcReg =
809       getDefSrcRegIgnoringCopies(Reg, MRI);
810   if (!DefSrcReg)
811     return false;
812 
813   const MachineInstr &MI = *DefSrcReg->MI;
814   const LLT Ty = MRI.getType(Reg);
815 
816   switch (MI.getOpcode()) {
817   case TargetOpcode::G_CONSTANT: {
818     unsigned BitWidth = Ty.getScalarSizeInBits();
819     const ConstantInt *CI = MI.getOperand(1).getCImm();
820     return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
821   }
822   case TargetOpcode::G_SHL: {
823     // A left-shift of a constant one will have exactly one bit set because
824     // shifting the bit off the end is undefined.
825 
826     // TODO: Constant splat
827     if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
828       if (*ConstLHS == 1)
829         return true;
830     }
831 
832     break;
833   }
834   case TargetOpcode::G_LSHR: {
835     if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
836       if (ConstLHS->isSignMask())
837         return true;
838     }
839 
840     break;
841   }
842   case TargetOpcode::G_BUILD_VECTOR: {
843     // TODO: Probably should have a recursion depth guard since you could have
844     // bitcasted vector elements.
845     for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
846       if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
847         return false;
848 
849     return true;
850   }
851   case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
852     // Only handle constants since we would need to know if the number of
853     // leading zeros is greater than the truncation amount.
854     const unsigned BitWidth = Ty.getScalarSizeInBits();
855     for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
856       auto Const = getIConstantVRegVal(MO.getReg(), MRI);
857       if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
858         return false;
859     }
860 
861     return true;
862   }
863   default:
864     break;
865   }
866 
867   if (!KB)
868     return false;
869 
870   // More could be done here, though the above checks are enough
871   // to handle some common cases.
872 
873   // Fall back to computeKnownBits to catch other known cases.
874   KnownBits Known = KB->getKnownBits(Reg);
875   return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
876 }
877 
878 void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
879   AU.addPreserved<StackProtector>();
880 }
881 
882 static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
883   unsigned Mul = OrigSize * TargetSize;
884   unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
885   return Mul / GCDSize;
886 }
887 
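// Worked examples (sizes in bits; intended only as illustration):
//   getLCMType(<2 x s32>, <3 x s32>) == <6 x s32>
//   getLCMType(s32, s64)             == s64
//   getLCMType(<2 x s16>, s32)       == <2 x s16>   (sizes already match)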
888 LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
889   const unsigned OrigSize = OrigTy.getSizeInBits();
890   const unsigned TargetSize = TargetTy.getSizeInBits();
891 
892   if (OrigSize == TargetSize)
893     return OrigTy;
894 
895   if (OrigTy.isVector()) {
896     const LLT OrigElt = OrigTy.getElementType();
897 
898     if (TargetTy.isVector()) {
899       const LLT TargetElt = TargetTy.getElementType();
900 
901       if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
902         int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
903                                             TargetTy.getNumElements());
904         // Prefer the original element type.
905         ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
906         return LLT::vector(Mul.divideCoefficientBy(GCDElts),
907                            OrigTy.getElementType());
908       }
909     } else {
910       if (OrigElt.getSizeInBits() == TargetSize)
911         return OrigTy;
912     }
913 
914     unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
915     return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
916   }
917 
918   if (TargetTy.isVector()) {
919     unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
920     return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
921   }
922 
923   unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
924 
925   // Preserve pointer types.
926   if (LCMSize == OrigSize)
927     return OrigTy;
928   if (LCMSize == TargetSize)
929     return TargetTy;
930 
931   return LLT::scalar(LCMSize);
932 }
933 
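// Roughly: the smallest type that covers OrigTy and splits evenly into
// TargetTy-sized pieces, e.g. (illustration only, same-sized elements):
//   getCoverTy(<3 x s32>, <2 x s32>) == <4 x s32>
//   getCoverTy(<4 x s32>, <2 x s32>) == <4 x s32>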
934 LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
935   if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
936       (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
937     return getLCMType(OrigTy, TargetTy);
938 
939   unsigned OrigTyNumElts = OrigTy.getNumElements();
940   unsigned TargetTyNumElts = TargetTy.getNumElements();
941   if (OrigTyNumElts % TargetTyNumElts == 0)
942     return OrigTy;
943 
944   unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
945   return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
946                              OrigTy.getElementType());
947 }
948 
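// Worked examples (illustration only):
//   getGCDType(<4 x s32>, <2 x s32>) == <2 x s32>
//   getGCDType(s64, s32)             == s32
//   getGCDType(<4 x s16>, s32)       == <2 x s16>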
949 LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
950   const unsigned OrigSize = OrigTy.getSizeInBits();
951   const unsigned TargetSize = TargetTy.getSizeInBits();
952 
953   if (OrigSize == TargetSize)
954     return OrigTy;
955 
956   if (OrigTy.isVector()) {
957     LLT OrigElt = OrigTy.getElementType();
958     if (TargetTy.isVector()) {
959       LLT TargetElt = TargetTy.getElementType();
960       if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
961         int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
962                                         TargetTy.getNumElements());
963         return LLT::scalarOrVector(ElementCount::getFixed(GCD), OrigElt);
964       }
965     } else {
966       // If the source is a vector of pointers, return a pointer element.
967       if (OrigElt.getSizeInBits() == TargetSize)
968         return OrigElt;
969     }
970 
971     unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
972     if (GCD == OrigElt.getSizeInBits())
973       return OrigElt;
974 
975     // If we can't produce the original element type, we have to use a smaller
976     // scalar.
977     if (GCD < OrigElt.getSizeInBits())
978       return LLT::scalar(GCD);
979     return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
980   }
981 
982   if (TargetTy.isVector()) {
983     // Try to preserve the original element type.
984     LLT TargetElt = TargetTy.getElementType();
985     if (TargetElt.getSizeInBits() == OrigSize)
986       return OrigTy;
987   }
988 
989   unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
990   return LLT::scalar(GCD);
991 }
992 
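// For example, a G_SHUFFLE_VECTOR mask of <1, 1, -1, 1> (-1 meaning undef) is
// a splat of index 1, while <0, 1, 0, 1> is not a splat.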
993 Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
994   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
995          "Only G_SHUFFLE_VECTOR can have a splat index!");
996   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
997   auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
998 
999   // If all elements are undefined, this shuffle can be considered a splat.
1000   // Return 0 for better potential for callers to simplify.
1001   if (FirstDefinedIdx == Mask.end())
1002     return 0;
1003 
1004   // Make sure all remaining elements are either undef or the same
1005   // as the first non-undef value.
1006   int SplatValue = *FirstDefinedIdx;
1007   if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
1008              [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1009     return None;
1010 
1011   return SplatValue;
1012 }
1013 
1014 static bool isBuildVectorOp(unsigned Opcode) {
1015   return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1016          Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1017 }
1018 
1019 namespace {
1020 
1021 Optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1022                                            const MachineRegisterInfo &MRI,
1023                                            bool AllowUndef) {
1024   MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
1025   if (!MI)
1026     return None;
1027 
1028   if (!isBuildVectorOp(MI->getOpcode()))
1029     return None;
1030 
1031   Optional<ValueAndVReg> SplatValAndReg = None;
1032   for (MachineOperand &Op : MI->uses()) {
1033     Register Element = Op.getReg();
1034     auto ElementValAndReg =
1035         getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);
1036 
1037     // If AllowUndef, treat undef as a value that will result in a constant splat.
1038     if (!ElementValAndReg) {
1039       if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1040         continue;
1041       return None;
1042     }
1043 
1044     // Record splat value
1045     if (!SplatValAndReg)
1046       SplatValAndReg = ElementValAndReg;
1047 
1048     // Different constant than the one already recorded; not a constant splat.
1049     if (SplatValAndReg->Value != ElementValAndReg->Value)
1050       return None;
1051   }
1052 
1053   return SplatValAndReg;
1054 }
1055 
1056 } // end anonymous namespace
1057 
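// Illustratively (hypothetical vregs):
//   %c:_(s32) = G_CONSTANT i32 -1
//   %v:_(<4 x s32>) = G_BUILD_VECTOR %c, %c, %c, %c
// isBuildVectorConstantSplat(%v, MRI, -1, /*AllowUndef=*/false) returns true.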
1058 bool llvm::isBuildVectorConstantSplat(const Register Reg,
1059                                       const MachineRegisterInfo &MRI,
1060                                       int64_t SplatValue, bool AllowUndef) {
1061   if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1062     return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
1063   return false;
1064 }
1065 
1066 bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
1067                                       const MachineRegisterInfo &MRI,
1068                                       int64_t SplatValue, bool AllowUndef) {
1069   return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1070                                     AllowUndef);
1071 }
1072 
1073 Optional<APInt> llvm::getIConstantSplatVal(const Register Reg,
1074                                            const MachineRegisterInfo &MRI) {
1075   if (auto SplatValAndReg =
1076           getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
1077     Optional<ValueAndVReg> ValAndVReg =
1078         getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1079     return ValAndVReg->Value;
1080   }
1081 
1082   return None;
1083 }
1084 
1085 Optional<APInt> getIConstantSplatVal(const MachineInstr &MI,
1086                                      const MachineRegisterInfo &MRI) {
1087   return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1088 }
1089 
1090 Optional<int64_t>
1091 llvm::getIConstantSplatSExtVal(const Register Reg,
1092                                const MachineRegisterInfo &MRI) {
1093   if (auto SplatValAndReg =
1094           getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
1095     return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1096   return None;
1097 }
1098 
1099 Optional<int64_t>
1100 llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
1101                                const MachineRegisterInfo &MRI) {
1102   return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1103 }
1104 
1105 Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg,
1106                                                  const MachineRegisterInfo &MRI,
1107                                                  bool AllowUndef) {
1108   if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1109     return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1110   return None;
1111 }
1112 
1113 bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
1114                                  const MachineRegisterInfo &MRI,
1115                                  bool AllowUndef) {
1116   return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1117 }
1118 
1119 bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
1120                                 const MachineRegisterInfo &MRI,
1121                                 bool AllowUndef) {
1122   return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1123 }
1124 
1125 Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
1126                                              const MachineRegisterInfo &MRI) {
1127   unsigned Opc = MI.getOpcode();
1128   if (!isBuildVectorOp(Opc))
1129     return None;
1130   if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1131     return RegOrConstant(*Splat);
1132   auto Reg = MI.getOperand(1).getReg();
1133   if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
1134              [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1135     return None;
1136   return RegOrConstant(Reg);
1137 }
1138 
1139 static bool isConstantScalar(const MachineInstr &MI,
1140                              const MachineRegisterInfo &MRI,
1141                              bool AllowFP = true,
1142                              bool AllowOpaqueConstants = true) {
1143   switch (MI.getOpcode()) {
1144   case TargetOpcode::G_CONSTANT:
1145   case TargetOpcode::G_IMPLICIT_DEF:
1146     return true;
1147   case TargetOpcode::G_FCONSTANT:
1148     return AllowFP;
1149   case TargetOpcode::G_GLOBAL_VALUE:
1150   case TargetOpcode::G_FRAME_INDEX:
1151   case TargetOpcode::G_BLOCK_ADDR:
1152   case TargetOpcode::G_JUMP_TABLE:
1153     return AllowOpaqueConstants;
1154   default:
1155     return false;
1156   }
1157 }
1158 
1159 bool llvm::isConstantOrConstantVector(MachineInstr &MI,
1160                                       const MachineRegisterInfo &MRI) {
1161   Register Def = MI.getOperand(0).getReg();
1162   if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1163     return true;
1164   GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
1165   if (!BV)
1166     return false;
1167   for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1168     if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
1169         getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1170       continue;
1171     return false;
1172   }
1173   return true;
1174 }
1175 
1176 bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
1177                                       const MachineRegisterInfo &MRI,
1178                                       bool AllowFP, bool AllowOpaqueConstants) {
1179   if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1180     return true;
1181 
1182   if (!isBuildVectorOp(MI.getOpcode()))
1183     return false;
1184 
1185   const unsigned NumOps = MI.getNumOperands();
1186   for (unsigned I = 1; I != NumOps; ++I) {
1187     const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1188     if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1189       return false;
1190   }
1191 
1192   return true;
1193 }
1194 
1195 Optional<APInt>
1196 llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
1197                                       const MachineRegisterInfo &MRI) {
1198   Register Def = MI.getOperand(0).getReg();
1199   if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1200     return C->Value;
1201   auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
1202   if (!MaybeCst)
1203     return None;
1204   const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1205   return APInt(ScalarSize, *MaybeCst, true);
1206 }
1207 
1208 bool llvm::isNullOrNullSplat(const MachineInstr &MI,
1209                              const MachineRegisterInfo &MRI, bool AllowUndefs) {
1210   switch (MI.getOpcode()) {
1211   case TargetOpcode::G_IMPLICIT_DEF:
1212     return AllowUndefs;
1213   case TargetOpcode::G_CONSTANT:
1214     return MI.getOperand(1).getCImm()->isNullValue();
1215   case TargetOpcode::G_FCONSTANT: {
1216     const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1217     return FPImm->isZero() && !FPImm->isNegative();
1218   }
1219   default:
1220     if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1221       return false;
1222     return isBuildVectorAllZeros(MI, MRI);
1223   }
1224 }
1225 
1226 bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
1227                                    const MachineRegisterInfo &MRI,
1228                                    bool AllowUndefs) {
1229   switch (MI.getOpcode()) {
1230   case TargetOpcode::G_IMPLICIT_DEF:
1231     return AllowUndefs;
1232   case TargetOpcode::G_CONSTANT:
1233     return MI.getOperand(1).getCImm()->isAllOnesValue();
1234   default:
1235     if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1236       return false;
1237     return isBuildVectorAllOnes(MI, MRI);
1238   }
1239 }
1240 
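// For example, a caller might check that Reg is a constant (or a splat of
// constants) with the low bit set using something like (illustrative only):
//   matchUnaryPredicate(MRI, Reg, [](const Constant *C) {
//     return C && cast<ConstantInt>(C)->getValue()[0];
//   });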
1241 bool llvm::matchUnaryPredicate(
1242     const MachineRegisterInfo &MRI, Register Reg,
1243     std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1244 
1245   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1246   if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1247     return Match(nullptr);
1248 
1249   // TODO: Also handle fconstant
1250   if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1251     return Match(Def->getOperand(1).getCImm());
1252 
1253   if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1254     return false;
1255 
1256   for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1257     Register SrcElt = Def->getOperand(I).getReg();
1258     const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1259     if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1260       if (!Match(nullptr))
1261         return false;
1262       continue;
1263     }
1264 
1265     if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1266         !Match(SrcDef->getOperand(1).getCImm()))
1267       return false;
1268   }
1269 
1270   return true;
1271 }
1272 
1273 bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1274                           bool IsFP) {
1275   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1276   case TargetLowering::UndefinedBooleanContent:
1277     return Val & 0x1;
1278   case TargetLowering::ZeroOrOneBooleanContent:
1279     return Val == 1;
1280   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1281     return Val == -1;
1282   }
1283   llvm_unreachable("Invalid boolean contents");
1284 }
1285 
1286 int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1287                              bool IsFP) {
1288   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1289   case TargetLowering::UndefinedBooleanContent:
1290   case TargetLowering::ZeroOrOneBooleanContent:
1291     return 1;
1292   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1293     return -1;
1294   }
1295   llvm_unreachable("Invalid boolean contents");
1296 }
1297 
1298 bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
1299                             ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
1300   const auto &F = MBB.getParent()->getFunction();
1301   return F.hasOptSize() || F.hasMinSize() ||
1302          llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
1303 }
1304 
1305 void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
1306                             LostDebugLocObserver *LocObserver,
1307                             SmallInstListTy &DeadInstChain) {
1308   for (MachineOperand &Op : MI.uses()) {
1309     if (Op.isReg() && Op.getReg().isVirtual())
1310       DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1311   }
1312   LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1313   DeadInstChain.remove(&MI);
1314   MI.eraseFromParent();
1315   if (LocObserver)
1316     LocObserver->checkpoint(false);
1317 }
1318 
1319 void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
1320                        MachineRegisterInfo &MRI,
1321                        LostDebugLocObserver *LocObserver) {
1322   SmallInstListTy DeadInstChain;
1323   for (MachineInstr *MI : DeadInstrs)
1324     saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1325 
1326   while (!DeadInstChain.empty()) {
1327     MachineInstr *Inst = DeadInstChain.pop_back_val();
1328     if (!isTriviallyDead(*Inst, MRI))
1329       continue;
1330     saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1331   }
1332 }
1333 
1334 void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
1335                       LostDebugLocObserver *LocObserver) {
1336   return eraseInstrs({&MI}, MRI, LocObserver);
1337 }
1338