xref: /freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/Utils.cpp (revision 5ca8e32633c4ffbbcd6762e5888b6a4ba0708c6c)
1 //===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file This file implements the utility functions used by the GlobalISel
9 /// pipeline.
10 //===----------------------------------------------------------------------===//
11 
12 #include "llvm/CodeGen/GlobalISel/Utils.h"
13 #include "llvm/ADT/APFloat.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/CodeGen/CodeGenCommonISel.h"
16 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
17 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
18 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
19 #include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
20 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
21 #include "llvm/CodeGen/MachineInstr.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/MachineSizeOpts.h"
26 #include "llvm/CodeGen/RegisterBankInfo.h"
27 #include "llvm/CodeGen/StackProtector.h"
28 #include "llvm/CodeGen/TargetInstrInfo.h"
29 #include "llvm/CodeGen/TargetLowering.h"
30 #include "llvm/CodeGen/TargetPassConfig.h"
31 #include "llvm/CodeGen/TargetRegisterInfo.h"
32 #include "llvm/IR/Constants.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Transforms/Utils/SizeOpts.h"
35 #include <numeric>
36 #include <optional>
37 
38 #define DEBUG_TYPE "globalisel-utils"
39 
40 using namespace llvm;
41 using namespace MIPatternMatch;
42 
43 Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
44                                    const TargetInstrInfo &TII,
45                                    const RegisterBankInfo &RBI, Register Reg,
46                                    const TargetRegisterClass &RegClass) {
47   if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
48     return MRI.createVirtualRegister(&RegClass);
49 
50   return Reg;
51 }
52 
53 Register llvm::constrainOperandRegClass(
54     const MachineFunction &MF, const TargetRegisterInfo &TRI,
55     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
56     const RegisterBankInfo &RBI, MachineInstr &InsertPt,
57     const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
58   Register Reg = RegMO.getReg();
59   // Assume physical registers are properly constrained.
60   assert(Reg.isVirtual() && "PhysReg not implemented");
61 
62   // Save the old register class to check whether
63   // the change notifications will be required.
64   // TODO: A better approach would be to pass
65   // the observers to constrainRegToClass().
66   auto *OldRegClass = MRI.getRegClassOrNull(Reg);
67   Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
68   // If we created a new virtual register because the class is not compatible
69   // then create a copy between the new and the old register.
70   if (ConstrainedReg != Reg) {
71     MachineBasicBlock::iterator InsertIt(&InsertPt);
72     MachineBasicBlock &MBB = *InsertPt.getParent();
73     // FIXME: The copy needs to have the classes constrained for its operands.
74     // Use operand's regbank to get the class for old register (Reg).
75     if (RegMO.isUse()) {
76       BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
77               TII.get(TargetOpcode::COPY), ConstrainedReg)
78           .addReg(Reg);
79     } else {
80       assert(RegMO.isDef() && "Must be a definition");
81       BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
82               TII.get(TargetOpcode::COPY), Reg)
83           .addReg(ConstrainedReg);
84     }
85     if (GISelChangeObserver *Observer = MF.getObserver()) {
86       Observer->changingInstr(*RegMO.getParent());
87     }
88     RegMO.setReg(ConstrainedReg);
89     if (GISelChangeObserver *Observer = MF.getObserver()) {
90       Observer->changedInstr(*RegMO.getParent());
91     }
92   } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
93     if (GISelChangeObserver *Observer = MF.getObserver()) {
94       if (!RegMO.isDef()) {
95         MachineInstr *RegDef = MRI.getVRegDef(Reg);
96         Observer->changedInstr(*RegDef);
97       }
98       Observer->changingAllUsesOfReg(MRI, Reg);
99       Observer->finishedChangingAllUsesOfReg();
100     }
101   }
102   return ConstrainedReg;
103 }
104 
105 Register llvm::constrainOperandRegClass(
106     const MachineFunction &MF, const TargetRegisterInfo &TRI,
107     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
108     const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
109     MachineOperand &RegMO, unsigned OpIdx) {
110   Register Reg = RegMO.getReg();
111   // Assume physical registers are properly constrained.
112   assert(Reg.isVirtual() && "PhysReg not implemented");
113 
114   const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
115   // Some target-independent instructions, like COPY, may not impose any register
116   // class constraints on some of their operands: if the operand is a use, we can
117   // skip constraining it, as the instruction defining the register would
118   // constrain it.
119 
120   if (OpRC) {
121     // Obtain the RC from the incoming regbank if it is a proper sub-class.
122     // Operands whose superclass combines different register types (e.g., AMDGPU's
123     // VGPR and AGPR) can map to multiple regbanks. The regbank ambiguity resolved
124     // by targets during regbankselect should not be overridden.
125     if (const auto *SubRC = TRI.getCommonSubClass(
126             OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
127       OpRC = SubRC;
128 
129     OpRC = TRI.getAllocatableClass(OpRC);
130   }
131 
132   if (!OpRC) {
133     assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
134            "Register class constraint is required unless either the "
135            "instruction is target independent or the operand is a use");
136     // FIXME: Just bailing out like this here could be not enough, unless we
137     // expect the users of this function to do the right thing for PHIs and
138     // COPY:
139     //   v1 = COPY v0
140     //   v2 = COPY v1
141     // v1 here may end up not being constrained at all. Please notice that to
142     // reproduce the issue we likely need a destination pattern of a selection
143     // rule producing such extra copies, not just an input GMIR with them, as
144     // every existing target using selectImpl handles copies before calling it
145     // and they never reach this function.
146     return Reg;
147   }
148   return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
149                                   RegMO);
150 }
151 
152 bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
153                                             const TargetInstrInfo &TII,
154                                             const TargetRegisterInfo &TRI,
155                                             const RegisterBankInfo &RBI) {
156   assert(!isPreISelGenericOpcode(I.getOpcode()) &&
157          "A selected instruction is expected");
158   MachineBasicBlock &MBB = *I.getParent();
159   MachineFunction &MF = *MBB.getParent();
160   MachineRegisterInfo &MRI = MF.getRegInfo();
161 
162   for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
163     MachineOperand &MO = I.getOperand(OpI);
164 
165     // There's nothing to be done on non-register operands.
166     if (!MO.isReg())
167       continue;
168 
169     LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
170     assert(MO.isReg() && "Unsupported non-reg operand");
171 
172     Register Reg = MO.getReg();
173     // Physical registers don't need to be constrained.
174     if (Reg.isPhysical())
175       continue;
176 
177     // Register operands with a value of 0 (e.g. predicate operands) don't need
178     // to be constrained.
179     if (Reg == 0)
180       continue;
181 
182     // If the operand is a vreg, we should constrain its regclass, and only
183     // insert COPYs if that's impossible.
184     // constrainOperandRegClass does that for us.
185     constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
186 
187     // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
188     // done.
189     if (MO.isUse()) {
190       int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
191       if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
192         I.tieOperands(DefIdx, OpI);
193     }
194   }
195   return true;
196 }
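//
// Illustrative usage sketch (not part of the upstream file): a target's
// InstructionSelector typically calls the helper above right after emitting a
// selected instruction so every virtual-register operand receives a register
// class. `MyTarget::ADD`, `DstReg`, `LHSReg` and `RHSReg` are placeholders;
// `TII`, `TRI` and `RBI` are the usual selector members:
//
//   MachineInstrBuilder MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
//                                     TII.get(MyTarget::ADD), DstReg)
//                                 .addReg(LHSReg)
//                                 .addReg(RHSReg);
//   I.eraseFromParent();
//   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);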
197 
198 bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
199                          MachineRegisterInfo &MRI) {
200   // Give up if either DstReg or SrcReg is a physical register.
201   if (DstReg.isPhysical() || SrcReg.isPhysical())
202     return false;
203   // Give up if the types don't match.
204   if (MRI.getType(DstReg) != MRI.getType(SrcReg))
205     return false;
206   // Replace if either DstReg has no constraints or the register
207   // constraints match.
208   return !MRI.getRegClassOrRegBank(DstReg) ||
209          MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
210 }
211 
212 bool llvm::isTriviallyDead(const MachineInstr &MI,
213                            const MachineRegisterInfo &MRI) {
214   // FIXME: This logic is mostly duplicated with
215   // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
216   // MachineInstr::isLabel?
217 
218   // Don't delete frame allocation labels.
219   if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
220     return false;
221   // LIFETIME markers should be preserved even if they seem dead.
222   if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
223       MI.getOpcode() == TargetOpcode::LIFETIME_END)
224     return false;
225 
226   // If we can move an instruction, we can remove it.  Otherwise, it has
227   // a side-effect of some sort.
228   bool SawStore = false;
229   if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
230     return false;
231 
232   // Instructions without side-effects are dead iff they only define dead vregs.
233   for (const auto &MO : MI.all_defs()) {
234     Register Reg = MO.getReg();
235     if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
236       return false;
237   }
238   return true;
239 }
240 
241 static void reportGISelDiagnostic(DiagnosticSeverity Severity,
242                                   MachineFunction &MF,
243                                   const TargetPassConfig &TPC,
244                                   MachineOptimizationRemarkEmitter &MORE,
245                                   MachineOptimizationRemarkMissed &R) {
246   bool IsFatal = Severity == DS_Error &&
247                  TPC.isGlobalISelAbortEnabled();
248   // Print the function name explicitly if we don't have a debug location (which
249   // makes the diagnostic less useful) or if we're going to emit a raw error.
250   if (!R.getLocation().isValid() || IsFatal)
251     R << (" (in function: " + MF.getName() + ")").str();
252 
253   if (IsFatal)
254     report_fatal_error(Twine(R.getMsg()));
255   else
256     MORE.emit(R);
257 }
258 
259 void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
260                               MachineOptimizationRemarkEmitter &MORE,
261                               MachineOptimizationRemarkMissed &R) {
262   reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
263 }
264 
265 void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
266                               MachineOptimizationRemarkEmitter &MORE,
267                               MachineOptimizationRemarkMissed &R) {
268   MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
269   reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
270 }
271 
272 void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
273                               MachineOptimizationRemarkEmitter &MORE,
274                               const char *PassName, StringRef Msg,
275                               const MachineInstr &MI) {
276   MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
277                                     MI.getDebugLoc(), MI.getParent());
278   R << Msg;
279   // Printing MI is expensive; only do it if expensive remarks are enabled.
280   if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
281     R << ": " << ore::MNV("Inst", MI);
282   reportGISelFailure(MF, TPC, MORE, R);
283 }
284 
285 std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
286                                                const MachineRegisterInfo &MRI) {
287   std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
288       VReg, MRI, /*LookThroughInstrs*/ false);
289   assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
290          "Value found while looking through instrs");
291   if (!ValAndVReg)
292     return std::nullopt;
293   return ValAndVReg->Value;
294 }
295 
296 std::optional<int64_t>
297 llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
298   std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
299   if (Val && Val->getBitWidth() <= 64)
300     return Val->getSExtValue();
301   return std::nullopt;
302 }
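//
// Illustrative sketch (not part of the upstream file): combines and selectors
// typically use these helpers to test whether an operand is a known constant,
// e.g. to detect a shift by zero (`MI` and `MRI` assumed in scope):
//
//   if (auto ShAmt = getIConstantVRegSExtVal(MI.getOperand(2).getReg(), MRI))
//     if (*ShAmt == 0)
//       /* replace MI with a copy of its first source operand */;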
303 
304 namespace {
305 
306 typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
307 typedef std::function<std::optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
308 
309 std::optional<ValueAndVReg> getConstantVRegValWithLookThrough(
310     Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
311     GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
312     bool LookThroughAnyExt = false) {
313   SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
314   MachineInstr *MI;
315 
316   while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
317          LookThroughInstrs) {
318     switch (MI->getOpcode()) {
319     case TargetOpcode::G_ANYEXT:
320       if (!LookThroughAnyExt)
321         return std::nullopt;
322       [[fallthrough]];
323     case TargetOpcode::G_TRUNC:
324     case TargetOpcode::G_SEXT:
325     case TargetOpcode::G_ZEXT:
326       SeenOpcodes.push_back(std::make_pair(
327           MI->getOpcode(),
328           MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
329       VReg = MI->getOperand(1).getReg();
330       break;
331     case TargetOpcode::COPY:
332       VReg = MI->getOperand(1).getReg();
333       if (VReg.isPhysical())
334         return std::nullopt;
335       break;
336     case TargetOpcode::G_INTTOPTR:
337       VReg = MI->getOperand(1).getReg();
338       break;
339     default:
340       return std::nullopt;
341     }
342   }
343   if (!MI || !IsConstantOpcode(MI))
344     return std::nullopt;
345 
346   std::optional<APInt> MaybeVal = getAPCstValue(MI);
347   if (!MaybeVal)
348     return std::nullopt;
349   APInt &Val = *MaybeVal;
350   while (!SeenOpcodes.empty()) {
351     std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
352     switch (OpcodeAndSize.first) {
353     case TargetOpcode::G_TRUNC:
354       Val = Val.trunc(OpcodeAndSize.second);
355       break;
356     case TargetOpcode::G_ANYEXT:
357     case TargetOpcode::G_SEXT:
358       Val = Val.sext(OpcodeAndSize.second);
359       break;
360     case TargetOpcode::G_ZEXT:
361       Val = Val.zext(OpcodeAndSize.second);
362       break;
363     }
364   }
365 
366   return ValueAndVReg{Val, VReg};
367 }
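//
// Worked example for the look-through above (MIR is illustrative): given
//
//   %c:_(s64) = G_CONSTANT i64 42
//   %t:_(s32) = G_TRUNC %c(s64)
//   %u:_(s32) = COPY %t(s32)
//
// a lookup on %u walks the COPY, records (G_TRUNC, 32), and stops at the
// G_CONSTANT. The recorded extensions/truncations are then replayed on the
// APInt, so the result is {Value = 42 truncated to 32 bits, VReg = %c}.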
368 
369 bool isIConstant(const MachineInstr *MI) {
370   if (!MI)
371     return false;
372   return MI->getOpcode() == TargetOpcode::G_CONSTANT;
373 }
374 
375 bool isFConstant(const MachineInstr *MI) {
376   if (!MI)
377     return false;
378   return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
379 }
380 
381 bool isAnyConstant(const MachineInstr *MI) {
382   if (!MI)
383     return false;
384   unsigned Opc = MI->getOpcode();
385   return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
386 }
387 
388 std::optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
389   const MachineOperand &CstVal = MI->getOperand(1);
390   if (CstVal.isCImm())
391     return CstVal.getCImm()->getValue();
392   return std::nullopt;
393 }
394 
395 std::optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
396   const MachineOperand &CstVal = MI->getOperand(1);
397   if (CstVal.isCImm())
398     return CstVal.getCImm()->getValue();
399   if (CstVal.isFPImm())
400     return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
401   return std::nullopt;
402 }
403 
404 } // end anonymous namespace
405 
406 std::optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
407     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
408   return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
409                                            getCImmAsAPInt, LookThroughInstrs);
410 }
411 
412 std::optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
413     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
414     bool LookThroughAnyExt) {
415   return getConstantVRegValWithLookThrough(
416       VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
417       LookThroughAnyExt);
418 }
419 
420 std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
421     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
422   auto Reg = getConstantVRegValWithLookThrough(
423       VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
424   if (!Reg)
425     return std::nullopt;
426   return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
427                         Reg->VReg};
428 }
429 
430 const ConstantFP *
431 llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
432   MachineInstr *MI = MRI.getVRegDef(VReg);
433   if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
434     return nullptr;
435   return MI->getOperand(1).getFPImm();
436 }
437 
438 std::optional<DefinitionAndSourceRegister>
439 llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
440   Register DefSrcReg = Reg;
441   auto *DefMI = MRI.getVRegDef(Reg);
442   auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
443   if (!DstTy.isValid())
444     return std::nullopt;
445   unsigned Opc = DefMI->getOpcode();
446   while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
447     Register SrcReg = DefMI->getOperand(1).getReg();
448     auto SrcTy = MRI.getType(SrcReg);
449     if (!SrcTy.isValid())
450       break;
451     DefMI = MRI.getVRegDef(SrcReg);
452     DefSrcReg = SrcReg;
453     Opc = DefMI->getOpcode();
454   }
455   return DefinitionAndSourceRegister{DefMI, DefSrcReg};
456 }
457 
458 MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
459                                          const MachineRegisterInfo &MRI) {
460   std::optional<DefinitionAndSourceRegister> DefSrcReg =
461       getDefSrcRegIgnoringCopies(Reg, MRI);
462   return DefSrcReg ? DefSrcReg->MI : nullptr;
463 }
464 
465 Register llvm::getSrcRegIgnoringCopies(Register Reg,
466                                        const MachineRegisterInfo &MRI) {
467   std::optional<DefinitionAndSourceRegister> DefSrcReg =
468       getDefSrcRegIgnoringCopies(Reg, MRI);
469   return DefSrcReg ? DefSrcReg->Reg : Register();
470 }
471 
472 MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
473                                  const MachineRegisterInfo &MRI) {
474   MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
475   return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
476 }
477 
478 APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
479   if (Size == 32)
480     return APFloat(float(Val));
481   if (Size == 64)
482     return APFloat(Val);
483   if (Size != 16)
484     llvm_unreachable("Unsupported FPConstant size");
485   bool Ignored;
486   APFloat APF(Val);
487   APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
488   return APF;
489 }
490 
491 std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
492                                              const Register Op1,
493                                              const Register Op2,
494                                              const MachineRegisterInfo &MRI) {
495   auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
496   if (!MaybeOp2Cst)
497     return std::nullopt;
498 
499   auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
500   if (!MaybeOp1Cst)
501     return std::nullopt;
502 
503   const APInt &C1 = MaybeOp1Cst->Value;
504   const APInt &C2 = MaybeOp2Cst->Value;
505   switch (Opcode) {
506   default:
507     break;
508   case TargetOpcode::G_ADD:
509   case TargetOpcode::G_PTR_ADD:
510     return C1 + C2;
511   case TargetOpcode::G_AND:
512     return C1 & C2;
513   case TargetOpcode::G_ASHR:
514     return C1.ashr(C2);
515   case TargetOpcode::G_LSHR:
516     return C1.lshr(C2);
517   case TargetOpcode::G_MUL:
518     return C1 * C2;
519   case TargetOpcode::G_OR:
520     return C1 | C2;
521   case TargetOpcode::G_SHL:
522     return C1 << C2;
523   case TargetOpcode::G_SUB:
524     return C1 - C2;
525   case TargetOpcode::G_XOR:
526     return C1 ^ C2;
527   case TargetOpcode::G_UDIV:
528     if (!C2.getBoolValue())
529       break;
530     return C1.udiv(C2);
531   case TargetOpcode::G_SDIV:
532     if (!C2.getBoolValue())
533       break;
534     return C1.sdiv(C2);
535   case TargetOpcode::G_UREM:
536     if (!C2.getBoolValue())
537       break;
538     return C1.urem(C2);
539   case TargetOpcode::G_SREM:
540     if (!C2.getBoolValue())
541       break;
542     return C1.srem(C2);
543   case TargetOpcode::G_SMIN:
544     return APIntOps::smin(C1, C2);
545   case TargetOpcode::G_SMAX:
546     return APIntOps::smax(C1, C2);
547   case TargetOpcode::G_UMIN:
548     return APIntOps::umin(C1, C2);
549   case TargetOpcode::G_UMAX:
550     return APIntOps::umax(C1, C2);
551   }
552 
553   return std::nullopt;
554 }
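//
// Illustrative sketch (not part of the upstream file): a combine might fold a
// binary G_* op whose sources are both G_CONSTANTs and rewrite the result via
// a MachineIRBuilder (`MI`, `MRI` and `Builder` assumed in scope):
//
//   if (std::optional<APInt> Folded = ConstantFoldBinOp(
//           MI.getOpcode(), MI.getOperand(1).getReg(),
//           MI.getOperand(2).getReg(), MRI)) {
//     Builder.setInstrAndDebugLoc(MI);
//     Builder.buildConstant(MI.getOperand(0).getReg(), *Folded);
//     MI.eraseFromParent();
//   }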
555 
556 std::optional<APFloat>
557 llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
558                           const Register Op2, const MachineRegisterInfo &MRI) {
559   const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
560   if (!Op2Cst)
561     return std::nullopt;
562 
563   const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
564   if (!Op1Cst)
565     return std::nullopt;
566 
567   APFloat C1 = Op1Cst->getValueAPF();
568   const APFloat &C2 = Op2Cst->getValueAPF();
569   switch (Opcode) {
570   case TargetOpcode::G_FADD:
571     C1.add(C2, APFloat::rmNearestTiesToEven);
572     return C1;
573   case TargetOpcode::G_FSUB:
574     C1.subtract(C2, APFloat::rmNearestTiesToEven);
575     return C1;
576   case TargetOpcode::G_FMUL:
577     C1.multiply(C2, APFloat::rmNearestTiesToEven);
578     return C1;
579   case TargetOpcode::G_FDIV:
580     C1.divide(C2, APFloat::rmNearestTiesToEven);
581     return C1;
582   case TargetOpcode::G_FREM:
583     C1.mod(C2);
584     return C1;
585   case TargetOpcode::G_FCOPYSIGN:
586     C1.copySign(C2);
587     return C1;
588   case TargetOpcode::G_FMINNUM:
589     return minnum(C1, C2);
590   case TargetOpcode::G_FMAXNUM:
591     return maxnum(C1, C2);
592   case TargetOpcode::G_FMINIMUM:
593     return minimum(C1, C2);
594   case TargetOpcode::G_FMAXIMUM:
595     return maximum(C1, C2);
596   case TargetOpcode::G_FMINNUM_IEEE:
597   case TargetOpcode::G_FMAXNUM_IEEE:
598     // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
599     // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
600     // and currently there isn't a nice wrapper in APFloat for the version with
601     // correct snan handling.
602     break;
603   default:
604     break;
605   }
606 
607   return std::nullopt;
608 }
609 
610 SmallVector<APInt>
611 llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
612                               const Register Op2,
613                               const MachineRegisterInfo &MRI) {
614   auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
615   if (!SrcVec2)
616     return SmallVector<APInt>();
617 
618   auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
619   if (!SrcVec1)
620     return SmallVector<APInt>();
621 
622   SmallVector<APInt> FoldedElements;
623   for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
624     auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
625                                       SrcVec2->getSourceReg(Idx), MRI);
626     if (!MaybeCst)
627       return SmallVector<APInt>();
628     FoldedElements.push_back(*MaybeCst);
629   }
630   return FoldedElements;
631 }
632 
633 bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
634                            bool SNaN) {
635   const MachineInstr *DefMI = MRI.getVRegDef(Val);
636   if (!DefMI)
637     return false;
638 
639   const TargetMachine& TM = DefMI->getMF()->getTarget();
640   if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
641     return true;
642 
643   // If the value is a constant, we can obviously see if it is a NaN or not.
644   if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
645     return !FPVal->getValueAPF().isNaN() ||
646            (SNaN && !FPVal->getValueAPF().isSignaling());
647   }
648 
649   if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
650     for (const auto &Op : DefMI->uses())
651       if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
652         return false;
653     return true;
654   }
655 
656   switch (DefMI->getOpcode()) {
657   default:
658     break;
659   case TargetOpcode::G_FADD:
660   case TargetOpcode::G_FSUB:
661   case TargetOpcode::G_FMUL:
662   case TargetOpcode::G_FDIV:
663   case TargetOpcode::G_FREM:
664   case TargetOpcode::G_FSIN:
665   case TargetOpcode::G_FCOS:
666   case TargetOpcode::G_FMA:
667   case TargetOpcode::G_FMAD:
668     if (SNaN)
669       return true;
670 
671     // TODO: Need isKnownNeverInfinity
672     return false;
673   case TargetOpcode::G_FMINNUM_IEEE:
674   case TargetOpcode::G_FMAXNUM_IEEE: {
675     if (SNaN)
676       return true;
677     // This can return a NaN if either operand is an sNaN, or if both operands
678     // are NaN.
679     return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
680             isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
681            (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
682             isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
683   }
684   case TargetOpcode::G_FMINNUM:
685   case TargetOpcode::G_FMAXNUM: {
686     // Only one needs to be known not-nan, since it will be returned if the
687     // other ends up being one.
688     return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
689            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
690   }
691   }
692 
693   if (SNaN) {
694     // FP operations quiet signaling NaNs. For now, just handle the ones inserted
695     // during legalization.
696     switch (DefMI->getOpcode()) {
697     case TargetOpcode::G_FPEXT:
698     case TargetOpcode::G_FPTRUNC:
699     case TargetOpcode::G_FCANONICALIZE:
700       return true;
701     default:
702       return false;
703     }
704   }
705 
706   return false;
707 }
708 
709 Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
710                                   const MachinePointerInfo &MPO) {
711   auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);
712   if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
713     MachineFrameInfo &MFI = MF.getFrameInfo();
714     return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
715                            MPO.Offset);
716   }
717 
718   if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
719     const Module *M = MF.getFunction().getParent();
720     return V->getPointerAlignment(M->getDataLayout());
721   }
722 
723   return Align(1);
724 }
725 
726 Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
727                                         const TargetInstrInfo &TII,
728                                         MCRegister PhysReg,
729                                         const TargetRegisterClass &RC,
730                                         const DebugLoc &DL, LLT RegTy) {
731   MachineBasicBlock &EntryMBB = MF.front();
732   MachineRegisterInfo &MRI = MF.getRegInfo();
733   Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
734   if (LiveIn) {
735     MachineInstr *Def = MRI.getVRegDef(LiveIn);
736     if (Def) {
737       // FIXME: Should the verifier check this is in the entry block?
738       assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
739       return LiveIn;
740     }
741 
742     // It's possible the incoming argument register and copy were added during
743     // lowering, but later deleted due to being/becoming dead. If this happens,
744     // re-insert the copy.
745   } else {
746     // The live in register was not present, so add it.
747     LiveIn = MF.addLiveIn(PhysReg, &RC);
748     if (RegTy.isValid())
749       MRI.setType(LiveIn, RegTy);
750   }
751 
752   BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
753     .addReg(PhysReg);
754   if (!EntryMBB.isLiveIn(PhysReg))
755     EntryMBB.addLiveIn(PhysReg);
756   return LiveIn;
757 }
758 
759 std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
760                                              const Register Op1, uint64_t Imm,
761                                              const MachineRegisterInfo &MRI) {
762   auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
763   if (MaybeOp1Cst) {
764     switch (Opcode) {
765     default:
766       break;
767     case TargetOpcode::G_SEXT_INREG: {
768       LLT Ty = MRI.getType(Op1);
769       return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
770     }
771     }
772   }
773   return std::nullopt;
774 }
775 
776 std::optional<APFloat>
777 llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
778                              const MachineRegisterInfo &MRI) {
779   assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
780   if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
781     APFloat DstVal(getFltSemanticForLLT(DstTy));
782     DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
783                             APFloat::rmNearestTiesToEven);
784     return DstVal;
785   }
786   return std::nullopt;
787 }
788 
789 std::optional<SmallVector<unsigned>>
790 llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
791   LLT Ty = MRI.getType(Src);
792   SmallVector<unsigned> FoldedCTLZs;
793   auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
794     auto MaybeCst = getIConstantVRegVal(R, MRI);
795     if (!MaybeCst)
796       return std::nullopt;
797     return MaybeCst->countl_zero();
798   };
799   if (Ty.isVector()) {
800     // Try to constant fold each element.
801     auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
802     if (!BV)
803       return std::nullopt;
804     for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
805       if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
806         FoldedCTLZs.emplace_back(*MaybeFold);
807         continue;
808       }
809       return std::nullopt;
810     }
811     return FoldedCTLZs;
812   }
813   if (auto MaybeCst = tryFoldScalar(Src)) {
814     FoldedCTLZs.emplace_back(*MaybeCst);
815     return FoldedCTLZs;
816   }
817   return std::nullopt;
818 }
819 
820 bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
821                                   GISelKnownBits *KB) {
822   std::optional<DefinitionAndSourceRegister> DefSrcReg =
823       getDefSrcRegIgnoringCopies(Reg, MRI);
824   if (!DefSrcReg)
825     return false;
826 
827   const MachineInstr &MI = *DefSrcReg->MI;
828   const LLT Ty = MRI.getType(Reg);
829 
830   switch (MI.getOpcode()) {
831   case TargetOpcode::G_CONSTANT: {
832     unsigned BitWidth = Ty.getScalarSizeInBits();
833     const ConstantInt *CI = MI.getOperand(1).getCImm();
834     return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
835   }
836   case TargetOpcode::G_SHL: {
837     // A left-shift of a constant one will have exactly one bit set because
838     // shifting the bit off the end is undefined.
839 
840     // TODO: Constant splat
841     if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
842       if (*ConstLHS == 1)
843         return true;
844     }
845 
846     break;
847   }
848   case TargetOpcode::G_LSHR: {
849     if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
850       if (ConstLHS->isSignMask())
851         return true;
852     }
853 
854     break;
855   }
856   case TargetOpcode::G_BUILD_VECTOR: {
857     // TODO: Probably should have a recursion depth guard since you could have
858     // bitcasted vector elements.
859     for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
860       if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
861         return false;
862 
863     return true;
864   }
865   case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
866     // Only handle constants since we would need to know if the number of leading
867     // zeros is greater than the truncation amount.
868     const unsigned BitWidth = Ty.getScalarSizeInBits();
869     for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
870       auto Const = getIConstantVRegVal(MO.getReg(), MRI);
871       if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
872         return false;
873     }
874 
875     return true;
876   }
877   default:
878     break;
879   }
880 
881   if (!KB)
882     return false;
883 
884   // More could be done here, though the above checks are enough
885   // to handle some common cases.
886 
887   // Fall back to computeKnownBits to catch other known cases.
888   KnownBits Known = KB->getKnownBits(Reg);
889   return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
890 }
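//
// Illustrative sketch (not part of the upstream file): a combine might use the
// query above to turn a G_UREM by a power of two into a mask, i.e.
// x % pow2 == x & (pow2 - 1). `Builder`, `MRI`, `KB`, `Ty` and the registers
// are assumed to be in scope:
//
//   if (isKnownToBeAPowerOfTwo(RHSReg, MRI, KB)) {
//     auto One = Builder.buildConstant(Ty, 1);
//     auto Mask = Builder.buildSub(Ty, RHSReg, One);
//     Builder.buildAnd(DstReg, LHSReg, Mask);
//   }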
891 
892 void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
893   AU.addPreserved<StackProtector>();
894 }
895 
896 LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
897   const unsigned OrigSize = OrigTy.getSizeInBits();
898   const unsigned TargetSize = TargetTy.getSizeInBits();
899 
900   if (OrigSize == TargetSize)
901     return OrigTy;
902 
903   if (OrigTy.isVector()) {
904     const LLT OrigElt = OrigTy.getElementType();
905 
906     if (TargetTy.isVector()) {
907       const LLT TargetElt = TargetTy.getElementType();
908 
909       if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
910         int GCDElts =
911             std::gcd(OrigTy.getNumElements(), TargetTy.getNumElements());
912         // Prefer the original element type.
913         ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
914         return LLT::vector(Mul.divideCoefficientBy(GCDElts),
915                            OrigTy.getElementType());
916       }
917     } else {
918       if (OrigElt.getSizeInBits() == TargetSize)
919         return OrigTy;
920     }
921 
922     unsigned LCMSize = std::lcm(OrigSize, TargetSize);
923     return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
924   }
925 
926   if (TargetTy.isVector()) {
927     unsigned LCMSize = std::lcm(OrigSize, TargetSize);
928     return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
929   }
930 
931   unsigned LCMSize = std::lcm(OrigSize, TargetSize);
932 
933   // Preserve pointer types.
934   if (LCMSize == OrigSize)
935     return OrigTy;
936   if (LCMSize == TargetSize)
937     return TargetTy;
938 
939   return LLT::scalar(LCMSize);
940 }
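//
// Worked examples for getLCMType, derived from the cases above:
//   getLCMType(<3 x s32>, <2 x s32>) == <6 x s32>  // same element type, so the
//                                                  // result has lcm(3, 2) elements
//   getLCMType(s16, s24)             == s48        // lcm of the scalar bit widths
//   getLCMType(p0, s32)              == p0         // with 64-bit pointers the LCM
//                                                  // equals OrigSize, so the
//                                                  // pointer type is preserved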
941 
942 LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
943   if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
944       (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
945     return getLCMType(OrigTy, TargetTy);
946 
947   unsigned OrigTyNumElts = OrigTy.getNumElements();
948   unsigned TargetTyNumElts = TargetTy.getNumElements();
949   if (OrigTyNumElts % TargetTyNumElts == 0)
950     return OrigTy;
951 
952   unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
953   return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
954                              OrigTy.getElementType());
955 }
956 
957 LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
958   const unsigned OrigSize = OrigTy.getSizeInBits();
959   const unsigned TargetSize = TargetTy.getSizeInBits();
960 
961   if (OrigSize == TargetSize)
962     return OrigTy;
963 
964   if (OrigTy.isVector()) {
965     LLT OrigElt = OrigTy.getElementType();
966     if (TargetTy.isVector()) {
967       LLT TargetElt = TargetTy.getElementType();
968       if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
969         int GCD = std::gcd(OrigTy.getNumElements(), TargetTy.getNumElements());
970         return LLT::scalarOrVector(ElementCount::getFixed(GCD), OrigElt);
971       }
972     } else {
973       // If the source is a vector of pointers, return a pointer element.
974       if (OrigElt.getSizeInBits() == TargetSize)
975         return OrigElt;
976     }
977 
978     unsigned GCD = std::gcd(OrigSize, TargetSize);
979     if (GCD == OrigElt.getSizeInBits())
980       return OrigElt;
981 
982     // If we can't produce the original element type, we have to use a smaller
983     // scalar.
984     if (GCD < OrigElt.getSizeInBits())
985       return LLT::scalar(GCD);
986     return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
987   }
988 
989   if (TargetTy.isVector()) {
990     // Try to preserve the original element type.
991     LLT TargetElt = TargetTy.getElementType();
992     if (TargetElt.getSizeInBits() == OrigSize)
993       return OrigTy;
994   }
995 
996   unsigned GCD = std::gcd(OrigSize, TargetSize);
997   return LLT::scalar(GCD);
998 }
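//
// Worked examples for getGCDType, derived from the cases above:
//   getGCDType(<4 x s32>, <3 x s32>) == s32        // gcd of the element counts is 1
//   getGCDType(<4 x s16>, s32)       == <2 x s16>  // gcd(64, 32) bits covers two
//                                                  // of the original elements
//   getGCDType(s64, s48)             == s16        // plain gcd of the bit widths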
999 
1000 std::optional<int> llvm::getSplatIndex(MachineInstr &MI) {
1001   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
1002          "Only G_SHUFFLE_VECTOR can have a splat index!");
1003   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
1004   auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
1005 
1006   // If all elements are undefined, this shuffle can be considered a splat.
1007   // Return 0 to give callers a better chance to simplify.
1008   if (FirstDefinedIdx == Mask.end())
1009     return 0;
1010 
1011   // Make sure all remaining elements are either undef or the same
1012   // as the first non-undef value.
1013   int SplatValue = *FirstDefinedIdx;
1014   if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
1015              [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1016     return std::nullopt;
1017 
1018   return SplatValue;
1019 }
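//
// For example, for a G_SHUFFLE_VECTOR mask (illustrative values):
//   <1, -1, 1, 1> -> splat index 1 (undef lanes are ignored)
//   <-1, -1>      -> splat index 0 (all lanes undef, report 0 to aid callers)
//   <0, 1, 0, 0>  -> std::nullopt  (two different defined indices)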
1020 
1021 static bool isBuildVectorOp(unsigned Opcode) {
1022   return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1023          Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1024 }
1025 
1026 namespace {
1027 
1028 std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1029                                                 const MachineRegisterInfo &MRI,
1030                                                 bool AllowUndef) {
1031   MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
1032   if (!MI)
1033     return std::nullopt;
1034 
1035   bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
1036   if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
1037     return std::nullopt;
1038 
1039   std::optional<ValueAndVReg> SplatValAndReg;
1040   for (MachineOperand &Op : MI->uses()) {
1041     Register Element = Op.getReg();
1042     // If we have a G_CONCAT_VECTORS, we recursively look into the
1043     // vectors that we're concatenating to see if they're splats.
1044     auto ElementValAndReg =
1045         isConcatVectorsOp
1046             ? getAnyConstantSplat(Element, MRI, AllowUndef)
1047             : getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);
1048 
1049     // If AllowUndef, treat undef as a value that will result in a constant splat.
1050     if (!ElementValAndReg) {
1051       if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1052         continue;
1053       return std::nullopt;
1054     }
1055 
1056     // Record splat value
1057     if (!SplatValAndReg)
1058       SplatValAndReg = ElementValAndReg;
1059 
1060     // Different constant than the one already recorded, not a constant splat.
1061     if (SplatValAndReg->Value != ElementValAndReg->Value)
1062       return std::nullopt;
1063   }
1064 
1065   return SplatValAndReg;
1066 }
1067 
1068 } // end anonymous namespace
1069 
1070 bool llvm::isBuildVectorConstantSplat(const Register Reg,
1071                                       const MachineRegisterInfo &MRI,
1072                                       int64_t SplatValue, bool AllowUndef) {
1073   if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1074     return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
1075   return false;
1076 }
1077 
1078 bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
1079                                       const MachineRegisterInfo &MRI,
1080                                       int64_t SplatValue, bool AllowUndef) {
1081   return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1082                                     AllowUndef);
1083 }
1084 
1085 std::optional<APInt>
1086 llvm::getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI) {
1087   if (auto SplatValAndReg =
1088           getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
1089     std::optional<ValueAndVReg> ValAndVReg =
1090         getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1091     return ValAndVReg->Value;
1092   }
1093 
1094   return std::nullopt;
1095 }
1096 
1097 std::optional<APInt>
1098 llvm::getIConstantSplatVal(const MachineInstr &MI,
1099                            const MachineRegisterInfo &MRI) {
1100   return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1101 }
1102 
1103 std::optional<int64_t>
1104 llvm::getIConstantSplatSExtVal(const Register Reg,
1105                                const MachineRegisterInfo &MRI) {
1106   if (auto SplatValAndReg =
1107           getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
1108     return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1109   return std::nullopt;
1110 }
1111 
1112 std::optional<int64_t>
1113 llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
1114                                const MachineRegisterInfo &MRI) {
1115   return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1116 }
1117 
1118 std::optional<FPValueAndVReg>
1119 llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI,
1120                         bool AllowUndef) {
1121   if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1122     return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1123   return std::nullopt;
1124 }
1125 
1126 bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
1127                                  const MachineRegisterInfo &MRI,
1128                                  bool AllowUndef) {
1129   return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1130 }
1131 
1132 bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
1133                                 const MachineRegisterInfo &MRI,
1134                                 bool AllowUndef) {
1135   return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1136 }
1137 
1138 std::optional<RegOrConstant>
1139 llvm::getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) {
1140   unsigned Opc = MI.getOpcode();
1141   if (!isBuildVectorOp(Opc))
1142     return std::nullopt;
1143   if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1144     return RegOrConstant(*Splat);
1145   auto Reg = MI.getOperand(1).getReg();
1146   if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
1147              [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1148     return std::nullopt;
1149   return RegOrConstant(Reg);
1150 }
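//
// For example, for
//   %splat:_(<4 x s32>) = G_BUILD_VECTOR %x(s32), %x(s32), %x(s32), %x(s32)
// this returns RegOrConstant(%x), while a G_BUILD_VECTOR of identical
// G_CONSTANT sources instead yields the sign-extended constant value.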
1151 
1152 static bool isConstantScalar(const MachineInstr &MI,
1153                              const MachineRegisterInfo &MRI,
1154                              bool AllowFP = true,
1155                              bool AllowOpaqueConstants = true) {
1156   switch (MI.getOpcode()) {
1157   case TargetOpcode::G_CONSTANT:
1158   case TargetOpcode::G_IMPLICIT_DEF:
1159     return true;
1160   case TargetOpcode::G_FCONSTANT:
1161     return AllowFP;
1162   case TargetOpcode::G_GLOBAL_VALUE:
1163   case TargetOpcode::G_FRAME_INDEX:
1164   case TargetOpcode::G_BLOCK_ADDR:
1165   case TargetOpcode::G_JUMP_TABLE:
1166     return AllowOpaqueConstants;
1167   default:
1168     return false;
1169   }
1170 }
1171 
1172 bool llvm::isConstantOrConstantVector(MachineInstr &MI,
1173                                       const MachineRegisterInfo &MRI) {
1174   Register Def = MI.getOperand(0).getReg();
1175   if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1176     return true;
1177   GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
1178   if (!BV)
1179     return false;
1180   for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1181     if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
1182         getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1183       continue;
1184     return false;
1185   }
1186   return true;
1187 }
1188 
1189 bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
1190                                       const MachineRegisterInfo &MRI,
1191                                       bool AllowFP, bool AllowOpaqueConstants) {
1192   if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1193     return true;
1194 
1195   if (!isBuildVectorOp(MI.getOpcode()))
1196     return false;
1197 
1198   const unsigned NumOps = MI.getNumOperands();
1199   for (unsigned I = 1; I != NumOps; ++I) {
1200     const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1201     if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1202       return false;
1203   }
1204 
1205   return true;
1206 }
1207 
1208 std::optional<APInt>
1209 llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
1210                                       const MachineRegisterInfo &MRI) {
1211   Register Def = MI.getOperand(0).getReg();
1212   if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1213     return C->Value;
1214   auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
1215   if (!MaybeCst)
1216     return std::nullopt;
1217   const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1218   return APInt(ScalarSize, *MaybeCst, true);
1219 }
1220 
1221 bool llvm::isNullOrNullSplat(const MachineInstr &MI,
1222                              const MachineRegisterInfo &MRI, bool AllowUndefs) {
1223   switch (MI.getOpcode()) {
1224   case TargetOpcode::G_IMPLICIT_DEF:
1225     return AllowUndefs;
1226   case TargetOpcode::G_CONSTANT:
1227     return MI.getOperand(1).getCImm()->isNullValue();
1228   case TargetOpcode::G_FCONSTANT: {
1229     const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1230     return FPImm->isZero() && !FPImm->isNegative();
1231   }
1232   default:
1233     if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1234       return false;
1235     return isBuildVectorAllZeros(MI, MRI);
1236   }
1237 }
1238 
1239 bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
1240                                    const MachineRegisterInfo &MRI,
1241                                    bool AllowUndefs) {
1242   switch (MI.getOpcode()) {
1243   case TargetOpcode::G_IMPLICIT_DEF:
1244     return AllowUndefs;
1245   case TargetOpcode::G_CONSTANT:
1246     return MI.getOperand(1).getCImm()->isAllOnesValue();
1247   default:
1248     if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1249       return false;
1250     return isBuildVectorAllOnes(MI, MRI);
1251   }
1252 }
1253 
1254 bool llvm::matchUnaryPredicate(
1255     const MachineRegisterInfo &MRI, Register Reg,
1256     std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1257 
1258   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1259   if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1260     return Match(nullptr);
1261 
1262   // TODO: Also handle fconstant
1263   if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1264     return Match(Def->getOperand(1).getCImm());
1265 
1266   if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1267     return false;
1268 
1269   for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1270     Register SrcElt = Def->getOperand(I).getReg();
1271     const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1272     if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1273       if (!Match(nullptr))
1274         return false;
1275       continue;
1276     }
1277 
1278     if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1279         !Match(SrcDef->getOperand(1).getCImm()))
1280       return false;
1281   }
1282 
1283   return true;
1284 }
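//
// Illustrative sketch (not part of the upstream file): callers pass a lambda
// that inspects each scalar constant, e.g. to check that a divisor (scalar or
// build-vector of constants) is a power of two before lowering to a shift:
//
//   auto IsPow2 = [](const Constant *C) {
//     auto *CI = dyn_cast_or_null<ConstantInt>(C);
//     return CI && CI->getValue().isPowerOf2();
//   };
//   if (matchUnaryPredicate(MRI, RHSReg, IsPow2))
//     /* lower the unsigned division to a right shift */;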
1285 
1286 bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1287                           bool IsFP) {
1288   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1289   case TargetLowering::UndefinedBooleanContent:
1290     return Val & 0x1;
1291   case TargetLowering::ZeroOrOneBooleanContent:
1292     return Val == 1;
1293   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1294     return Val == -1;
1295   }
1296   llvm_unreachable("Invalid boolean contents");
1297 }
1298 
1299 bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
1300                            bool IsVector, bool IsFP) {
1301   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1302   case TargetLowering::UndefinedBooleanContent:
1303     return ~Val & 0x1;
1304   case TargetLowering::ZeroOrOneBooleanContent:
1305   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1306     return Val == 0;
1307   }
1308   llvm_unreachable("Invalid boolean contents");
1309 }
1310 
1311 int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1312                              bool IsFP) {
1313   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1314   case TargetLowering::UndefinedBooleanContent:
1315   case TargetLowering::ZeroOrOneBooleanContent:
1316     return 1;
1317   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1318     return -1;
1319   }
1320   llvm_unreachable("Invalid boolean contents");
1321 }
1322 
1323 bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
1324                             ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
1325   const auto &F = MBB.getParent()->getFunction();
1326   return F.hasOptSize() || F.hasMinSize() ||
1327          llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
1328 }
1329 
1330 void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
1331                             LostDebugLocObserver *LocObserver,
1332                             SmallInstListTy &DeadInstChain) {
1333   for (MachineOperand &Op : MI.uses()) {
1334     if (Op.isReg() && Op.getReg().isVirtual())
1335       DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1336   }
1337   LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1338   DeadInstChain.remove(&MI);
1339   MI.eraseFromParent();
1340   if (LocObserver)
1341     LocObserver->checkpoint(false);
1342 }
1343 
1344 void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
1345                        MachineRegisterInfo &MRI,
1346                        LostDebugLocObserver *LocObserver) {
1347   SmallInstListTy DeadInstChain;
1348   for (MachineInstr *MI : DeadInstrs)
1349     saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1350 
1351   while (!DeadInstChain.empty()) {
1352     MachineInstr *Inst = DeadInstChain.pop_back_val();
1353     if (!isTriviallyDead(*Inst, MRI))
1354       continue;
1355     saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1356   }
1357 }
1358 
1359 void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
1360                       LostDebugLocObserver *LocObserver) {
1361   return eraseInstrs({&MI}, MRI, LocObserver);
1362 }
1363 
1364 void llvm::salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI) {
1365   for (auto &Def : MI.defs()) {
1366     assert(Def.isReg() && "Must be a reg");
1367 
1368     SmallVector<MachineOperand *, 16> DbgUsers;
1369     for (auto &MOUse : MRI.use_operands(Def.getReg())) {
1370       MachineInstr *DbgValue = MOUse.getParent();
1371       // Ignore partially formed DBG_VALUEs.
1372       if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
1373         DbgUsers.push_back(&MOUse);
1374       }
1375     }
1376 
1377     if (!DbgUsers.empty()) {
1378       salvageDebugInfoForDbgValue(MRI, MI, DbgUsers);
1379     }
1380   }
1381 }
1382