//===-- SIFoldOperands.cpp - Fold operands ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// This pass tries to fold operands (immediates, frame indexes, global
/// addresses, and foldable copies) into their uses.
//===----------------------------------------------------------------------===//
//

#include "SIFoldOperands.h"
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineOperand.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

/// Track a value we may want to fold into downstream users, applying
/// subregister extracts along the way.
struct FoldableDef {
  union {
    MachineOperand *OpToFold = nullptr;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };

  /// Register class of the originally defined value.
  const TargetRegisterClass *DefRC = nullptr;

  /// Track the original defining instruction for the value.
  const MachineInstr *DefMI = nullptr;

  /// Subregister to apply to the value at the use point.
  unsigned DefSubReg = AMDGPU::NoSubRegister;

  /// Kind of value stored in the union.
  MachineOperand::MachineOperandType Kind;

  FoldableDef() = delete;
  FoldableDef(MachineOperand &FoldOp, const TargetRegisterClass *DefRC,
              unsigned DefSubReg = AMDGPU::NoSubRegister)
      : DefRC(DefRC), DefSubReg(DefSubReg), Kind(FoldOp.getType()) {

    if (FoldOp.isImm()) {
      ImmToFold = FoldOp.getImm();
    } else if (FoldOp.isFI()) {
      FrameIndexToFold = FoldOp.getIndex();
    } else {
      assert(FoldOp.isReg() || FoldOp.isGlobal());
      OpToFold = &FoldOp;
    }

    DefMI = FoldOp.getParent();
  }

  FoldableDef(int64_t FoldImm, const TargetRegisterClass *DefRC,
              unsigned DefSubReg = AMDGPU::NoSubRegister)
      : ImmToFold(FoldImm), DefRC(DefRC), DefSubReg(DefSubReg),
        Kind(MachineOperand::MO_Immediate) {}

  /// Copy the current def and apply \p SubReg to the value.
  FoldableDef getWithSubReg(const SIRegisterInfo &TRI, unsigned SubReg) const {
    FoldableDef Copy(*this);
    Copy.DefSubReg = TRI.composeSubRegIndices(DefSubReg, SubReg);
    return Copy;
  }

  bool isReg() const { return Kind == MachineOperand::MO_Register; }

  Register getReg() const {
    assert(isReg());
    return OpToFold->getReg();
  }

  unsigned getSubReg() const {
    assert(isReg());
    return OpToFold->getSubReg();
  }

  bool isImm() const { return Kind == MachineOperand::MO_Immediate; }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  int getFI() const {
    assert(isFI());
    return FrameIndexToFold;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  /// Return the effective immediate value defined by this instruction, after
  /// application of any subregister extracts which may exist between the use
  /// and def instruction.
  std::optional<int64_t> getEffectiveImmVal() const {
    assert(isImm());
    return SIInstrInfo::extractSubregFromImm(ImmToFold, DefSubReg);
  }
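
  // Editor's note: an illustrative sketch, not part of the pass. Assuming
  // SIInstrInfo::extractSubregFromImm's usual semantics of selecting the
  // bits named by the subregister index, a 64-bit immediate folded through
  // a subregister extract behaves like:
  //   ImmToFold = 0x0000000300000002
  //   DefSubReg == AMDGPU::sub0          -> getEffectiveImmVal() == 2
  //   DefSubReg == AMDGPU::sub1          -> getEffectiveImmVal() == 3
  //   DefSubReg == AMDGPU::NoSubRegister -> the full 64-bit value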

  /// Check if it is legal to fold this effective value into \p MI's \p OpIdx
  /// operand.
  bool isOperandLegal(const SIInstrInfo &TII, const MachineInstr &MI,
                      unsigned OpIdx) const {
    switch (Kind) {
    case MachineOperand::MO_Immediate: {
      std::optional<int64_t> ImmToFold = getEffectiveImmVal();
      if (!ImmToFold)
        return false;

      // TODO: Should verify the subregister index is supported by the class
      // TODO: Avoid the temporary MachineOperand
      MachineOperand TmpOp = MachineOperand::CreateImm(*ImmToFold);
      return TII.isOperandLegal(MI, OpIdx, &TmpOp);
    }
    case MachineOperand::MO_FrameIndex: {
      if (DefSubReg != AMDGPU::NoSubRegister)
        return false;
      MachineOperand TmpOp = MachineOperand::CreateFI(FrameIndexToFold);
      return TII.isOperandLegal(MI, OpIdx, &TmpOp);
    }
    default:
      // TODO: Try to apply DefSubReg, for global address we can extract
      // low/high.
      if (DefSubReg != AMDGPU::NoSubRegister)
        return false;
      return TII.isOperandLegal(MI, OpIdx, OpToFold);
    }

    llvm_unreachable("covered MachineOperand kind switch");
  }
};

struct FoldCandidate {
  MachineInstr *UseMI;
  FoldableDef Def;
  int ShrinkOpcode;
  unsigned UseOpNo;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, FoldableDef Def,
                bool Commuted = false, int ShrinkOp = -1)
      : UseMI(MI), Def(Def), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
        Commuted(Commuted) {}

  bool isFI() const { return Def.isFI(); }

  int getFI() const {
    assert(isFI());
    return Def.FrameIndexToFold;
  }

  bool isImm() const { return Def.isImm(); }

  bool isReg() const { return Def.isReg(); }

  Register getReg() const { return Def.getReg(); }

  bool isGlobal() const { return Def.isGlobal(); }

  bool needsShrink() const { return ShrinkOpcode != -1; }
};

class SIFoldOperandsImpl {
public:
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  bool frameIndexMayFold(const MachineInstr &UseMI, int OpNo,
                         const FoldableDef &OpToFold) const;

  // TODO: Just use TII::getVALUOp
  unsigned convertToVALUOp(unsigned Opc, bool UseVOP3 = false) const {
    switch (Opc) {
    case AMDGPU::S_ADD_I32: {
      if (ST->hasAddNoCarry())
        return UseVOP3 ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_U32_e32;
      return UseVOP3 ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e32;
    }
    case AMDGPU::S_OR_B32:
      return UseVOP3 ? AMDGPU::V_OR_B32_e64 : AMDGPU::V_OR_B32_e32;
    case AMDGPU::S_AND_B32:
      return UseVOP3 ? AMDGPU::V_AND_B32_e64 : AMDGPU::V_AND_B32_e32;
    case AMDGPU::S_MUL_I32:
      return AMDGPU::V_MUL_LO_U32_e64;
    default:
      return AMDGPU::INSTRUCTION_LIST_END;
    }
  }

  bool foldCopyToVGPROfScalarAddOfFrameIndex(Register DstReg, Register SrcReg,
                                             MachineInstr &MI) const;

  bool updateOperand(FoldCandidate &Fold) const;

  bool canUseImmWithOpSel(const MachineInstr *MI, unsigned UseOpNo,
                          int64_t ImmVal) const;

  /// Try to fold immediate \p ImmVal into \p MI's operand at index \p UseOpNo.
  bool tryFoldImmWithOpSel(MachineInstr *MI, unsigned UseOpNo,
                           int64_t ImmVal) const;

  bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                        MachineInstr *MI, unsigned OpNo,
                        const FoldableDef &OpToFold) const;
  bool isUseSafeToFold(const MachineInstr &MI,
                       const MachineOperand &UseMO) const;

  const TargetRegisterClass *getRegSeqInit(
      MachineInstr &RegSeq,
      SmallVectorImpl<std::pair<MachineOperand *, unsigned>> &Defs) const;

  const TargetRegisterClass *
  getRegSeqInit(SmallVectorImpl<std::pair<MachineOperand *, unsigned>> &Defs,
                Register UseReg) const;

  std::pair<int64_t, const TargetRegisterClass *>
  isRegSeqSplat(MachineInstr &RegSeq) const;

  bool tryFoldRegSeqSplat(MachineInstr *UseMI, unsigned UseOpIdx,
                          int64_t SplatVal,
                          const TargetRegisterClass *SplatRC) const;

  bool tryToFoldACImm(const FoldableDef &OpToFold, MachineInstr *UseMI,
                      unsigned UseOpIdx,
                      SmallVectorImpl<FoldCandidate> &FoldList) const;
  void foldOperand(FoldableDef OpToFold, MachineInstr *UseMI, int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  std::optional<int64_t> getImmOrMaterializedImm(MachineOperand &Op) const;
  bool tryConstantFoldOp(MachineInstr *MI) const;
  bool tryFoldCndMask(MachineInstr &MI) const;
  bool tryFoldZeroHighBits(MachineInstr &MI) const;
  bool foldInstOperand(MachineInstr &MI, const FoldableDef &OpToFold) const;

  bool foldCopyToAGPRRegSequence(MachineInstr *CopyMI) const;
  bool tryFoldFoldableCopy(MachineInstr &MI,
                           MachineOperand *&CurrentKnownM0Val) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);
  bool tryFoldRegSequence(MachineInstr &MI);
  bool tryFoldPhiAGPR(MachineInstr &MI);
  bool tryFoldLoad(MachineInstr &MI);

  bool tryOptimizeAGPRPhis(MachineBasicBlock &MBB);

public:
  SIFoldOperandsImpl() = default;

  bool run(MachineFunction &MF);
};

class SIFoldOperandsLegacy : public MachineFunctionPass {
public:
  static char ID;

  SIFoldOperandsLegacy() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    if (skipFunction(MF.getFunction()))
      return false;
    return SIFoldOperandsImpl().run(MF);
  }

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().setIsSSA();
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperandsLegacy, DEBUG_TYPE, "SI Fold Operands", false,
                false)

char SIFoldOperandsLegacy::ID = 0;

char &llvm::SIFoldOperandsLegacyID = SIFoldOperandsLegacy::ID;

static const TargetRegisterClass *getRegOpRC(const MachineRegisterInfo &MRI,
                                             const TargetRegisterInfo &TRI,
                                             const MachineOperand &MO) {
  const TargetRegisterClass *RC = MRI.getRegClass(MO.getReg());
  if (const TargetRegisterClass *SubRC =
          TRI.getSubRegisterClass(RC, MO.getSubReg()))
    RC = SubRC;
  return RC;
}

// Map multiply-accumulate opcode to corresponding multiply-add opcode if any.
static unsigned macToMad(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
    return AMDGPU::V_MAD_F32_e64;
  case AMDGPU::V_MAC_F16_e64:
    return AMDGPU::V_MAD_F16_e64;
  case AMDGPU::V_FMAC_F32_e64:
    return AMDGPU::V_FMA_F32_e64;
  case AMDGPU::V_FMAC_F16_e64:
    return AMDGPU::V_FMA_F16_gfx9_e64;
  case AMDGPU::V_FMAC_F16_t16_e64:
    return AMDGPU::V_FMA_F16_gfx9_t16_e64;
  case AMDGPU::V_FMAC_F16_fake16_e64:
    return AMDGPU::V_FMA_F16_gfx9_fake16_e64;
  case AMDGPU::V_FMAC_LEGACY_F32_e64:
    return AMDGPU::V_FMA_LEGACY_F32_e64;
  case AMDGPU::V_FMAC_F64_e64:
    return AMDGPU::V_FMA_F64_e64;
  }
  return AMDGPU::INSTRUCTION_LIST_END;
}
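
// Editor's note: an illustrative sketch, not part of the pass; register
// numbers are hypothetical and operand modifiers are omitted. MAC/FMAC tie
// the accumulator to the destination, so an immediate def of src2 cannot be
// folded directly. Switching to the untied MAD/FMA form makes it possible,
// when the literal is legal for the subtarget:
//   %2 = V_MOV_B32_e32 1065353216   ; 1.0f
//   %3 = V_MAC_F32_e64 %0, %1, %2 (tied)
// can become
//   %3 = V_MAD_F32_e64 %0, %1, 1065353216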

// TODO: Add a heuristic for when the frame index might not fit in the
// addressing mode immediate offset, to avoid materializing it in loops.
bool SIFoldOperandsImpl::frameIndexMayFold(const MachineInstr &UseMI, int OpNo,
                                           const FoldableDef &OpToFold) const {
  if (!OpToFold.isFI())
    return false;

  const unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::S_ADD_I32:
  case AMDGPU::S_ADD_U32:
  case AMDGPU::V_ADD_U32_e32:
  case AMDGPU::V_ADD_CO_U32_e32:
    // TODO: Possibly relax hasOneUse. It matters more for mubuf, since we have
    // to insert the wave size shift at every point we use the index.
    // TODO: Fix depending on visit order to fold immediates into the operand
    return UseMI.getOperand(OpNo == 1 ? 2 : 1).isImm() &&
           MRI->hasOneNonDBGUse(UseMI.getOperand(OpNo).getReg());
  case AMDGPU::V_ADD_U32_e64:
  case AMDGPU::V_ADD_CO_U32_e64:
    return UseMI.getOperand(OpNo == 2 ? 3 : 2).isImm() &&
           MRI->hasOneNonDBGUse(UseMI.getOperand(OpNo).getReg());
  default:
    break;
  }

  if (TII->isMUBUF(UseMI))
    return OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
  if (!TII->isFLATScratch(UseMI))
    return false;

  int SIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr);
  if (OpNo == SIdx)
    return true;

  int VIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
  return OpNo == VIdx && SIdx == -1;
}
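
// Editor's note: an illustrative sketch, not part of the pass; operands are
// abbreviated and registers hypothetical. For a flat scratch access, the
// frame index can replace the scalar address register:
//   %0 = S_MOV_B32 %stack.0
//   SCRATCH_STORE_DWORD_SADDR %data, %0, 0, 0
// can become
//   SCRATCH_STORE_DWORD_SADDR %data, %stack.0, 0, 0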

/// Fold %vgpr = COPY (S_ADD_I32 x, frameindex)
///
///   => %vgpr = V_ADD_U32 x, frameindex
bool SIFoldOperandsImpl::foldCopyToVGPROfScalarAddOfFrameIndex(
    Register DstReg, Register SrcReg, MachineInstr &MI) const {
  if (TRI->isVGPR(*MRI, DstReg) && TRI->isSGPRReg(*MRI, SrcReg) &&
      MRI->hasOneNonDBGUse(SrcReg)) {
    MachineInstr *Def = MRI->getVRegDef(SrcReg);
    if (!Def || Def->getNumOperands() != 4)
      return false;

    MachineOperand *Src0 = &Def->getOperand(1);
    MachineOperand *Src1 = &Def->getOperand(2);

    // TODO: This is profitable with more operand types, and for more
    // opcodes. But ultimately this is working around poor / nonexistent
    // regbankselect.
    if (!Src0->isFI() && !Src1->isFI())
      return false;

    if (Src0->isFI())
      std::swap(Src0, Src1);

    const bool UseVOP3 = !Src0->isImm() || TII->isInlineConstant(*Src0);
    unsigned NewOp = convertToVALUOp(Def->getOpcode(), UseVOP3);
    if (NewOp == AMDGPU::INSTRUCTION_LIST_END ||
        !Def->getOperand(3).isDead()) // Check if scc is dead
      return false;

    MachineBasicBlock *MBB = Def->getParent();
    const DebugLoc &DL = Def->getDebugLoc();
    if (NewOp != AMDGPU::V_ADD_CO_U32_e32) {
      MachineInstrBuilder Add =
          BuildMI(*MBB, *Def, DL, TII->get(NewOp), DstReg);

      if (Add->getDesc().getNumDefs() == 2) {
        Register CarryOutReg = MRI->createVirtualRegister(TRI->getBoolRC());
        Add.addDef(CarryOutReg, RegState::Dead);
        MRI->setRegAllocationHint(CarryOutReg, 0, TRI->getVCC());
      }

      Add.add(*Src0).add(*Src1).setMIFlags(Def->getFlags());
      if (AMDGPU::hasNamedOperand(NewOp, AMDGPU::OpName::clamp))
        Add.addImm(0);

      Def->eraseFromParent();
      MI.eraseFromParent();
      return true;
    }

    assert(NewOp == AMDGPU::V_ADD_CO_U32_e32);

    MachineBasicBlock::LivenessQueryResult Liveness =
        MBB->computeRegisterLiveness(TRI, AMDGPU::VCC, *Def, 16);
    if (Liveness == MachineBasicBlock::LQR_Dead) {
      // TODO: If src1 satisfies operand constraints, use vop3 version.
      BuildMI(*MBB, *Def, DL, TII->get(NewOp), DstReg)
          .add(*Src0)
          .add(*Src1)
          .setOperandDead(3) // implicit-def $vcc
          .setMIFlags(Def->getFlags());
      Def->eraseFromParent();
      MI.eraseFromParent();
      return true;
    }
  }

  return false;
}
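
// Editor's note: an illustrative sketch of the rewrite above, not part of
// the pass; registers are hypothetical. With a dead scc def:
//   %0:sreg_32 = S_ADD_I32 %stack.0, 16, implicit-def dead $scc
//   %1:vgpr_32 = COPY %0
// becomes, on subtargets with hasAddNoCarry() (the inline src0 selects the
// VOP3 form, which carries a trailing clamp operand):
//   %1:vgpr_32 = V_ADD_U32_e64 16, %stack.0, 0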

FunctionPass *llvm::createSIFoldOperandsLegacyPass() {
  return new SIFoldOperandsLegacy();
}

bool SIFoldOperandsImpl::canUseImmWithOpSel(const MachineInstr *MI,
                                            unsigned UseOpNo,
                                            int64_t ImmVal) const {
  const uint64_t TSFlags = MI->getDesc().TSFlags;

  if (!(TSFlags & SIInstrFlags::IsPacked) || (TSFlags & SIInstrFlags::IsMAI) ||
      (TSFlags & SIInstrFlags::IsWMMA) || (TSFlags & SIInstrFlags::IsSWMMAC) ||
      (ST->hasDOTOpSelHazard() && (TSFlags & SIInstrFlags::IsDOT)))
    return false;

  const MachineOperand &Old = MI->getOperand(UseOpNo);
  int OpNo = MI->getOperandNo(&Old);

  unsigned Opcode = MI->getOpcode();
  uint8_t OpType = TII->get(Opcode).operands()[OpNo].OperandType;
  switch (OpType) {
  default:
    return false;
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2BF16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    // VOP3 packed instructions ignore op_sel source modifiers; we cannot
    // encode two different constants.
    if ((TSFlags & SIInstrFlags::VOP3) && !(TSFlags & SIInstrFlags::VOP3P) &&
        static_cast<uint16_t>(ImmVal) != static_cast<uint16_t>(ImmVal >> 16))
      return false;
    break;
  }

  return true;
}

bool SIFoldOperandsImpl::tryFoldImmWithOpSel(MachineInstr *MI, unsigned UseOpNo,
                                             int64_t ImmVal) const {
  MachineOperand &Old = MI->getOperand(UseOpNo);
  unsigned Opcode = MI->getOpcode();
  int OpNo = MI->getOperandNo(&Old);
  uint8_t OpType = TII->get(Opcode).operands()[OpNo].OperandType;

  // If the literal can be inlined as-is, apply it and short-circuit the
  // tests below. The main motivation for this is to avoid unintuitive
  // uses of opsel.
  if (AMDGPU::isInlinableLiteralV216(ImmVal, OpType)) {
    Old.ChangeToImmediate(ImmVal);
    return true;
  }

  // Refer to op_sel/op_sel_hi and check if we can change the immediate and
  // op_sel in a way that allows an inline constant.
  AMDGPU::OpName ModName = AMDGPU::OpName::NUM_OPERAND_NAMES;
  unsigned SrcIdx = ~0;
  if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0)) {
    ModName = AMDGPU::OpName::src0_modifiers;
    SrcIdx = 0;
  } else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1)) {
    ModName = AMDGPU::OpName::src1_modifiers;
    SrcIdx = 1;
  } else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2)) {
    ModName = AMDGPU::OpName::src2_modifiers;
    SrcIdx = 2;
  }
  assert(ModName != AMDGPU::OpName::NUM_OPERAND_NAMES);
  int ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModName);
  MachineOperand &Mod = MI->getOperand(ModIdx);
  unsigned ModVal = Mod.getImm();

  uint16_t ImmLo =
      static_cast<uint16_t>(ImmVal >> (ModVal & SISrcMods::OP_SEL_0 ? 16 : 0));
  uint16_t ImmHi =
      static_cast<uint16_t>(ImmVal >> (ModVal & SISrcMods::OP_SEL_1 ? 16 : 0));
  uint32_t Imm = (static_cast<uint32_t>(ImmHi) << 16) | ImmLo;
  unsigned NewModVal = ModVal & ~(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1);

  // Helper function that attempts to inline the given value with a newly
  // chosen opsel pattern.
  auto tryFoldToInline = [&](uint32_t Imm) -> bool {
    if (AMDGPU::isInlinableLiteralV216(Imm, OpType)) {
      Mod.setImm(NewModVal | SISrcMods::OP_SEL_1);
      Old.ChangeToImmediate(Imm);
      return true;
    }

    // Try to shuffle the halves around and leverage opsel to get an inline
    // constant.
    uint16_t Lo = static_cast<uint16_t>(Imm);
    uint16_t Hi = static_cast<uint16_t>(Imm >> 16);
    if (Lo == Hi) {
      if (AMDGPU::isInlinableLiteralV216(Lo, OpType)) {
        Mod.setImm(NewModVal);
        Old.ChangeToImmediate(Lo);
        return true;
      }

      if (static_cast<int16_t>(Lo) < 0) {
        int32_t SExt = static_cast<int16_t>(Lo);
        if (AMDGPU::isInlinableLiteralV216(SExt, OpType)) {
          Mod.setImm(NewModVal);
          Old.ChangeToImmediate(SExt);
          return true;
        }
      }

      // This check is only useful for integer instructions
      if (OpType == AMDGPU::OPERAND_REG_IMM_V2INT16) {
        if (AMDGPU::isInlinableLiteralV216(Lo << 16, OpType)) {
          Mod.setImm(NewModVal | SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(static_cast<uint32_t>(Lo) << 16);
          return true;
        }
      }
    } else {
      uint32_t Swapped = (static_cast<uint32_t>(Lo) << 16) | Hi;
      if (AMDGPU::isInlinableLiteralV216(Swapped, OpType)) {
        Mod.setImm(NewModVal | SISrcMods::OP_SEL_0);
        Old.ChangeToImmediate(Swapped);
        return true;
      }
    }

    return false;
  };

  if (tryFoldToInline(Imm))
    return true;

  // Replace integer addition by subtraction and vice versa if it allows
  // folding the immediate to an inline constant.
  //
  // We should only ever get here for SrcIdx == 1 due to canonicalization
  // earlier in the pipeline, but we double-check here to be safe / fully
  // general.
  bool IsUAdd = Opcode == AMDGPU::V_PK_ADD_U16;
  bool IsUSub = Opcode == AMDGPU::V_PK_SUB_U16;
  if (SrcIdx == 1 && (IsUAdd || IsUSub)) {
    unsigned ClampIdx =
        AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::clamp);
    bool Clamp = MI->getOperand(ClampIdx).getImm() != 0;

    if (!Clamp) {
      uint16_t NegLo = -static_cast<uint16_t>(Imm);
      uint16_t NegHi = -static_cast<uint16_t>(Imm >> 16);
      uint32_t NegImm = (static_cast<uint32_t>(NegHi) << 16) | NegLo;

      if (tryFoldToInline(NegImm)) {
        unsigned NegOpcode =
            IsUAdd ? AMDGPU::V_PK_SUB_U16 : AMDGPU::V_PK_ADD_U16;
        MI->setDesc(TII->get(NegOpcode));
        return true;
      }
    }
  }

  return false;
}
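
// Editor's note: an illustrative summary, not part of the pass. The helper
// above tries, in order: (1) the reassembled literal as-is with op_sel_hi
// set, (2) broadcasting the low half when both halves match, (3) a
// sign-extended low half for packed-integer operands, and (4) the two halves
// swapped, selected back via OP_SEL_0. For unclamped V_PK_ADD_U16 /
// V_PK_SUB_U16 the negated immediate is retried with the opposite opcode.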

bool SIFoldOperandsImpl::updateOperand(FoldCandidate &Fold) const {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  std::optional<int64_t> ImmVal;
  if (Fold.isImm())
    ImmVal = Fold.Def.getEffectiveImmVal();

  if (ImmVal && canUseImmWithOpSel(Fold.UseMI, Fold.UseOpNo, *ImmVal)) {
    if (tryFoldImmWithOpSel(Fold.UseMI, Fold.UseOpNo, *ImmVal))
      return true;

    // We can't represent the candidate as an inline constant. Try as a literal
    // with the original opsel, checking constant bus limitations.
    MachineOperand New = MachineOperand::CreateImm(*ImmVal);
    int OpNo = MI->getOperandNo(&Old);
    if (!TII->isOperandLegal(*MI, OpNo, &New))
      return false;
    Old.ChangeToImmediate(*ImmVal);
    return true;
  }

  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(TRI, AMDGPU::VCC, MI, 16);
    if (Liveness != MachineBasicBlock::LQR_Dead) {
      LLVM_DEBUG(dbgs() << "Not shrinking " << MI << " due to vcc liveness\n");
      return false;
    }

    int Op32 = Fold.ShrinkOpcode;
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI->use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI->getRegClass(Dst0.getReg());
    Register NewReg0 = MRI->createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII->buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(AMDGPU::COPY),
              Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->removeOperand(I);
    MI->setDesc(TII->get(AMDGPU::IMPLICIT_DEF));

    if (Fold.Commuted)
      TII->commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (ImmVal) {
    if (Old.isTied()) {
      int NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(MI->getOpcode());
      if (NewMFMAOpc == -1)
        return false;
      MI->setDesc(TII->get(NewMFMAOpc));
      MI->untieRegOperand(0);
    }

    // TODO: Should we try to avoid adding this to the candidate list?
    MachineOperand New = MachineOperand::CreateImm(*ImmVal);
    int OpNo = MI->getOperandNo(&Old);
    if (!TII->isOperandLegal(*MI, OpNo, &New))
      return false;

    Old.ChangeToImmediate(*ImmVal);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.Def.OpToFold->getGlobal(),
                   Fold.Def.OpToFold->getOffset(),
                   Fold.Def.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.getFI());
    return true;
  }

  MachineOperand *New = Fold.Def.OpToFold;
  // Rework once the VS_16 register class is updated to include proper
  // 16-bit SGPRs instead of 32-bit ones.
  if (Old.getSubReg() == AMDGPU::lo16 && TRI->isSGPRReg(*MRI, New->getReg()))
    Old.setSubReg(AMDGPU::NoSubRegister);
  Old.substVirtReg(New->getReg(), New->getSubReg(), *TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
                                FoldCandidate &&Entry) {
  // Skip additional folding on the same operand.
  for (FoldCandidate &Fold : FoldList)
    if (Fold.UseMI == Entry.UseMI && Fold.UseOpNo == Entry.UseOpNo)
      return;
  LLVM_DEBUG(dbgs() << "Append " << (Entry.Commuted ? "commuted" : "normal")
                    << " operand " << Entry.UseOpNo << "\n  " << *Entry.UseMI);
  FoldList.push_back(Entry);
}

static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
                                MachineInstr *MI, unsigned OpNo,
                                const FoldableDef &FoldOp,
                                bool Commuted = false, int ShrinkOp = -1) {
  appendFoldCandidate(FoldList,
                      FoldCandidate(MI, OpNo, FoldOp, Commuted, ShrinkOp));
}

bool SIFoldOperandsImpl::tryAddToFoldList(
    SmallVectorImpl<FoldCandidate> &FoldList, MachineInstr *MI, unsigned OpNo,
    const FoldableDef &OpToFold) const {
  const unsigned Opc = MI->getOpcode();

  auto tryToFoldAsFMAAKorMK = [&]() {
    if (!OpToFold.isImm())
      return false;

    const bool TryAK = OpNo == 3;
    const unsigned NewOpc = TryAK ? AMDGPU::S_FMAAK_F32 : AMDGPU::S_FMAMK_F32;
    MI->setDesc(TII->get(NewOpc));
    // We have to fold into the operand that will hold the immediate, not
    // into OpNo.
    bool FoldAsFMAAKorMK =
        tryAddToFoldList(FoldList, MI, TryAK ? 3 : 2, OpToFold);
    if (FoldAsFMAAKorMK) {
      // Untie Src2 of fmac.
      MI->untieRegOperand(3);
      // For fmamk swap operands 1 and 2 if OpToFold was meant for operand 1.
      if (OpNo == 1) {
        MachineOperand &Op1 = MI->getOperand(1);
        MachineOperand &Op2 = MI->getOperand(2);
        Register OldReg = Op1.getReg();
        // Operand 2 might be an inlinable constant
        if (Op2.isImm()) {
          Op1.ChangeToImmediate(Op2.getImm());
          Op2.ChangeToRegister(OldReg, false);
        } else {
          Op1.setReg(Op2.getReg());
          Op2.setReg(OldReg);
        }
      }
      return true;
    }
    MI->setDesc(TII->get(Opc));
    return false;
  };

  bool IsLegal = OpToFold.isOperandLegal(*TII, *MI, OpNo);
  if (!IsLegal && OpToFold.isImm()) {
    if (std::optional<int64_t> ImmVal = OpToFold.getEffectiveImmVal())
      IsLegal = canUseImmWithOpSel(MI, OpNo, *ImmVal);
  }

  if (!IsLegal) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned NewOpc = macToMad(Opc);
    if (NewOpc != AMDGPU::INSTRUCTION_LIST_END) {
      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool AddOpSel = !AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel) &&
                      AMDGPU::hasNamedOperand(NewOpc, AMDGPU::OpName::op_sel);
      if (AddOpSel)
        MI->addOperand(MachineOperand::CreateImm(0));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      if (AddOpSel)
        MI->removeOperand(MI->getNumExplicitOperands() - 1);
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_fmac_f32 if we are trying to fold into Src2.
    // By transforming into fmaak we can untie Src2 and make folding legal.
    if (Opc == AMDGPU::S_FMAC_F32 && OpNo == 3) {
      if (tryToFoldAsFMAAKorMK())
        return true;
    }

    // Special case for s_setreg_b32
    if (OpToFold.isImm()) {
      unsigned ImmOpc = 0;
      if (Opc == AMDGPU::S_SETREG_B32)
        ImmOpc = AMDGPU::S_SETREG_IMM32_B32;
      else if (Opc == AMDGPU::S_SETREG_B32_mode)
        ImmOpc = AMDGPU::S_SETREG_IMM32_B32_mode;
      if (ImmOpc) {
        MI->setDesc(TII->get(ImmOpc));
        appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
        return true;
      }
    }

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteOpNo = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, OpNo, CommuteOpNo);
    if (!CanCommute)
      return false;

    MachineOperand &Op = MI->getOperand(OpNo);
    MachineOperand &CommutedOp = MI->getOperand(CommuteOpNo);
    // One of the operands might be an immediate, and OpNo may refer to it
    // after the call to commuteInstruction() below. Such situations are
    // avoided here explicitly, as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (!Op.isReg() || !CommutedOp.isReg())
      return false;

    // The same situation with an immediate could arise if both inputs are
    // the same register; both operands are known to be registers here.
    if (Op.getReg() == CommutedOp.getReg() &&
        Op.getSubReg() == CommutedOp.getSubReg())
      return false;

    if (!TII->commuteInstruction(*MI, false, OpNo, CommuteOpNo))
      return false;

    int Op32 = -1;
    if (!OpToFold.isOperandLegal(*TII, *MI, CommuteOpNo)) {
      if ((Opc != AMDGPU::V_ADD_CO_U32_e64 && Opc != AMDGPU::V_SUB_CO_U32_e64 &&
           Opc != AMDGPU::V_SUBREV_CO_U32_e64) || // FIXME
          (!OpToFold.isImm() && !OpToFold.isFI() && !OpToFold.isGlobal())) {
        TII->commuteInstruction(*MI, false, OpNo, CommuteOpNo);
        return false;
      }

      // Verify the other operand is a VGPR, otherwise we would violate the
      // constant bus restriction.
      MachineOperand &OtherOp = MI->getOperand(OpNo);
      if (!OtherOp.isReg() ||
          !TII->getRegisterInfo().isVGPR(*MRI, OtherOp.getReg()))
        return false;

      assert(MI->getOperand(1).isDef());

      // Make sure to get the 32-bit version of the commuted opcode.
      unsigned MaybeCommutedOpc = MI->getOpcode();
      Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);
    }

    appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, /*Commuted=*/true,
                        Op32);
    return true;
  }

  // Special case for s_fmac_f32 if we are trying to fold into Src0 or Src1.
  // By changing into fmamk we can untie Src2.
  // If folding for Src0 happens first and Src0 is identical to Src1, we
  // should avoid transforming into fmamk, which requires commuting, as that
  // would cause folding into Src1 to fail later on due to the wrong OpNo
  // being used.
  if (Opc == AMDGPU::S_FMAC_F32 &&
      (OpNo != 1 || !MI->getOperand(1).isIdenticalTo(MI->getOperand(2)))) {
    if (tryToFoldAsFMAAKorMK())
      return true;
  }

  appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
  return true;
}

bool SIFoldOperandsImpl::isUseSafeToFold(const MachineInstr &MI,
                                         const MachineOperand &UseMO) const {
  // Operands of SDWA instructions must be registers.
  return !TII->isSDWA(MI);
}

static MachineOperand *lookUpCopyChain(const SIInstrInfo &TII,
                                       const MachineRegisterInfo &MRI,
                                       Register SrcReg) {
  MachineOperand *Sub = nullptr;
  for (MachineInstr *SubDef = MRI.getVRegDef(SrcReg);
       SubDef && TII.isFoldableCopy(*SubDef);
       SubDef = MRI.getVRegDef(Sub->getReg())) {
    MachineOperand &SrcOp = SubDef->getOperand(1);
    if (SrcOp.isImm())
      return &SrcOp;
    if (!SrcOp.isReg() || SrcOp.getReg().isPhysical())
      break;
    Sub = &SrcOp;
    // TODO: Support compose
    if (SrcOp.getSubReg())
      break;
  }

  return Sub;
}
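
// Editor's note: an illustrative sketch, not part of the pass; registers
// are hypothetical. Walking the chain
//   %0 = S_MOV_B32 42
//   %1 = COPY %0
//   %2 = COPY %1
// from SrcReg = %2 returns the immediate operand 42; if the chain instead
// ends at a non-foldable def, the last register source seen is returned.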

const TargetRegisterClass *SIFoldOperandsImpl::getRegSeqInit(
    MachineInstr &RegSeq,
    SmallVectorImpl<std::pair<MachineOperand *, unsigned>> &Defs) const {

  assert(RegSeq.isRegSequence());

  const TargetRegisterClass *RC = nullptr;

  for (unsigned I = 1, E = RegSeq.getNumExplicitOperands(); I != E; I += 2) {
    MachineOperand &SrcOp = RegSeq.getOperand(I);
    unsigned SubRegIdx = RegSeq.getOperand(I + 1).getImm();

    // Only accept reg_sequence with uniform reg class inputs for simplicity.
    const TargetRegisterClass *OpRC = getRegOpRC(*MRI, *TRI, SrcOp);
    if (!RC)
      RC = OpRC;
    else if (!TRI->getCommonSubClass(RC, OpRC))
      return nullptr;

    if (SrcOp.getSubReg()) {
      // TODO: Handle subregister compose
      Defs.emplace_back(&SrcOp, SubRegIdx);
      continue;
    }

    MachineOperand *DefSrc = lookUpCopyChain(*TII, *MRI, SrcOp.getReg());
    if (DefSrc && (DefSrc->isReg() || DefSrc->isImm())) {
      Defs.emplace_back(DefSrc, SubRegIdx);
      continue;
    }

    Defs.emplace_back(&SrcOp, SubRegIdx);
  }

  return RC;
}

// Find a def of the UseReg, check if it is a reg_sequence and find initializers
// for each subreg, tracking it to an immediate if possible. Returns the
// register class of the inputs on success.
const TargetRegisterClass *SIFoldOperandsImpl::getRegSeqInit(
    SmallVectorImpl<std::pair<MachineOperand *, unsigned>> &Defs,
    Register UseReg) const {
  MachineInstr *Def = MRI->getVRegDef(UseReg);
  if (!Def || !Def->isRegSequence())
    return nullptr;

  return getRegSeqInit(*Def, Defs);
}

std::pair<int64_t, const TargetRegisterClass *>
SIFoldOperandsImpl::isRegSeqSplat(MachineInstr &RegSeq) const {
  SmallVector<std::pair<MachineOperand *, unsigned>, 32> Defs;
  const TargetRegisterClass *SrcRC = getRegSeqInit(RegSeq, Defs);
  if (!SrcRC)
    return {};

  bool TryToMatchSplat64 = false;

  int64_t Imm;
  for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
    const MachineOperand *Op = Defs[I].first;
    if (!Op->isImm())
      return {};

    int64_t SubImm = Op->getImm();
    if (!I) {
      Imm = SubImm;
      continue;
    }

    if (Imm != SubImm) {
      if (I == 1 && (E & 1) == 0) {
        // If we have an even number of inputs, there's a chance this is a
        // 64-bit element splat broken into 32-bit pieces.
        TryToMatchSplat64 = true;
        break;
      }

      return {}; // Can only fold splat constants
    }
  }

  if (!TryToMatchSplat64)
    return {Defs[0].first->getImm(), SrcRC};

  // Fall back to recognizing 64-bit splats broken into 32-bit pieces (i.e.
  // every pair of 32-bit elements merges to the same 64-bit value).
  int64_t SplatVal64;
  for (unsigned I = 0, E = Defs.size(); I != E; I += 2) {
    const MachineOperand *Op0 = Defs[I].first;
    const MachineOperand *Op1 = Defs[I + 1].first;

    if (!Op0->isImm() || !Op1->isImm())
      return {};

    unsigned SubReg0 = Defs[I].second;
    unsigned SubReg1 = Defs[I + 1].second;

    // Assume we're going to generally encounter reg_sequences with sorted
    // subreg indexes, so reject any that aren't consecutive.
    if (TRI->getChannelFromSubReg(SubReg0) + 1 !=
        TRI->getChannelFromSubReg(SubReg1))
      return {};

    int64_t MergedVal = Make_64(Op1->getImm(), Op0->getImm());
    if (I == 0)
      SplatVal64 = MergedVal;
    else if (SplatVal64 != MergedVal)
      return {};
  }

  const TargetRegisterClass *RC64 = TRI->getSubRegisterClass(
      MRI->getRegClass(RegSeq.getOperand(0).getReg()), AMDGPU::sub0_sub1);

  return {SplatVal64, RC64};
}
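
// Editor's note: an illustrative sketch, not part of the pass; registers
// are hypothetical. A 32-bit splat such as
//   %0 = S_MOV_B32 1
//   %4 = REG_SEQUENCE %0, %subreg.sub0, %0, %subreg.sub1,
//                     %0, %subreg.sub2, %0, %subreg.sub3
// yields {1, <the input register class>}, while differing pieces of the
// form lo, hi, lo, hi, ... are re-checked above as a single 64-bit splat.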

bool SIFoldOperandsImpl::tryFoldRegSeqSplat(
    MachineInstr *UseMI, unsigned UseOpIdx, int64_t SplatVal,
    const TargetRegisterClass *SplatRC) const {
  const MCInstrDesc &Desc = UseMI->getDesc();
  if (UseOpIdx >= Desc.getNumOperands())
    return false;

  // Filter out unhandled pseudos.
  if (!AMDGPU::isSISrcOperand(Desc, UseOpIdx))
    return false;

  int16_t RCID = Desc.operands()[UseOpIdx].RegClass;
  if (RCID == -1)
    return false;

  const TargetRegisterClass *OpRC = TRI->getRegClass(RCID);

  // Special case 0/-1, since when interpreted as a 64-bit element both halves
  // have the same bits. These are the only cases where a splat has the same
  // interpretation for 32-bit and 64-bit splats.
  if (SplatVal != 0 && SplatVal != -1) {
    // We need to figure out the scalar type read by the operand. e.g. the MFMA
    // operand will be AReg_128, and we want to check if it's compatible with an
    // AReg_32 constant.
    uint8_t OpTy = Desc.operands()[UseOpIdx].OperandType;
    switch (OpTy) {
    case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
      OpRC = TRI->getSubRegisterClass(OpRC, AMDGPU::sub0);
      break;
    case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
      OpRC = TRI->getSubRegisterClass(OpRC, AMDGPU::sub0_sub1);
      break;
    default:
      return false;
    }

    if (!TRI->getCommonSubClass(OpRC, SplatRC))
      return false;
  }

  MachineOperand TmpOp = MachineOperand::CreateImm(SplatVal);
  if (!TII->isOperandLegal(*UseMI, UseOpIdx, &TmpOp))
    return false;

  return true;
}

bool SIFoldOperandsImpl::tryToFoldACImm(
    const FoldableDef &OpToFold, MachineInstr *UseMI, unsigned UseOpIdx,
    SmallVectorImpl<FoldCandidate> &FoldList) const {
  const MCInstrDesc &Desc = UseMI->getDesc();
  if (UseOpIdx >= Desc.getNumOperands())
    return false;

  // Filter out unhandled pseudos.
  if (!AMDGPU::isSISrcOperand(Desc, UseOpIdx))
    return false;

  MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
  if (OpToFold.isImm() && OpToFold.isOperandLegal(*TII, *UseMI, UseOpIdx)) {
    appendFoldCandidate(FoldList, UseMI, UseOpIdx, OpToFold);
    return true;
  }

  // TODO: Verify the following code handles subregisters correctly.
  // TODO: Handle extract of global reference
  if (UseOp.getSubReg())
    return false;

  if (!OpToFold.isReg())
    return false;

  Register UseReg = OpToFold.getReg();
  if (!UseReg.isVirtual())
    return false;

  // Maybe it is just a COPY of an immediate itself.

  // FIXME: Remove this handling. There is already special case folding of
  // immediate into copy in foldOperand. This is looking for the def of the
  // value the folding started from in the first place.
  MachineInstr *Def = MRI->getVRegDef(UseReg);
  if (Def && TII->isFoldableCopy(*Def)) {
    MachineOperand &DefOp = Def->getOperand(1);
    if (DefOp.isImm() && TII->isOperandLegal(*UseMI, UseOpIdx, &DefOp)) {
      FoldableDef FoldableImm(DefOp.getImm(), OpToFold.DefRC,
                              OpToFold.DefSubReg);
      appendFoldCandidate(FoldList, UseMI, UseOpIdx, FoldableImm);
      return true;
    }
  }

  return false;
}

void SIFoldOperandsImpl::foldOperand(
    FoldableDef OpToFold, MachineInstr *UseMI, int UseOpIdx,
    SmallVectorImpl<FoldCandidate> &FoldList,
    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand *UseOp = &UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(*UseMI, *UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp->isReg() && OpToFold.isReg()) {
    if (UseOp->isImplicit())
      return;
    // Allow folding from SGPRs to 16-bit VGPRs.
    if (UseOp->getSubReg() != AMDGPU::NoSubRegister &&
        (UseOp->getSubReg() != AMDGPU::lo16 ||
         !TRI->isSGPRReg(*MRI, OpToFold.getReg())))
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    Register RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    int64_t SplatVal;
    const TargetRegisterClass *SplatRC;
    std::tie(SplatVal, SplatRC) = isRegSeqSplat(*UseMI);

    // Grab the use operands first
    SmallVector<MachineOperand *, 4> UsesToProcess(
        llvm::make_pointer_range(MRI->use_nodbg_operands(RegSeqDstReg)));
    for (auto *RSUse : UsesToProcess) {
      MachineInstr *RSUseMI = RSUse->getParent();
      unsigned OpNo = RSUseMI->getOperandNo(RSUse);

      if (SplatRC) {
        if (tryFoldRegSeqSplat(RSUseMI, OpNo, SplatVal, SplatRC)) {
          FoldableDef SplatDef(SplatVal, SplatRC);
          appendFoldCandidate(FoldList, RSUseMI, OpNo, SplatDef);
          continue;
        }
      }

      // TODO: Handle general compose
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      // FIXME: We should avoid recursing here. There should be a cleaner split
      // between the in-place mutations and adding to the fold list.
      foldOperand(OpToFold, RSUseMI, RSUseMI->getOperandNo(RSUse), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  if (tryToFoldACImm(OpToFold, UseMI, UseOpIdx, FoldList))
    return;

  if (frameIndexMayFold(*UseMI, UseOpIdx, OpToFold)) {
    // Verify that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.

    if (TII->isMUBUF(*UseMI)) {
      if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
          MFI->getScratchRSrcReg())
        return;

      // Ensure this is either relative to the current frame or the current
      // wave.
      MachineOperand &SOff =
          *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
      if (!SOff.isImm() || SOff.getImm() != 0)
        return;
    }

    // A frame index will resolve to a positive constant, so it should always be
    // safe to fold the addressing mode, even pre-GFX9.
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getFI());

    const unsigned Opc = UseMI->getOpcode();
    if (TII->isFLATScratch(*UseMI) &&
        AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vaddr) &&
        !AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::saddr)) {
      unsigned NewOpc = AMDGPU::getFlatScratchInstSSfromSV(Opc);
      UseMI->setDesc(TII->get(NewOpc));
    }

    return;
  }

  bool FoldingImmLike =
      OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

  if (FoldingImmLike && UseMI->isCopy()) {
    Register DestReg = UseMI->getOperand(0).getReg();
    Register SrcReg = UseMI->getOperand(1).getReg();
    assert(SrcReg.isVirtual());

    const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);

    // Don't fold into a copy to a physical register with the same class. Doing
    // so would interfere with the register coalescer's logic which would avoid
    // redundant initializations.
    if (DestReg.isPhysical() && SrcRC->contains(DestReg))
      return;

    const TargetRegisterClass *DestRC = TRI->getRegClassForReg(*MRI, DestReg);
    if (!DestReg.isPhysical() && DestRC == &AMDGPU::AGPR_32RegClass) {
      std::optional<int64_t> UseImmVal = OpToFold.getEffectiveImmVal();
      if (UseImmVal && TII->isInlineConstant(
                           *UseImmVal, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
        UseMI->getOperand(1).ChangeToImmediate(*UseImmVal);
        CopiesToReplace.push_back(UseMI);
        return;
      }
    }

    // Allow immediates COPY'd into sgpr_lo16 to be folded further, while
    // still being legal if no further folding occurs.
    if (DestRC == &AMDGPU::SGPR_LO16RegClass) {
      assert(ST->useRealTrue16Insts());
      MRI->setRegClass(DestReg, &AMDGPU::SGPR_32RegClass);
      DestRC = &AMDGPU::SGPR_32RegClass;
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    // Fold if the destination register class of the MOV instruction (ResRC)
    // is a superclass of (or equal to) the destination register class of the
    // COPY (DestRC). If this condition fails, folding would be illegal.
    const MCInstrDesc &MovDesc = TII->get(MovOp);
    assert(MovDesc.getNumDefs() > 0 && MovDesc.operands()[0].RegClass != -1);
    const TargetRegisterClass *ResRC =
        TRI->getRegClass(MovDesc.operands()[0].RegClass);
    if (!DestRC->hasSuperClassEq(ResRC))
      return;

    MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
    MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
    while (ImpOpI != ImpOpE) {
      MachineInstr::mop_iterator Tmp = ImpOpI;
      ImpOpI++;
      UseMI->removeOperand(UseMI->getOperandNo(Tmp));
    }
    UseMI->setDesc(TII->get(MovOp));

    if (MovOp == AMDGPU::V_MOV_B16_t16_e64) {
      const auto &SrcOp = UseMI->getOperand(UseOpIdx);
      MachineOperand NewSrcOp(SrcOp);
      MachineFunction *MF = UseMI->getParent()->getParent();
      UseMI->removeOperand(1);
      UseMI->addOperand(*MF, MachineOperand::CreateImm(0)); // src0_modifiers
      UseMI->addOperand(NewSrcOp);                          // src0
      UseMI->addOperand(*MF, MachineOperand::CreateImm(0)); // op_sel
      UseOpIdx = 2;
      UseOp = &UseMI->getOperand(UseOpIdx);
    }
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        UseMI->getOperand(0).getReg().isVirtual() &&
        !UseMI->getOperand(1).getSubReg() &&
        OpToFold.DefMI->implicit_operands().empty()) {
      LLVM_DEBUG(dbgs() << "Folding " << *OpToFold.OpToFold << "\n into "
                        << *UseMI);
      unsigned Size = TII->getOpSize(*UseMI, 1);
      Register UseReg = OpToFold.getReg();
      UseMI->getOperand(1).setReg(UseReg);
      unsigned SubRegIdx = OpToFold.getSubReg();
      // Hack to allow 32-bit SGPRs to be folded into True16 instructions.
      // Remove this if 16-bit SGPRs (i.e. SGPR_LO16) are added to the
      // VS_16RegClass.
      //
      // Excerpt from AMDGPUGenRegisterInfo.inc
      // NoSubRegister, //0
      // hi16, // 1
      // lo16, // 2
      // sub0, // 3
      // ...
      // sub1, // 11
      // sub1_hi16, // 12
      // sub1_lo16, // 13
      static_assert(AMDGPU::sub1_hi16 == 12, "Subregister layout has changed");
      if (Size == 2 && TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          TRI->isSGPRReg(*MRI, UseReg)) {
        // Produce the 32 bit subregister index to which the 16-bit subregister
        // is aligned.
        if (SubRegIdx > AMDGPU::sub1) {
          LaneBitmask M = TRI->getSubRegIndexLaneMask(SubRegIdx);
          M |= M.getLane(M.getHighestLane() - 1);
          SmallVector<unsigned, 4> Indexes;
          TRI->getCoveringSubRegIndexes(TRI->getRegClassForReg(*MRI, UseReg), M,
                                        Indexes);
          assert(Indexes.size() == 1 && "Expected one 32-bit subreg to cover");
          SubRegIdx = Indexes[0];
          // 32-bit registers do not have a sub0 index
        } else if (TII->getOpSize(*UseMI, 1) == 4)
          SubRegIdx = 0;
        else
          SubRegIdx = AMDGPU::sub0;
      }
      UseMI->getOperand(1).setSubReg(SubRegIdx);
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.OpToFold->setIsKill(false);

      // Remove kill flags as kills may now be out of order with uses.
      MRI->clearKillFlags(UseReg);
      if (foldCopyToAGPRRegSequence(UseMI))
        return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
         AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.DefMI, *UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        if (OpToFold.isImm()) {
          UseMI->getOperand(1).ChangeToImmediate(
              *OpToFold.getEffectiveImmVal());
        } else if (OpToFold.isFI())
          UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getFI());
        else {
          assert(OpToFold.isGlobal());
          UseMI->getOperand(1).ChangeToGA(OpToFold.OpToFold->getGlobal(),
                                          OpToFold.OpToFold->getOffset(),
                                          OpToFold.OpToFold->getTargetFlags());
        }
        UseMI->removeOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.DefMI, *UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->getOperand(1).setReg(OpToFold.getReg());
        UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
        UseMI->getOperand(1).setIsKill(false);
        UseMI->removeOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() || UseOp->isImplicit() ||
        UseDesc.operands()[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImmLike) {
    if (OpToFold.isReg() && ST->needsAlignedVGPRs()) {
      // Don't fold if OpToFold doesn't hold an aligned register.
      const TargetRegisterClass *RC =
          TRI->getRegClassForReg(*MRI, OpToFold.getReg());
      assert(RC);
      if (TRI->hasVectorRegisters(RC) && OpToFold.getSubReg()) {
        unsigned SubReg = OpToFold.getSubReg();
        if (const TargetRegisterClass *SubRC =
                TRI->getSubRegisterClass(RC, SubReg))
          RC = SubRC;
      }

      if (!RC || !TRI->isProperlyAlignedRC(*RC))
        return;
    }

    tryAddToFoldList(FoldList, UseMI, UseOpIdx, OpToFold);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, OpToFold);
}

static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::S_XNOR_B32:
    Result = ~(LHS ^ RHS);
    return true;
  case AMDGPU::S_NAND_B32:
    Result = ~(LHS & RHS);
    return true;
  case AMDGPU::S_NOR_B32:
    Result = ~(LHS | RHS);
    return true;
  case AMDGPU::S_ANDN2_B32:
    Result = LHS & ~RHS;
    return true;
  case AMDGPU::S_ORN2_B32:
    Result = LHS | ~RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}
1508 
1509 static unsigned getMovOpc(bool IsScalar) {
1510   return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1511 }
1512 
1513 static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
1514   MI.setDesc(NewDesc);
1515 
1516   // Remove any leftover implicit operands from mutating the instruction, e.g.
1517   // if we replace an s_and_b32 with a copy, we don't need the implicit scc def
1518   // anymore.
1519   const MCInstrDesc &Desc = MI.getDesc();
1520   unsigned NumOps = Desc.getNumOperands() + Desc.implicit_uses().size() +
1521                     Desc.implicit_defs().size();
1522 
1523   for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
1524     MI.removeOperand(I);
1525 }
1526 
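/// Return the immediate used by \p Op: either the operand's own immediate, or,
/// if \p Op is a virtual register defined by a move-immediate, the immediate
/// extracted for \p Op's subregister.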
1527 std::optional<int64_t>
1528 SIFoldOperandsImpl::getImmOrMaterializedImm(MachineOperand &Op) const {
1529   if (Op.isImm())
1530     return Op.getImm();
1531 
1532   if (!Op.isReg() || !Op.getReg().isVirtual())
1533     return std::nullopt;
1534 
1535   const MachineInstr *Def = MRI->getVRegDef(Op.getReg());
1536   if (Def && Def->isMoveImmediate()) {
1537     const MachineOperand &ImmSrc = Def->getOperand(1);
1538     if (ImmSrc.isImm())
1539       return TII->extractSubregFromImm(ImmSrc.getImm(), Op.getSubReg());
1540   }
1541 
1542   return std::nullopt;
1543 }
1544 
1545 // Try to simplify operations with a constant that may appear after instruction
1546 // selection.
1547 // TODO: See if a frame index with a fixed offset can fold.
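// Illustrative example of the kind of simplification performed here
// (hypothetical registers and values, not from the original source):
//   %0:vgpr_32 = V_MOV_B32_e32 6, implicit %exec
//   %1:vgpr_32 = V_AND_B32_e32 3, %0, implicit %exec
// =>
//   %1:vgpr_32 = V_MOV_B32_e32 2, implicit %exec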
1548 bool SIFoldOperandsImpl::tryConstantFoldOp(MachineInstr *MI) const {
1549   if (!MI->allImplicitDefsAreDead())
1550     return false;
1551 
1552   unsigned Opc = MI->getOpcode();
1553 
1554   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
1555   if (Src0Idx == -1)
1556     return false;
1557 
1558   MachineOperand *Src0 = &MI->getOperand(Src0Idx);
1559   std::optional<int64_t> Src0Imm = getImmOrMaterializedImm(*Src0);
1560 
1561   if ((Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
1562        Opc == AMDGPU::S_NOT_B32) &&
1563       Src0Imm) {
1564     MI->getOperand(1).ChangeToImmediate(~*Src0Imm);
1565     mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
1566     return true;
1567   }
1568 
1569   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
1570   if (Src1Idx == -1)
1571     return false;
1572 
1573   MachineOperand *Src1 = &MI->getOperand(Src1Idx);
1574   std::optional<int64_t> Src1Imm = getImmOrMaterializedImm(*Src1);
1575 
1576   if (!Src0Imm && !Src1Imm)
1577     return false;
1578 
1579   // and k0, k1 -> v_mov_b32 (k0 & k1)
1580   // or k0, k1 -> v_mov_b32 (k0 | k1)
1581   // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
1582   if (Src0Imm && Src1Imm) {
1583     int32_t NewImm;
1584     if (!evalBinaryInstruction(Opc, NewImm, *Src0Imm, *Src1Imm))
1585       return false;
1586 
1587     bool IsSGPR = TRI->isSGPRReg(*MRI, MI->getOperand(0).getReg());
1588 
1589     // Be careful to change the right operand, src0 may belong to a different
1590     // instruction.
1591     MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
1592     MI->removeOperand(Src1Idx);
1593     mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
1594     return true;
1595   }
1596 
1597   if (!MI->isCommutable())
1598     return false;
1599 
1600   if (Src0Imm && !Src1Imm) {
1601     std::swap(Src0, Src1);
1602     std::swap(Src0Idx, Src1Idx);
1603     std::swap(Src0Imm, Src1Imm);
1604   }
1605 
1606   int32_t Src1Val = static_cast<int32_t>(*Src1Imm);
1607   if (Opc == AMDGPU::V_OR_B32_e64 ||
1608       Opc == AMDGPU::V_OR_B32_e32 ||
1609       Opc == AMDGPU::S_OR_B32) {
1610     if (Src1Val == 0) {
1611       // y = or x, 0 => y = copy x
1612       MI->removeOperand(Src1Idx);
1613       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1614     } else if (Src1Val == -1) {
1615       // y = or x, -1 => y = v_mov_b32 -1
1616       MI->removeOperand(Src1Idx);
1617       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
1618     } else
1619       return false;
1620 
1621     return true;
1622   }
1623 
1624   if (Opc == AMDGPU::V_AND_B32_e64 || Opc == AMDGPU::V_AND_B32_e32 ||
1625       Opc == AMDGPU::S_AND_B32) {
1626     if (Src1Val == 0) {
1627       // y = and x, 0 => y = v_mov_b32 0
1628       MI->removeOperand(Src0Idx);
1629       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
1630     } else if (Src1Val == -1) {
1631       // y = and x, -1 => y = copy x
1632       MI->removeOperand(Src1Idx);
1633       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1634     } else
1635       return false;
1636 
1637     return true;
1638   }
1639 
1640   if (Opc == AMDGPU::V_XOR_B32_e64 || Opc == AMDGPU::V_XOR_B32_e32 ||
1641       Opc == AMDGPU::S_XOR_B32) {
1642     if (Src1Val == 0) {
1643       // y = xor x, 0 => y = copy x
1644       MI->removeOperand(Src1Idx);
1645       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1646       return true;
1647     }
1648   }
1649 
1650   return false;
1651 }
1652 
1653 // Try to fold a V_CNDMASK with identical sources into a simpler instruction.
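// e.g. (illustrative; %0 and %1 are hypothetical):
//   %1:vgpr_32 = V_CNDMASK_B32_e32 %0, %0, implicit %vcc
// =>
//   %1:vgpr_32 = COPY %0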
1654 bool SIFoldOperandsImpl::tryFoldCndMask(MachineInstr &MI) const {
1655   unsigned Opc = MI.getOpcode();
1656   if (Opc != AMDGPU::V_CNDMASK_B32_e32 && Opc != AMDGPU::V_CNDMASK_B32_e64 &&
1657       Opc != AMDGPU::V_CNDMASK_B64_PSEUDO)
1658     return false;
1659 
1660   MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1661   MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1662   if (!Src1->isIdenticalTo(*Src0)) {
1663     std::optional<int64_t> Src1Imm = getImmOrMaterializedImm(*Src1);
1664     if (!Src1Imm)
1665       return false;
1666 
1667     std::optional<int64_t> Src0Imm = getImmOrMaterializedImm(*Src0);
1668     if (!Src0Imm || *Src0Imm != *Src1Imm)
1669       return false;
1670   }
1671 
1672   int Src1ModIdx =
1673       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
1674   int Src0ModIdx =
1675       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
1676   if ((Src1ModIdx != -1 && MI.getOperand(Src1ModIdx).getImm() != 0) ||
1677       (Src0ModIdx != -1 && MI.getOperand(Src0ModIdx).getImm() != 0))
1678     return false;
1679 
1680   LLVM_DEBUG(dbgs() << "Folded " << MI << " into ");
1681   auto &NewDesc =
1682       TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
1683   int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
1684   if (Src2Idx != -1)
1685     MI.removeOperand(Src2Idx);
1686   MI.removeOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
1687   if (Src1ModIdx != -1)
1688     MI.removeOperand(Src1ModIdx);
1689   if (Src0ModIdx != -1)
1690     MI.removeOperand(Src0ModIdx);
1691   mutateCopyOp(MI, NewDesc);
1692   LLVM_DEBUG(dbgs() << MI);
1693   return true;
1694 }
1695 
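// Fold away a 'v_and_b32 0xffff, x' when the instruction defining x is already
// known to zero the high 16 bits of its result.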
1696 bool SIFoldOperandsImpl::tryFoldZeroHighBits(MachineInstr &MI) const {
1697   if (MI.getOpcode() != AMDGPU::V_AND_B32_e64 &&
1698       MI.getOpcode() != AMDGPU::V_AND_B32_e32)
1699     return false;
1700 
1701   std::optional<int64_t> Src0Imm = getImmOrMaterializedImm(MI.getOperand(1));
1702   if (!Src0Imm || *Src0Imm != 0xffff || !MI.getOperand(2).isReg())
1703     return false;
1704 
1705   Register Src1 = MI.getOperand(2).getReg();
1706   MachineInstr *SrcDef = MRI->getVRegDef(Src1);
1707   if (!ST->zeroesHigh16BitsOfDest(SrcDef->getOpcode()))
1708     return false;
1709 
1710   Register Dst = MI.getOperand(0).getReg();
1711   MRI->replaceRegWith(Dst, Src1);
1712   if (!MI.getOperand(2).isKill())
1713     MRI->clearKillFlags(Src1);
1714   MI.eraseFromParent();
1715   return true;
1716 }
1717 
1718 bool SIFoldOperandsImpl::foldInstOperand(MachineInstr &MI,
1719                                          const FoldableDef &OpToFold) const {
1720   // We need to mutate the operands of new mov instructions to add implicit
1721   // uses of EXEC, but adding them invalidates the use_iterator, so defer
1722   // this.
1723   SmallVector<MachineInstr *, 4> CopiesToReplace;
1724   SmallVector<FoldCandidate, 4> FoldList;
1725   MachineOperand &Dst = MI.getOperand(0);
1726   bool Changed = false;
1727 
1728   if (OpToFold.isImm()) {
1729     for (auto &UseMI :
1730          make_early_inc_range(MRI->use_nodbg_instructions(Dst.getReg()))) {
1731       // Folding the immediate may reveal operations that can be constant
1732       // folded or replaced with a copy. This can happen for example after
1733       // frame indices are lowered to constants or from splitting 64-bit
1734       // constants.
1735       //
1736       // We may also encounter cases where one or both operands are
1737       // immediates materialized into a register, which would ordinarily not
1738       // be folded due to multiple uses or operand constraints.
1739       if (tryConstantFoldOp(&UseMI)) {
1740         LLVM_DEBUG(dbgs() << "Constant folded " << UseMI);
1741         Changed = true;
1742       }
1743     }
1744   }
1745 
1746   SmallVector<MachineOperand *, 4> UsesToProcess(
1747       llvm::make_pointer_range(MRI->use_nodbg_operands(Dst.getReg())));
1748   for (auto *U : UsesToProcess) {
1749     MachineInstr *UseMI = U->getParent();
1750 
1751     FoldableDef SubOpToFold = OpToFold.getWithSubReg(*TRI, U->getSubReg());
1752     foldOperand(SubOpToFold, UseMI, UseMI->getOperandNo(U), FoldList,
1753                 CopiesToReplace);
1754   }
1755 
1756   if (CopiesToReplace.empty() && FoldList.empty())
1757     return Changed;
1758 
1759   MachineFunction *MF = MI.getParent()->getParent();
1760   // Make sure we add EXEC uses to any new v_mov instructions created.
1761   for (MachineInstr *Copy : CopiesToReplace)
1762     Copy->addImplicitDefUseOperands(*MF);
1763 
1764   for (FoldCandidate &Fold : FoldList) {
1765     assert(!Fold.isReg() || Fold.Def.OpToFold);
1766     if (Fold.isReg() && Fold.getReg().isVirtual()) {
1767       Register Reg = Fold.getReg();
1768       const MachineInstr *DefMI = Fold.Def.DefMI;
1769       if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
1770           execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
1771         continue;
1772     }
1773     if (updateOperand(Fold)) {
1774       // Clear kill flags.
1775       if (Fold.isReg()) {
1776         assert(Fold.Def.OpToFold && Fold.isReg());
1777         // FIXME: Probably shouldn't bother trying to fold if not an
1778         // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
1779         // copies.
1780         MRI->clearKillFlags(Fold.getReg());
1781       }
1782       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
1783                         << static_cast<int>(Fold.UseOpNo) << " of "
1784                         << *Fold.UseMI);
1785 
1786       if (Fold.isImm() && tryConstantFoldOp(Fold.UseMI)) {
1787         LLVM_DEBUG(dbgs() << "Constant folded " << *Fold.UseMI);
1788         Changed = true;
1789       }
1790 
1791     } else if (Fold.Commuted) {
1792       // Restore the instruction's original operand order if the fold has failed.
1793       TII->commuteInstruction(*Fold.UseMI, false);
1794     }
1795   }
1796   return true;
1797 }
1798 
1799 /// Fold %agpr = COPY (REG_SEQUENCE x_MOV_B32, ...) into a REG_SEQUENCE of
1800 /// V_ACCVGPR_WRITE_B32_e64s or copies, depending on the reg_sequence inputs.
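/// Illustrative example (hypothetical registers; the immediate must be an
/// inline constant):
///   %0:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
///   %1:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %0, %subreg.sub1
///   %2:areg_64 = COPY %1
/// =>
///   %3:agpr_32 = V_ACCVGPR_WRITE_B32_e64 0, implicit %exec
///   %4:agpr_32 = V_ACCVGPR_WRITE_B32_e64 0, implicit %exec
///   %2:areg_64 = REG_SEQUENCE %3, %subreg.sub0, %4, %subreg.sub1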
1801 bool SIFoldOperandsImpl::foldCopyToAGPRRegSequence(MachineInstr *CopyMI) const {
1802   // It is very tricky to store a value into an AGPR. v_accvgpr_write_b32 can
1803   // only accept a VGPR or an inline immediate. Recreate a reg_sequence with its
1804   // initializers right here, so we will rematerialize immediates and avoid
1805   // copies via different reg classes.
1806   const TargetRegisterClass *DefRC =
1807       MRI->getRegClass(CopyMI->getOperand(0).getReg());
1808   if (!TRI->isAGPRClass(DefRC))
1809     return false;
1810 
1811   Register UseReg = CopyMI->getOperand(1).getReg();
1812   MachineInstr *RegSeq = MRI->getVRegDef(UseReg);
1813   if (!RegSeq || !RegSeq->isRegSequence())
1814     return false;
1815 
1816   const DebugLoc &DL = CopyMI->getDebugLoc();
1817   MachineBasicBlock &MBB = *CopyMI->getParent();
1818 
1819   MachineInstrBuilder B(*MBB.getParent(), CopyMI);
1820   DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
1821 
1822   const TargetRegisterClass *UseRC =
1823       MRI->getRegClass(CopyMI->getOperand(1).getReg());
1824 
1825   // Value and subreg index pairs for the new REG_SEQUENCE.
1826   SmallVector<std::pair<MachineOperand *, unsigned>, 32> NewDefs;
1827 
1828   unsigned NumRegSeqOperands = RegSeq->getNumOperands();
1829   unsigned NumFoldable = 0;
1830 
1831   for (unsigned I = 1; I != NumRegSeqOperands; I += 2) {
1832     MachineOperand &RegOp = RegSeq->getOperand(I);
1833     unsigned SubRegIdx = RegSeq->getOperand(I + 1).getImm();
1834 
1835     if (RegOp.getSubReg()) {
1836       // TODO: Handle subregister compose
1837       NewDefs.emplace_back(&RegOp, SubRegIdx);
1838       continue;
1839     }
1840 
1841     MachineOperand *Lookup = lookUpCopyChain(*TII, *MRI, RegOp.getReg());
1842     if (!Lookup)
1843       Lookup = &RegOp;
1844 
1845     if (Lookup->isImm()) {
1846       // Check if this is an agpr_32 subregister.
1847       const TargetRegisterClass *DestSuperRC = TRI->getMatchingSuperRegClass(
1848           DefRC, &AMDGPU::AGPR_32RegClass, SubRegIdx);
1849       if (DestSuperRC &&
1850           TII->isInlineConstant(*Lookup, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
1851         ++NumFoldable;
1852         NewDefs.emplace_back(Lookup, SubRegIdx);
1853         continue;
1854       }
1855     }
1856 
1857     const TargetRegisterClass *InputRC =
1858         Lookup->isReg() ? MRI->getRegClass(Lookup->getReg())
1859                         : MRI->getRegClass(RegOp.getReg());
1860 
1861     // TODO: Account for Lookup->getSubReg()
1862 
1863     // If we can't find a matching super class, this is an SGPR->AGPR or
1864     // VGPR->AGPR subreg copy (or something constant-like we have to materialize
1865     // in the AGPR). We can't directly copy from SGPR to AGPR on gfx908, so we
1866     // want to rewrite to copy to an intermediate VGPR class.
1867     const TargetRegisterClass *MatchRC =
1868         TRI->getMatchingSuperRegClass(DefRC, InputRC, SubRegIdx);
1869     if (!MatchRC) {
1870       ++NumFoldable;
1871       NewDefs.emplace_back(&RegOp, SubRegIdx);
1872       continue;
1873     }
1874 
1875     NewDefs.emplace_back(&RegOp, SubRegIdx);
1876   }
1877 
1878   // Do not clone a reg_sequence and merely change the result register class.
1879   if (NumFoldable == 0)
1880     return false;
1881 
1882   CopyMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
1883   for (unsigned I = CopyMI->getNumOperands() - 1; I > 0; --I)
1884     CopyMI->removeOperand(I);
1885 
1886   for (auto [Def, DestSubIdx] : NewDefs) {
1887     if (!Def->isReg()) {
1888       // TODO: Should we use a single write for each repeated value like in
1889       // the register case?
1890       Register Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
1891       BuildMI(MBB, CopyMI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp)
1892           .add(*Def);
1893       B.addReg(Tmp);
1894     } else {
1895       TargetInstrInfo::RegSubRegPair Src = getRegSubRegPair(*Def);
1896       Def->setIsKill(false);
1897 
1898       Register &VGPRCopy = VGPRCopies[Src];
1899       if (!VGPRCopy) {
1900         const TargetRegisterClass *VGPRUseSubRC =
1901             TRI->getSubRegisterClass(UseRC, DestSubIdx);
1902 
1903         // We cannot build a reg_sequence out of the same registers; they
1904         // must be copied. Better to do it here before copyPhysReg() creates
1905         // several reads to do the AGPR->VGPR->AGPR copy.
1906 
1907         // Direct copy from SGPR to AGPR is not possible on gfx908. To avoid
1908         // creating exploded SGPR->VGPR->AGPR copies in copyPhysReg() later,
1909         // create a copy here and track if we already have such a copy.
1910         if (TRI->getSubRegisterClass(MRI->getRegClass(Src.Reg), Src.SubReg) !=
1911             VGPRUseSubRC) {
1912           VGPRCopy = MRI->createVirtualRegister(VGPRUseSubRC);
1913           BuildMI(MBB, CopyMI, DL, TII->get(AMDGPU::COPY), VGPRCopy).add(*Def);
1914           B.addReg(VGPRCopy);
1915         } else {
1916           // If it is already a VGPR, do not copy the register.
1917           B.add(*Def);
1918         }
1919       } else {
1920         B.addReg(VGPRCopy);
1921       }
1922     }
1923 
1924     B.addImm(DestSubIdx);
1925   }
1926 
1927   LLVM_DEBUG(dbgs() << "Folded " << *CopyMI);
1928   return true;
1929 }
1930 
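// Try to fold the source operand of a foldable copy-like instruction (mov of
// an immediate, frame index, global, or register) into the users of its
// destination, erasing the instruction once all of its uses have been folded.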
1931 bool SIFoldOperandsImpl::tryFoldFoldableCopy(
1932     MachineInstr &MI, MachineOperand *&CurrentKnownM0Val) const {
1933   Register DstReg = MI.getOperand(0).getReg();
1934   // Specially track simple redefs of m0 to the same value in a block, so we
1935   // can erase the later ones.
1936   if (DstReg == AMDGPU::M0) {
1937     MachineOperand &NewM0Val = MI.getOperand(1);
1938     if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
1939       MI.eraseFromParent();
1940       return true;
1941     }
1942 
1943     // We aren't tracking other physical registers
1944     CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical())
1945                             ? nullptr
1946                             : &NewM0Val;
1947     return false;
1948   }
1949 
1950   MachineOperand *OpToFoldPtr;
1951   if (MI.getOpcode() == AMDGPU::V_MOV_B16_t16_e64) {
1952     // Folding when any src_modifiers are non-zero is unsupported
1953     if (TII->hasAnyModifiersSet(MI))
1954       return false;
1955     OpToFoldPtr = &MI.getOperand(2);
1956   } else
1957     OpToFoldPtr = &MI.getOperand(1);
1958   MachineOperand &OpToFold = *OpToFoldPtr;
1959   bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
1960 
1961   // FIXME: We could also be folding things like TargetIndexes.
1962   if (!FoldingImm && !OpToFold.isReg())
1963     return false;
1964 
1965   if (OpToFold.isReg() && !OpToFold.getReg().isVirtual())
1966     return false;
1967 
1968   // Prevent folding operands backwards in the function. For example,
1969   // the COPY opcode must not be replaced by 1 here:
1970   //
1971   //    %3 = COPY %vgpr0; VGPR_32:%3
1972   //    ...
1973   //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
1974   if (!DstReg.isVirtual())
1975     return false;
1976 
1977   const TargetRegisterClass *DstRC =
1978       MRI->getRegClass(MI.getOperand(0).getReg());
1979 
1980   // True16: Fix malformed 16-bit sgpr COPY produced by peephole-opt
1981   // Can remove this code if proper 16-bit SGPRs are implemented
1982   // Example: Pre-peephole-opt
1983   // %29:sgpr_lo16 = COPY %16.lo16:sreg_32
1984   // %32:sreg_32 = COPY %29:sgpr_lo16
1985   // %30:sreg_32 = S_PACK_LL_B32_B16 killed %31:sreg_32, killed %32:sreg_32
1986   // Post-peephole-opt and DCE
1987   // %32:sreg_32 = COPY %16.lo16:sreg_32
1988   // %30:sreg_32 = S_PACK_LL_B32_B16 killed %31:sreg_32, killed %32:sreg_32
1989   // After this transform
1990   // %32:sreg_32 = COPY %16:sreg_32
1991   // %30:sreg_32 = S_PACK_LL_B32_B16 killed %31:sreg_32, killed %32:sreg_32
1992   // After the fold operands pass
1993   // %30:sreg_32 = S_PACK_LL_B32_B16 killed %31:sreg_32, killed %16:sreg_32
1994   if (MI.getOpcode() == AMDGPU::COPY && OpToFold.isReg() &&
1995       OpToFold.getSubReg()) {
1996     if (DstRC == &AMDGPU::SReg_32RegClass &&
1997         DstRC == MRI->getRegClass(OpToFold.getReg())) {
1998       assert(OpToFold.getSubReg() == AMDGPU::lo16);
1999       OpToFold.setSubReg(0);
2000     }
2001   }
2002 
2003   // Fold copy to AGPR through reg_sequence
2004   // TODO: Handle with subregister extract
2005   if (OpToFold.isReg() && MI.isCopy() && !MI.getOperand(1).getSubReg()) {
2006     if (foldCopyToAGPRRegSequence(&MI))
2007       return true;
2008   }
2009 
2010   FoldableDef Def(OpToFold, DstRC);
2011   bool Changed = foldInstOperand(MI, Def);
2012 
2013   // If we managed to fold all uses of this copy then we might as well
2014   // delete it now.
2015   // The only reason we need to follow chains of copies here is that
2016   // tryFoldRegSequence looks forward through copies before folding a
2017   // REG_SEQUENCE into its eventual users.
2018   auto *InstToErase = &MI;
2019   while (MRI->use_nodbg_empty(InstToErase->getOperand(0).getReg())) {
2020     auto &SrcOp = InstToErase->getOperand(1);
2021     auto SrcReg = SrcOp.isReg() ? SrcOp.getReg() : Register();
2022     InstToErase->eraseFromParent();
2023     Changed = true;
2024     InstToErase = nullptr;
2025     if (!SrcReg || SrcReg.isPhysical())
2026       break;
2027     InstToErase = MRI->getVRegDef(SrcReg);
2028     if (!InstToErase || !TII->isFoldableCopy(*InstToErase))
2029       break;
2030   }
2031 
2032   if (InstToErase && InstToErase->isRegSequence() &&
2033       MRI->use_nodbg_empty(InstToErase->getOperand(0).getReg())) {
2034     InstToErase->eraseFromParent();
2035     Changed = true;
2036   }
2037 
2038   if (Changed)
2039     return true;
2040 
2041   // Run this after foldInstOperand to avoid turning scalar additions into
2042   // vector additions when the scalar result could just be folded into
2043   // the user(s).
2044   return OpToFold.isReg() &&
2045          foldCopyToVGPROfScalarAddOfFrameIndex(DstReg, OpToFold.getReg(), MI);
2046 }
2047 
2048 // Clamp patterns are canonically selected to v_max_* instructions, so only
2049 // handle them.
2050 const MachineOperand *
2051 SIFoldOperandsImpl::isClamp(const MachineInstr &MI) const {
2052   unsigned Op = MI.getOpcode();
2053   switch (Op) {
2054   case AMDGPU::V_MAX_F32_e64:
2055   case AMDGPU::V_MAX_F16_e64:
2056   case AMDGPU::V_MAX_F16_t16_e64:
2057   case AMDGPU::V_MAX_F16_fake16_e64:
2058   case AMDGPU::V_MAX_F64_e64:
2059   case AMDGPU::V_MAX_NUM_F64_e64:
2060   case AMDGPU::V_PK_MAX_F16: {
2061     if (MI.mayRaiseFPException())
2062       return nullptr;
2063 
2064     if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
2065       return nullptr;
2066 
2067     // Make sure sources are identical.
2068     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
2069     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
2070     if (!Src0->isReg() || !Src1->isReg() ||
2071         Src0->getReg() != Src1->getReg() ||
2072         Src0->getSubReg() != Src1->getSubReg() ||
2073         Src0->getSubReg() != AMDGPU::NoSubRegister)
2074       return nullptr;
2075 
2076     // Can't fold up if we have modifiers.
2077     if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
2078       return nullptr;
2079 
2080     unsigned Src0Mods =
2081         TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
2082     unsigned Src1Mods =
2083         TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();
2084 
2085     // Having a 0 op_sel_hi would require swizzling the output in the source
2086     // instruction, which we can't do.
2087     unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
2088                                                       : 0u;
2089     if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
2090       return nullptr;
2091     return Src0;
2092   }
2093   default:
2094     return nullptr;
2095   }
2096 }
2097 
2098 // FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
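// Illustrative example of the clamp fold (hypothetical values; VOP3 operand
// lists abbreviated):
//   %0:vgpr_32 = V_ADD_F32_e64 0, %a, 0, %b, 0 /*clamp*/, 0 /*omod*/
//   %1:vgpr_32 = V_MAX_F32_e64 0, %0, 0, %0, 1 /*clamp*/, 0 /*omod*/
// =>
//   %0:vgpr_32 = V_ADD_F32_e64 0, %a, 0, %b, 1 /*clamp*/, 0 /*omod*/
// with uses of %1 rewritten to use %0 and the v_max erased.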
2099 bool SIFoldOperandsImpl::tryFoldClamp(MachineInstr &MI) {
2100   const MachineOperand *ClampSrc = isClamp(MI);
2101   if (!ClampSrc || !MRI->hasOneNonDBGUser(ClampSrc->getReg()))
2102     return false;
2103 
2104   if (!ClampSrc->getReg().isVirtual())
2105     return false;
2106 
2107   // Look through COPY; a COPY is only observed with True16.
2108   Register DefSrcReg = TRI->lookThruCopyLike(ClampSrc->getReg(), MRI);
2109   MachineInstr *Def =
2110       MRI->getVRegDef(DefSrcReg.isVirtual() ? DefSrcReg : ClampSrc->getReg());
2111 
2112   // The type of clamp must be compatible.
2113   if (TII->getClampMask(*Def) != TII->getClampMask(MI))
2114     return false;
2115 
2116   if (Def->mayRaiseFPException())
2117     return false;
2118 
2119   MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
2120   if (!DefClamp)
2121     return false;
2122 
2123   LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def);
2124 
2125   // Clamp is applied after omod, so it is OK if omod is set.
2126   DefClamp->setImm(1);
2127 
2128   Register DefReg = Def->getOperand(0).getReg();
2129   Register MIDstReg = MI.getOperand(0).getReg();
2130   if (TRI->isSGPRReg(*MRI, DefReg)) {
2131     // Pseudo scalar instructions have an SGPR for dst and clamp is a v_max*
2132     // instruction with a VGPR dst.
2133     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
2134             MIDstReg)
2135         .addReg(DefReg);
2136   } else {
2137     MRI->replaceRegWith(MIDstReg, DefReg);
2138   }
2139   MI.eraseFromParent();
2140 
2141   // Use of output modifiers forces VOP3 encoding for a VOP2 mac/fmac
2142   // instruction, so we might as well convert it to the more flexible VOP3-only
2143   // mad/fma form.
2144   if (TII->convertToThreeAddress(*Def, nullptr, nullptr))
2145     Def->eraseFromParent();
2146 
2147   return true;
2148 }
2149 
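// Map a multiplier immediate to its output-modifier encoding for the given mul
// opcode: 0.5 -> DIV2, 2.0 -> MUL2, 4.0 -> MUL4, anything else -> NONE.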
2150 static int getOModValue(unsigned Opc, int64_t Val) {
2151   switch (Opc) {
2152   case AMDGPU::V_MUL_F64_e64:
2153   case AMDGPU::V_MUL_F64_pseudo_e64: {
2154     switch (Val) {
2155     case 0x3fe0000000000000: // 0.5
2156       return SIOutMods::DIV2;
2157     case 0x4000000000000000: // 2.0
2158       return SIOutMods::MUL2;
2159     case 0x4010000000000000: // 4.0
2160       return SIOutMods::MUL4;
2161     default:
2162       return SIOutMods::NONE;
2163     }
2164   }
2165   case AMDGPU::V_MUL_F32_e64: {
2166     switch (static_cast<uint32_t>(Val)) {
2167     case 0x3f000000: // 0.5
2168       return SIOutMods::DIV2;
2169     case 0x40000000: // 2.0
2170       return SIOutMods::MUL2;
2171     case 0x40800000: // 4.0
2172       return SIOutMods::MUL4;
2173     default:
2174       return SIOutMods::NONE;
2175     }
2176   }
2177   case AMDGPU::V_MUL_F16_e64:
2178   case AMDGPU::V_MUL_F16_t16_e64:
2179   case AMDGPU::V_MUL_F16_fake16_e64: {
2180     switch (static_cast<uint16_t>(Val)) {
2181     case 0x3800: // 0.5
2182       return SIOutMods::DIV2;
2183     case 0x4000: // 2.0
2184       return SIOutMods::MUL2;
2185     case 0x4400: // 4.0
2186       return SIOutMods::MUL4;
2187     default:
2188       return SIOutMods::NONE;
2189     }
2190   }
2191   default:
2192     llvm_unreachable("invalid mul opcode");
2193   }
2194 }
2195 
2196 // FIXME: Does this really not support denormals with f16?
2197 // FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
2198 // handled, so will anything other than that break?
2199 std::pair<const MachineOperand *, int>
2200 SIFoldOperandsImpl::isOMod(const MachineInstr &MI) const {
2201   unsigned Op = MI.getOpcode();
2202   switch (Op) {
2203   case AMDGPU::V_MUL_F64_e64:
2204   case AMDGPU::V_MUL_F64_pseudo_e64:
2205   case AMDGPU::V_MUL_F32_e64:
2206   case AMDGPU::V_MUL_F16_t16_e64:
2207   case AMDGPU::V_MUL_F16_fake16_e64:
2208   case AMDGPU::V_MUL_F16_e64: {
2209     // If output denormals are enabled, omod is ignored.
2210     if ((Op == AMDGPU::V_MUL_F32_e64 &&
2211          MFI->getMode().FP32Denormals.Output != DenormalMode::PreserveSign) ||
2212         ((Op == AMDGPU::V_MUL_F64_e64 || Op == AMDGPU::V_MUL_F64_pseudo_e64 ||
2213           Op == AMDGPU::V_MUL_F16_e64 || Op == AMDGPU::V_MUL_F16_t16_e64 ||
2214           Op == AMDGPU::V_MUL_F16_fake16_e64) &&
2215          MFI->getMode().FP64FP16Denormals.Output !=
2216              DenormalMode::PreserveSign) ||
2217         MI.mayRaiseFPException())
2218       return std::pair(nullptr, SIOutMods::NONE);
2219 
2220     const MachineOperand *RegOp = nullptr;
2221     const MachineOperand *ImmOp = nullptr;
2222     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
2223     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
2224     if (Src0->isImm()) {
2225       ImmOp = Src0;
2226       RegOp = Src1;
2227     } else if (Src1->isImm()) {
2228       ImmOp = Src1;
2229       RegOp = Src0;
2230     } else
2231       return std::pair(nullptr, SIOutMods::NONE);
2232 
2233     int OMod = getOModValue(Op, ImmOp->getImm());
2234     if (OMod == SIOutMods::NONE ||
2235         TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
2236         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
2237         TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
2238         TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
2239       return std::pair(nullptr, SIOutMods::NONE);
2240 
2241     return std::pair(RegOp, OMod);
2242   }
2243   case AMDGPU::V_ADD_F64_e64:
2244   case AMDGPU::V_ADD_F64_pseudo_e64:
2245   case AMDGPU::V_ADD_F32_e64:
2246   case AMDGPU::V_ADD_F16_e64:
2247   case AMDGPU::V_ADD_F16_t16_e64:
2248   case AMDGPU::V_ADD_F16_fake16_e64: {
2249     // If output denormals are enabled, omod is ignored.
2250     if ((Op == AMDGPU::V_ADD_F32_e64 &&
2251          MFI->getMode().FP32Denormals.Output != DenormalMode::PreserveSign) ||
2252         ((Op == AMDGPU::V_ADD_F64_e64 || Op == AMDGPU::V_ADD_F64_pseudo_e64 ||
2253           Op == AMDGPU::V_ADD_F16_e64 || Op == AMDGPU::V_ADD_F16_t16_e64 ||
2254           Op == AMDGPU::V_ADD_F16_fake16_e64) &&
2255          MFI->getMode().FP64FP16Denormals.Output != DenormalMode::PreserveSign))
2256       return std::pair(nullptr, SIOutMods::NONE);
2257 
2258     // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
2259     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
2260     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
2261 
2262     if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
2263         Src0->getSubReg() == Src1->getSubReg() &&
2264         !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
2265         !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
2266         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
2267         !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
2268       return std::pair(Src0, SIOutMods::MUL2);
2269 
2270     return std::pair(nullptr, SIOutMods::NONE);
2271   }
2272   default:
2273     return std::pair(nullptr, SIOutMods::NONE);
2274   }
2275 }
2276 
2277 // FIXME: Does this need to check IEEE bit on function?
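// Illustrative example of the omod fold (hypothetical values; VOP3 operand
// lists abbreviated):
//   %0:vgpr_32 = V_ADD_F32_e64 0, %a, 0, %b, 0 /*clamp*/, 0 /*omod*/
//   %1:vgpr_32 = V_MUL_F32_e64 0, 2.0, 0, %0, 0 /*clamp*/, 0 /*omod*/
// =>
//   %0:vgpr_32 = V_ADD_F32_e64 0, %a, 0, %b, 0 /*clamp*/, 1 /*omod:MUL2*/
// with uses of %1 rewritten to use %0 and the v_mul erased.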
2278 bool SIFoldOperandsImpl::tryFoldOMod(MachineInstr &MI) {
2279   const MachineOperand *RegOp;
2280   int OMod;
2281   std::tie(RegOp, OMod) = isOMod(MI);
2282   if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
2283       RegOp->getSubReg() != AMDGPU::NoSubRegister ||
2284       !MRI->hasOneNonDBGUser(RegOp->getReg()))
2285     return false;
2286 
2287   MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
2288   MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
2289   if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
2290     return false;
2291 
2292   if (Def->mayRaiseFPException())
2293     return false;
2294 
2295   // Clamp is applied after omod. If the source already has clamp set, don't
2296   // fold it.
2297   if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
2298     return false;
2299 
2300   LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def);
2301 
2302   DefOMod->setImm(OMod);
2303   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
2304   // Kill flags can be wrong if we replaced a def inside a loop with a def
2305   // outside the loop.
2306   MRI->clearKillFlags(Def->getOperand(0).getReg());
2307   MI.eraseFromParent();
2308 
2309   // Use of output modifiers forces VOP3 encoding for a VOP2 mac/fmac
2310   // instruction, so we might as well convert it to the more flexible VOP3-only
2311   // mad/fma form.
2312   if (TII->convertToThreeAddress(*Def, nullptr, nullptr))
2313     Def->eraseFromParent();
2314 
2315   return true;
2316 }
2317 
2318 // Try to fold a reg_sequence with vgpr output and agpr inputs into an
2319 // instruction which can take an agpr. So far that means a store.
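// Illustrative example (hypothetical registers and operands):
//   %0:vreg_64 = REG_SEQUENCE %a:agpr_32, %subreg.sub0, %b:agpr_32, %subreg.sub1
//   GLOBAL_STORE_DWORDX2 %ptr, %0, 0, 0, implicit %exec
// =>
//   %1:areg_64 = REG_SEQUENCE %a:agpr_32, %subreg.sub0, %b:agpr_32, %subreg.sub1
//   GLOBAL_STORE_DWORDX2 %ptr, %1, 0, 0, implicit %exec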
2320 bool SIFoldOperandsImpl::tryFoldRegSequence(MachineInstr &MI) {
2321   assert(MI.isRegSequence());
2322   auto Reg = MI.getOperand(0).getReg();
2323 
2324   if (!ST->hasGFX90AInsts() || !TRI->isVGPR(*MRI, Reg) ||
2325       !MRI->hasOneNonDBGUse(Reg))
2326     return false;
2327 
2328   SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
2329   if (!getRegSeqInit(Defs, Reg))
2330     return false;
2331 
2332   for (auto &[Op, SubIdx] : Defs) {
2333     if (!Op->isReg())
2334       return false;
2335     if (TRI->isAGPR(*MRI, Op->getReg()))
2336       continue;
2337     // Maybe this is a COPY from AREG
2338     const MachineInstr *SubDef = MRI->getVRegDef(Op->getReg());
2339     if (!SubDef || !SubDef->isCopy() || SubDef->getOperand(1).getSubReg())
2340       return false;
2341     if (!TRI->isAGPR(*MRI, SubDef->getOperand(1).getReg()))
2342       return false;
2343   }
2344 
2345   MachineOperand *Op = &*MRI->use_nodbg_begin(Reg);
2346   MachineInstr *UseMI = Op->getParent();
2347   while (UseMI->isCopy() && !Op->getSubReg()) {
2348     Reg = UseMI->getOperand(0).getReg();
2349     if (!TRI->isVGPR(*MRI, Reg) || !MRI->hasOneNonDBGUse(Reg))
2350       return false;
2351     Op = &*MRI->use_nodbg_begin(Reg);
2352     UseMI = Op->getParent();
2353   }
2354 
2355   if (Op->getSubReg())
2356     return false;
2357 
2358   unsigned OpIdx = Op - &UseMI->getOperand(0);
2359   const MCInstrDesc &InstDesc = UseMI->getDesc();
2360   const TargetRegisterClass *OpRC =
2361       TII->getRegClass(InstDesc, OpIdx, TRI, *MI.getMF());
2362   if (!OpRC || !TRI->isVectorSuperClass(OpRC))
2363     return false;
2364 
2365   const auto *NewDstRC = TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg));
2366   auto Dst = MRI->createVirtualRegister(NewDstRC);
2367   auto RS = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
2368                     TII->get(AMDGPU::REG_SEQUENCE), Dst);
2369 
2370   for (auto &[Def, SubIdx] : Defs) {
2371     Def->setIsKill(false);
2372     if (TRI->isAGPR(*MRI, Def->getReg())) {
2373       RS.add(*Def);
2374     } else { // This is a copy
2375       MachineInstr *SubDef = MRI->getVRegDef(Def->getReg());
2376       SubDef->getOperand(1).setIsKill(false);
2377       RS.addReg(SubDef->getOperand(1).getReg(), 0, Def->getSubReg());
2378     }
2379     RS.addImm(SubIdx);
2380   }
2381 
2382   Op->setReg(Dst);
2383   if (!TII->isOperandLegal(*UseMI, OpIdx, Op)) {
2384     Op->setReg(Reg);
2385     RS->eraseFromParent();
2386     return false;
2387   }
2388 
2389   LLVM_DEBUG(dbgs() << "Folded " << *RS << " into " << *UseMI);
2390 
2391   // Erase the REG_SEQUENCE eagerly, unless we followed a chain of COPY users,
2392   // in which case we can erase them all later in runOnMachineFunction.
2393   if (MRI->use_nodbg_empty(MI.getOperand(0).getReg()))
2394     MI.eraseFromParent();
2395   return true;
2396 }
2397 
2398 /// Checks whether \p Copy is an AGPR -> VGPR copy. Returns `true` on success
2399 /// and stores the AGPR register in \p OutReg and the subreg in \p OutSubReg.
2400 static bool isAGPRCopy(const SIRegisterInfo &TRI,
2401                        const MachineRegisterInfo &MRI, const MachineInstr &Copy,
2402                        Register &OutReg, unsigned &OutSubReg) {
2403   assert(Copy.isCopy());
2404 
2405   const MachineOperand &CopySrc = Copy.getOperand(1);
2406   Register CopySrcReg = CopySrc.getReg();
2407   if (!CopySrcReg.isVirtual())
2408     return false;
2409 
2410   // Common case: copy from AGPR directly, e.g.
2411   //  %1:vgpr_32 = COPY %0:agpr_32
2412   if (TRI.isAGPR(MRI, CopySrcReg)) {
2413     OutReg = CopySrcReg;
2414     OutSubReg = CopySrc.getSubReg();
2415     return true;
2416   }
2417 
2418   // Sometimes it can also involve two copies, e.g.
2419   //  %1:vgpr_256 = COPY %0:agpr_256
2420   //  %2:vgpr_32 = COPY %1:vgpr_256.sub0
2421   const MachineInstr *CopySrcDef = MRI.getVRegDef(CopySrcReg);
2422   if (!CopySrcDef || !CopySrcDef->isCopy())
2423     return false;
2424 
2425   const MachineOperand &OtherCopySrc = CopySrcDef->getOperand(1);
2426   Register OtherCopySrcReg = OtherCopySrc.getReg();
2427   if (!OtherCopySrcReg.isVirtual() ||
2428       CopySrcDef->getOperand(0).getSubReg() != AMDGPU::NoSubRegister ||
2429       OtherCopySrc.getSubReg() != AMDGPU::NoSubRegister ||
2430       !TRI.isAGPR(MRI, OtherCopySrcReg))
2431     return false;
2432 
2433   OutReg = OtherCopySrcReg;
2434   OutSubReg = CopySrc.getSubReg();
2435   return true;
2436 }
2437 
2438 // Try to hoist an AGPR to VGPR copy across a PHI.
2439 // This should allow folding of an AGPR into a consumer which may support it.
2440 //
2441 // Example 1: LCSSA PHI
2442 //      loop:
2443 //        %1:vreg = COPY %0:areg
2444 //      exit:
2445 //        %2:vreg = PHI %1:vreg, %loop
2446 //  =>
2447 //      loop:
2448 //      exit:
2449 //        %1:areg = PHI %0:areg, %loop
2450 //        %2:vreg = COPY %1:areg
2451 //
2452 // Example 2: PHI with multiple incoming values:
2453 //      entry:
2454 //        %1:vreg = GLOBAL_LOAD(..)
2455 //      loop:
2456 //        %2:vreg = PHI %1:vreg, %entry, %5:vreg, %loop
2457 //        %3:areg = COPY %2:vreg
2458 //        %4:areg = (instr using %3:areg)
2459 //        %5:vreg = COPY %4:areg
2460 //  =>
2461 //      entry:
2462 //        %1:vreg = GLOBAL_LOAD(..)
2463 //        %2:areg = COPY %1:vreg
2464 //      loop:
2465 //        %3:areg = PHI %2:areg, %entry, %X:areg,
2466 //        %4:areg = (instr using %3:areg)
2467 bool SIFoldOperandsImpl::tryFoldPhiAGPR(MachineInstr &PHI) {
2468   assert(PHI.isPHI());
2469 
2470   Register PhiOut = PHI.getOperand(0).getReg();
2471   if (!TRI->isVGPR(*MRI, PhiOut))
2472     return false;
2473 
2474   // Iterate once over all incoming values of the PHI to check if this PHI is
2475   // eligible, and determine the exact AGPR RC we'll target.
2476   const TargetRegisterClass *ARC = nullptr;
2477   for (unsigned K = 1; K < PHI.getNumExplicitOperands(); K += 2) {
2478     MachineOperand &MO = PHI.getOperand(K);
2479     MachineInstr *Copy = MRI->getVRegDef(MO.getReg());
2480     if (!Copy || !Copy->isCopy())
2481       continue;
2482 
2483     Register AGPRSrc;
2484     unsigned AGPRRegMask = AMDGPU::NoSubRegister;
2485     if (!isAGPRCopy(*TRI, *MRI, *Copy, AGPRSrc, AGPRRegMask))
2486       continue;
2487 
2488     const TargetRegisterClass *CopyInRC = MRI->getRegClass(AGPRSrc);
2489     if (const auto *SubRC = TRI->getSubRegisterClass(CopyInRC, AGPRRegMask))
2490       CopyInRC = SubRC;
2491 
2492     if (ARC && !ARC->hasSubClassEq(CopyInRC))
2493       return false;
2494     ARC = CopyInRC;
2495   }
2496 
2497   if (!ARC)
2498     return false;
2499 
2500   bool IsAGPR32 = (ARC == &AMDGPU::AGPR_32RegClass);
2501 
2502   // Rewrite the PHI's incoming values to ARC.
2503   LLVM_DEBUG(dbgs() << "Folding AGPR copies into: " << PHI);
2504   for (unsigned K = 1; K < PHI.getNumExplicitOperands(); K += 2) {
2505     MachineOperand &MO = PHI.getOperand(K);
2506     Register Reg = MO.getReg();
2507 
2508     MachineBasicBlock::iterator InsertPt;
2509     MachineBasicBlock *InsertMBB = nullptr;
2510 
2511     // Look at the def of Reg, ignoring all copies.
2512     unsigned CopyOpc = AMDGPU::COPY;
2513     if (MachineInstr *Def = MRI->getVRegDef(Reg)) {
2514 
2515       // Look at pre-existing COPY instructions from ARC: Steal the operand. If
2516       // the copy was single-use, it will be removed by DCE later.
2517       if (Def->isCopy()) {
2518         Register AGPRSrc;
2519         unsigned AGPRSubReg = AMDGPU::NoSubRegister;
2520         if (isAGPRCopy(*TRI, *MRI, *Def, AGPRSrc, AGPRSubReg)) {
2521           MO.setReg(AGPRSrc);
2522           MO.setSubReg(AGPRSubReg);
2523           continue;
2524         }
2525 
2526         // If this is a multi-use SGPR -> VGPR copy, use V_ACCVGPR_WRITE on
2527         // GFX908 directly instead of a COPY. Otherwise, SIFoldOperands may try
2528         // to fold the sgpr -> vgpr -> agpr copy into a sgpr -> agpr copy which
2529         // is unlikely to be profitable.
2530         //
2531         // Note that V_ACCVGPR_WRITE is only used for AGPR_32.
2532         MachineOperand &CopyIn = Def->getOperand(1);
2533         if (IsAGPR32 && !ST->hasGFX90AInsts() && !MRI->hasOneNonDBGUse(Reg) &&
2534             TRI->isSGPRReg(*MRI, CopyIn.getReg()))
2535           CopyOpc = AMDGPU::V_ACCVGPR_WRITE_B32_e64;
2536       }
2537 
2538       InsertMBB = Def->getParent();
2539       InsertPt = InsertMBB->SkipPHIsLabelsAndDebug(++Def->getIterator());
2540     } else {
2541       InsertMBB = PHI.getOperand(MO.getOperandNo() + 1).getMBB();
2542       InsertPt = InsertMBB->getFirstTerminator();
2543     }
2544 
2545     Register NewReg = MRI->createVirtualRegister(ARC);
2546     MachineInstr *MI = BuildMI(*InsertMBB, InsertPt, PHI.getDebugLoc(),
2547                                TII->get(CopyOpc), NewReg)
2548                            .addReg(Reg);
2549     MO.setReg(NewReg);
2550 
2551     (void)MI;
2552     LLVM_DEBUG(dbgs() << "  Created COPY: " << *MI);
2553   }
2554 
2555   // Replace the PHI's result with a new register.
2556   Register NewReg = MRI->createVirtualRegister(ARC);
2557   PHI.getOperand(0).setReg(NewReg);
2558 
2559   // COPY that new register back to the original PhiOut register. This COPY will
2560   // usually be folded out later.
2561   MachineBasicBlock *MBB = PHI.getParent();
2562   BuildMI(*MBB, MBB->getFirstNonPHI(), PHI.getDebugLoc(),
2563           TII->get(AMDGPU::COPY), PhiOut)
2564       .addReg(NewReg);
2565 
2566   LLVM_DEBUG(dbgs() << "  Done: Folded " << PHI);
2567   return true;
2568 }
2569 
2570 // Attempt to convert a VGPR load to an AGPR load.
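// Illustrative example (hypothetical registers and operands):
//   %0:vgpr_32 = GLOBAL_LOAD_DWORD %ptr, 0, 0, implicit %exec
//   %1:agpr_32 = COPY %0
// =>
//   %0:agpr_32 = GLOBAL_LOAD_DWORD %ptr, 0, 0, implicit %exec
//   %1:agpr_32 = COPY %0   (now a trivial agpr-to-agpr copy)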
2571 bool SIFoldOperandsImpl::tryFoldLoad(MachineInstr &MI) {
2572   assert(MI.mayLoad());
2573   if (!ST->hasGFX90AInsts() || MI.getNumExplicitDefs() != 1)
2574     return false;
2575 
2576   MachineOperand &Def = MI.getOperand(0);
2577   if (!Def.isDef())
2578     return false;
2579 
2580   Register DefReg = Def.getReg();
2581 
2582   if (DefReg.isPhysical() || !TRI->isVGPR(*MRI, DefReg))
2583     return false;
2584 
2585   SmallVector<const MachineInstr *, 8> Users(
2586       llvm::make_pointer_range(MRI->use_nodbg_instructions(DefReg)));
2587   SmallVector<Register, 8> MoveRegs;
2588 
2589   if (Users.empty())
2590     return false;
2591 
2592   // Check that all users are a copy to an agpr or a reg_sequence producing an agpr.
2593   while (!Users.empty()) {
2594     const MachineInstr *I = Users.pop_back_val();
2595     if (!I->isCopy() && !I->isRegSequence())
2596       return false;
2597     Register DstReg = I->getOperand(0).getReg();
2598     // Physical registers may have more than one instruction definitions
2599     if (DstReg.isPhysical())
2600       return false;
2601     if (TRI->isAGPR(*MRI, DstReg))
2602       continue;
2603     MoveRegs.push_back(DstReg);
2604     for (const MachineInstr &U : MRI->use_nodbg_instructions(DstReg))
2605       Users.push_back(&U);
2606   }
2607 
2608   const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
2609   MRI->setRegClass(DefReg, TRI->getEquivalentAGPRClass(RC));
2610   if (!TII->isOperandLegal(MI, 0, &Def)) {
2611     MRI->setRegClass(DefReg, RC);
2612     return false;
2613   }
2614 
2615   while (!MoveRegs.empty()) {
2616     Register Reg = MoveRegs.pop_back_val();
2617     MRI->setRegClass(Reg, TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg)));
2618   }
2619 
2620   LLVM_DEBUG(dbgs() << "Folded " << MI);
2621 
2622   return true;
2623 }
2624 
2625 // tryFoldPhiAGPR will aggressively try to create AGPR PHIs.
2626 // For GFX90A and later, this is pretty much always a good thing, but for GFX908
2627 // there are cases where it can create a lot more AGPR-AGPR copies, which are
2628 // expensive on this architecture due to the lack of V_ACCVGPR_MOV.
2629 //
2630 // This function looks at all AGPR PHIs in a basic block and collects their
2631 // operands. Then, it checks for registers that are used more than once across
2632 // all PHIs and caches them in a VGPR. This prevents ExpandPostRAPseudo from
2633 // having to create one VGPR temporary per use, which can get very messy if
2634 // these PHIs come from a broken-up large PHI (e.g. 32 AGPR phis, one per vector
2635 // element).
2636 //
2637 // Example
2638 //      a:
2639 //        %in:agpr_256 = COPY %foo:vgpr_256
2640 //      c:
2641 //        %x:agpr_32 = ..
2642 //      b:
2643 //        %0:areg = PHI %in.sub0:agpr_32, %a, %x, %c
2644 //        %1:areg = PHI %in.sub0:agpr_32, %a, %y, %c
2645 //        %2:areg = PHI %in.sub0:agpr_32, %a, %z, %c
2646 //  =>
2647 //      a:
2648 //        %in:agpr_256 = COPY %foo:vgpr_256
2649 //        %tmp:vgpr_32 = V_ACCVGPR_READ_B32_e64 %in.sub0:agpr_32
2650 //        %tmp_agpr:agpr_32 = COPY %tmp
2651 //      c:
2652 //        %x:agpr_32 = ..
2653 //      b:
2654 //        %0:areg = PHI %tmp_agpr, %a, %x, %c
2655 //        %1:areg = PHI %tmp_agpr, %a, %y, %c
2656 //        %2:areg = PHI %tmp_agpr, %a, %z, %c
2657 bool SIFoldOperandsImpl::tryOptimizeAGPRPhis(MachineBasicBlock &MBB) {
2658   // This is only really needed on GFX908 where AGPR-AGPR copies are
2659   // unreasonably difficult.
2660   if (ST->hasGFX90AInsts())
2661     return false;
2662 
2663   // Look at all AGPR Phis and collect the register + subregister used.
2664   DenseMap<std::pair<Register, unsigned>, std::vector<MachineOperand *>>
2665       RegToMO;
2666 
2667   for (auto &MI : MBB) {
2668     if (!MI.isPHI())
2669       break;
2670 
2671     if (!TRI->isAGPR(*MRI, MI.getOperand(0).getReg()))
2672       continue;
2673 
2674     for (unsigned K = 1; K < MI.getNumOperands(); K += 2) {
2675       MachineOperand &PhiMO = MI.getOperand(K);
2676       if (!PhiMO.getSubReg())
2677         continue;
2678       RegToMO[{PhiMO.getReg(), PhiMO.getSubReg()}].push_back(&PhiMO);
2679     }
2680   }
2681 
2682   // For all (Reg, SubReg) pairs that are used more than once, cache the value in
2683   // a VGPR.
2684   bool Changed = false;
2685   for (const auto &[Entry, MOs] : RegToMO) {
2686     if (MOs.size() == 1)
2687       continue;
2688 
2689     const auto [Reg, SubReg] = Entry;
2690     MachineInstr *Def = MRI->getVRegDef(Reg);
2691     MachineBasicBlock *DefMBB = Def->getParent();
2692 
2693     // Create a copy in a VGPR using V_ACCVGPR_READ_B32_e64 so it's not folded
2694     // out.
2695     const TargetRegisterClass *ARC = getRegOpRC(*MRI, *TRI, *MOs.front());
2696     Register TempVGPR =
2697         MRI->createVirtualRegister(TRI->getEquivalentVGPRClass(ARC));
2698     MachineInstr *VGPRCopy =
2699         BuildMI(*DefMBB, ++Def->getIterator(), Def->getDebugLoc(),
2700                 TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64), TempVGPR)
2701             .addReg(Reg, /* flags */ 0, SubReg);
2702 
2703     // Copy back to an AGPR and use that instead of the AGPR subreg in all MOs.
2704     Register TempAGPR = MRI->createVirtualRegister(ARC);
2705     BuildMI(*DefMBB, ++VGPRCopy->getIterator(), Def->getDebugLoc(),
2706             TII->get(AMDGPU::COPY), TempAGPR)
2707         .addReg(TempVGPR);
2708 
2709     LLVM_DEBUG(dbgs() << "Caching AGPR into VGPR: " << *VGPRCopy);
2710     for (MachineOperand *MO : MOs) {
2711       MO->setReg(TempAGPR);
2712       MO->setSubReg(AMDGPU::NoSubRegister);
2713       LLVM_DEBUG(dbgs() << "  Changed PHI Operand: " << *MO << "\n");
2714     }
2715 
2716     Changed = true;
2717   }
2718 
2719   return Changed;
2720 }
2721 
2722 bool SIFoldOperandsImpl::run(MachineFunction &MF) {
2723   MRI = &MF.getRegInfo();
2724   ST = &MF.getSubtarget<GCNSubtarget>();
2725   TII = ST->getInstrInfo();
2726   TRI = &TII->getRegisterInfo();
2727   MFI = MF.getInfo<SIMachineFunctionInfo>();
2728 
2729   // omod is ignored by hardware if IEEE bit is enabled. omod also does not
2730   // correctly handle signed zeros.
2731   //
2732   // FIXME: Also need to check strictfp
2733   bool IsIEEEMode = MFI->getMode().IEEE;
2734   bool HasNSZ = MFI->hasNoSignedZerosFPMath();
2735 
2736   bool Changed = false;
2737   for (MachineBasicBlock *MBB : depth_first(&MF)) {
2738     MachineOperand *CurrentKnownM0Val = nullptr;
2739     for (auto &MI : make_early_inc_range(*MBB)) {
2740       Changed |= tryFoldCndMask(MI);
2741 
2742       if (tryFoldZeroHighBits(MI)) {
2743         Changed = true;
2744         continue;
2745       }
2746 
2747       if (MI.isRegSequence() && tryFoldRegSequence(MI)) {
2748         Changed = true;
2749         continue;
2750       }
2751 
2752       if (MI.isPHI() && tryFoldPhiAGPR(MI)) {
2753         Changed = true;
2754         continue;
2755       }
2756 
2757       if (MI.mayLoad() && tryFoldLoad(MI)) {
2758         Changed = true;
2759         continue;
2760       }
2761 
2762       if (TII->isFoldableCopy(MI)) {
2763         Changed |= tryFoldFoldableCopy(MI, CurrentKnownM0Val);
2764         continue;
2765       }
2766 
2767       // Saw an unknown clobber of m0, so we no longer know what it is.
2768       if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
2769         CurrentKnownM0Val = nullptr;
2770 
2771       // TODO: Omod might be OK if there is NSZ only on the source
2772       // instruction, and not the omod multiply.
2773       if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
2774           !tryFoldOMod(MI))
2775         Changed |= tryFoldClamp(MI);
2776     }
2777 
2778     Changed |= tryOptimizeAGPRPhis(*MBB);
2779   }
2780 
2781   return Changed;
2782 }
2783 
2784 PreservedAnalyses SIFoldOperandsPass::run(MachineFunction &MF,
2785                                           MachineFunctionAnalysisManager &) {
2786   MFPropsModifier _(*this, MF);
2787 
2788   bool Changed = SIFoldOperandsImpl().run(MF);
2789   if (!Changed) {
2790     return PreservedAnalyses::all();
2791   }
2792   auto PA = getMachineFunctionPassPreservedAnalyses();
2793   PA.preserveSet<CFGAnalyses>();
2794   return PA;
2795 }
2796