xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp (revision e6bfd18d21b225af6a0ed67ceeaf1293b7b9eba5)
1 //=== lib/CodeGen/GlobalISel/AMDGPUPreLegalizerCombiner.cpp ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass does combining of machine instructions at the generic MI level,
10 // before the legalizer.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPU.h"
15 #include "AMDGPUCombinerHelper.h"
16 #include "AMDGPULegalizerInfo.h"
17 #include "GCNSubtarget.h"
18 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
19 #include "llvm/CodeGen/GlobalISel/CSEInfo.h"
20 #include "llvm/CodeGen/GlobalISel/Combiner.h"
21 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
22 #include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
23 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
24 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
25 #include "llvm/CodeGen/MachineDominators.h"
26 #include "llvm/CodeGen/TargetPassConfig.h"
27 #include "llvm/Target/TargetMachine.h"
28 
29 #define DEBUG_TYPE "amdgpu-prelegalizer-combiner"
30 
31 using namespace llvm;
32 using namespace MIPatternMatch;
33 
34 class AMDGPUPreLegalizerCombinerHelper {
35 protected:
36   MachineIRBuilder &B;
37   MachineFunction &MF;
38   MachineRegisterInfo &MRI;
39   AMDGPUCombinerHelper &Helper;
40 
41 public:
42   AMDGPUPreLegalizerCombinerHelper(MachineIRBuilder &B,
43                                    AMDGPUCombinerHelper &Helper)
44       : B(B), MF(B.getMF()), MRI(*B.getMRI()), Helper(Helper){};
45 
46   struct ClampI64ToI16MatchInfo {
47     int64_t Cmp1 = 0;
48     int64_t Cmp2 = 0;
49     Register Origin;
50   };
51 
52   bool matchClampI64ToI16(MachineInstr &MI, MachineRegisterInfo &MRI,
53                           MachineFunction &MF,
54                           ClampI64ToI16MatchInfo &MatchInfo);
55 
56   void applyClampI64ToI16(MachineInstr &MI,
57                           const ClampI64ToI16MatchInfo &MatchInfo);
58 };
59 
// Match a G_TRUNC s64 -> s16 whose source is an smin/smax (in either
// nesting order) against integer constants, i.e. an i64 value being clamped
// into a 16-bit range. On success, MatchInfo holds the two clamp constants
// and the original s64 register.
bool AMDGPUPreLegalizerCombinerHelper::matchClampI64ToI16(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineFunction &MF,
    ClampI64ToI16MatchInfo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Invalid instruction!");

  // Try to find a pattern where an i64 value should get clamped to short.
  const LLT SrcType = MRI.getType(MI.getOperand(1).getReg());
  if (SrcType != LLT::scalar(64))
    return false;

  const LLT DstType = MRI.getType(MI.getOperand(0).getReg());
  if (DstType != LLT::scalar(16))
    return false;

  // Intermediate register: result of the inner min/max feeding the outer one.
  Register Base;

  // Decides, once Cmp1/Cmp2 have been filled in by a successful structural
  // match, whether the rewrite to cvt_pk + med3 is profitable and sound.
  auto IsApplicableForCombine = [&MatchInfo]() -> bool {
    const auto Cmp1 = MatchInfo.Cmp1;
    const auto Cmp2 = MatchInfo.Cmp2;
    // NOTE(review): Cmp2 - Cmp1 is a signed i64 subtraction; for extreme
    // constants this could overflow (UB) before the range check below
    // rejects them — confirm whether matched constants are pre-bounded.
    const auto Diff = std::abs(Cmp2 - Cmp1);

    // If the difference between both comparison values is 0 or 1, there is no
    // need to clamp.
    if (Diff == 0 || Diff == 1)
      return false;

    const int64_t Min = std::numeric_limits<int16_t>::min();
    const int64_t Max = std::numeric_limits<int16_t>::max();

    // Check if the comparison values are between SHORT_MIN and SHORT_MAX.
    return ((Cmp2 >= Cmp1 && Cmp1 >= Min && Cmp2 <= Max) ||
            (Cmp1 >= Cmp2 && Cmp1 <= Max && Cmp2 >= Min));
  };

  // Try to match a combination of min / max MIR opcodes.
  // Case 1: smin(smax(Origin, Cmp2), Cmp1).
  if (mi_match(MI.getOperand(1).getReg(), MRI,
               m_GSMin(m_Reg(Base), m_ICst(MatchInfo.Cmp1)))) {
    if (mi_match(Base, MRI,
                 m_GSMax(m_Reg(MatchInfo.Origin), m_ICst(MatchInfo.Cmp2)))) {
      return IsApplicableForCombine();
    }
  }

  // Case 2: smax(smin(Origin, Cmp2), Cmp1).
  if (mi_match(MI.getOperand(1).getReg(), MRI,
               m_GSMax(m_Reg(Base), m_ICst(MatchInfo.Cmp1)))) {
    if (mi_match(Base, MRI,
                 m_GSMin(m_Reg(MatchInfo.Origin), m_ICst(MatchInfo.Cmp2)))) {
      return IsApplicableForCombine();
    }
  }

  return false;
}
113 
// Rewrite the matched clamp-to-i16 pattern. The matched MIR is:
// G_SMIN / G_SMAX (in either nesting order) for i16 bounds <= G_TRUNC i64.
// This can be efficiently written as the following instruction pair:
// v_cvt_pk_i16_i32 v0, v0, v1
// v_med3_i32 v0, Clamp_Min, v0, Clamp_Max
void AMDGPUPreLegalizerCombinerHelper::applyClampI64ToI16(
    MachineInstr &MI, const ClampI64ToI16MatchInfo &MatchInfo) {

  Register Src = MatchInfo.Origin;
  assert(MI.getParent()->getParent()->getRegInfo().getType(Src) ==
         LLT::scalar(64));
  const LLT S32 = LLT::scalar(32);

  B.setInstrAndDebugLoc(MI);

  // Split the s64 source into its two s32 halves for the pack conversion.
  auto Unmerge = B.buildUnmerge(S32, Src);

  assert(MI.getOpcode() != AMDGPU::G_AMDGPU_CVT_PK_I16_I32);

  // Pack both halves into a v2s16 with signed saturation (cvt_pk_i16_i32).
  const LLT V2S16 = LLT::fixed_vector(2, 16);
  auto CvtPk =
      B.buildInstr(AMDGPU::G_AMDGPU_CVT_PK_I16_I32, {V2S16},
                   {Unmerge.getReg(0), Unmerge.getReg(1)}, MI.getFlags());

  // Normalize the two matched constants into lower/upper clamp bounds;
  // the match accepted them in either order.
  auto MinBoundary = std::min(MatchInfo.Cmp1, MatchInfo.Cmp2);
  auto MaxBoundary = std::max(MatchInfo.Cmp1, MatchInfo.Cmp2);
  auto MinBoundaryDst = B.buildConstant(S32, MinBoundary);
  auto MaxBoundaryDst = B.buildConstant(S32, MaxBoundary);

  // med3 operates on s32, so reinterpret the packed v2s16 result.
  auto Bitcast = B.buildBitcast({S32}, CvtPk);

  // med3(min, x, max) selects the middle value, i.e. clamps x to the range.
  auto Med3 = B.buildInstr(
      AMDGPU::G_AMDGPU_SMED3, {S32},
      {MinBoundaryDst.getReg(0), Bitcast.getReg(0), MaxBoundaryDst.getReg(0)},
      MI.getFlags());

  // Produce the original s16 destination from the clamped s32.
  B.buildTrunc(MI.getOperand(0).getReg(), Med3);

  MI.eraseFromParent();
}
156 
// Base-class state for the tablegen-generated combiner
// (AMDGPUGenPreLegalizerCombinerHelper, included below). The protected
// member names are referenced directly by the generated rule code, so they
// must not be renamed.
class AMDGPUPreLegalizerCombinerHelperState {
protected:
  AMDGPUCombinerHelper &Helper;
  AMDGPUPreLegalizerCombinerHelper &PreLegalizerHelper;

public:
  AMDGPUPreLegalizerCombinerHelperState(
      AMDGPUCombinerHelper &Helper,
      AMDGPUPreLegalizerCombinerHelper &PreLegalizerHelper)
      : Helper(Helper), PreLegalizerHelper(PreLegalizerHelper) {}
};
168 
169 #define AMDGPUPRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS
170 #include "AMDGPUGenPreLegalizeGICombiner.inc"
171 #undef AMDGPUPRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS
172 
173 namespace {
174 #define AMDGPUPRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_H
175 #include "AMDGPUGenPreLegalizeGICombiner.inc"
176 #undef AMDGPUPRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_H
177 
// CombinerInfo implementation driving the pre-legalizer combines. Illegal
// ops are allowed (the legalizer has not run yet) and nothing is legalized
// here, hence AllowIllegalOps=true / ShouldLegalizeIllegal=false with a null
// LegalizerInfo.
class AMDGPUPreLegalizerCombinerInfo final : public CombinerInfo {
  GISelKnownBits *KB;       // Known-bits analysis, always available.
  MachineDominatorTree *MDT; // Null at -O0 (see runOnMachineFunction).

public:
  // Rule configuration parsed from the command line (-{en,dis}able rules).
  AMDGPUGenPreLegalizerCombinerHelperRuleConfig GeneratedRuleCfg;

  AMDGPUPreLegalizerCombinerInfo(bool EnableOpt, bool OptSize, bool MinSize,
                                  GISelKnownBits *KB, MachineDominatorTree *MDT)
      : CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
                     /*LegalizerInfo*/ nullptr, EnableOpt, OptSize, MinSize),
        KB(KB), MDT(MDT) {
    // A bad rule name on the command line is a hard error.
    if (!GeneratedRuleCfg.parseCommandLineOption())
      report_fatal_error("Invalid rule identifier");
  }

  bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
               MachineIRBuilder &B) const override;
};
197 
198 bool AMDGPUPreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
199                                               MachineInstr &MI,
200                                               MachineIRBuilder &B) const {
201   AMDGPUCombinerHelper Helper(Observer, B, KB, MDT);
202   AMDGPUPreLegalizerCombinerHelper PreLegalizerHelper(B, Helper);
203   AMDGPUGenPreLegalizerCombinerHelper Generated(GeneratedRuleCfg, Helper,
204                                                 PreLegalizerHelper);
205 
206   if (Generated.tryCombineAll(Observer, MI, B))
207     return true;
208 
209   switch (MI.getOpcode()) {
210   case TargetOpcode::G_CONCAT_VECTORS:
211     return Helper.tryCombineConcatVectors(MI);
212   case TargetOpcode::G_SHUFFLE_VECTOR:
213     return Helper.tryCombineShuffleVector(MI);
214   }
215 
216   return false;
217 }
218 
219 #define AMDGPUPRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_CPP
220 #include "AMDGPUGenPreLegalizeGICombiner.inc"
221 #undef AMDGPUPRELEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_CPP
222 
223 // Pass boilerplate
224 // ================
225 
226 class AMDGPUPreLegalizerCombiner : public MachineFunctionPass {
227 public:
228   static char ID;
229 
230   AMDGPUPreLegalizerCombiner(bool IsOptNone = false);
231 
232   StringRef getPassName() const override {
233     return "AMDGPUPreLegalizerCombiner";
234   }
235 
236   bool runOnMachineFunction(MachineFunction &MF) override;
237 
238   void getAnalysisUsage(AnalysisUsage &AU) const override;
239 private:
240   bool IsOptNone;
241 };
242 } // end anonymous namespace
243 
// Declare the analyses this pass requires and preserves.
void AMDGPUPreLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.setPreservesCFG();
  getSelectionDAGFallbackAnalysisUsage(AU);
  AU.addRequired<GISelKnownBitsAnalysis>();
  AU.addPreserved<GISelKnownBitsAnalysis>();
  // The dominator tree is only requested when optimizing; at -O0 MDT is
  // passed as null to the combiner info instead.
  if (!IsOptNone) {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
  }

  // CSE info is always needed: the combiner builds new instructions through
  // a CSE-aware builder (see runOnMachineFunction).
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  AU.addPreserved<GISelCSEAnalysisWrapperPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
259 
260 AMDGPUPreLegalizerCombiner::AMDGPUPreLegalizerCombiner(bool IsOptNone)
261   : MachineFunctionPass(ID), IsOptNone(IsOptNone) {
262   initializeAMDGPUPreLegalizerCombinerPass(*PassRegistry::getPassRegistry());
263 }
264 
265 bool AMDGPUPreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
266   if (MF.getProperties().hasProperty(
267           MachineFunctionProperties::Property::FailedISel))
268     return false;
269   auto *TPC = &getAnalysis<TargetPassConfig>();
270   const Function &F = MF.getFunction();
271   bool EnableOpt =
272       MF.getTarget().getOptLevel() != CodeGenOpt::None && !skipFunction(F);
273   GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
274   MachineDominatorTree *MDT =
275       IsOptNone ? nullptr : &getAnalysis<MachineDominatorTree>();
276   AMDGPUPreLegalizerCombinerInfo PCInfo(EnableOpt, F.hasOptSize(),
277                                         F.hasMinSize(), KB, MDT);
278   // Enable CSE.
279   GISelCSEAnalysisWrapper &Wrapper =
280       getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
281   auto *CSEInfo = &Wrapper.get(TPC->getCSEConfig());
282 
283   Combiner C(PCInfo, TPC);
284   return C.combineMachineInstrs(MF, CSEInfo);
285 }
286 
287 char AMDGPUPreLegalizerCombiner::ID = 0;
288 INITIALIZE_PASS_BEGIN(AMDGPUPreLegalizerCombiner, DEBUG_TYPE,
289                       "Combine AMDGPU machine instrs before legalization",
290                       false, false)
291 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
292 INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
293 INITIALIZE_PASS_END(AMDGPUPreLegalizerCombiner, DEBUG_TYPE,
294                     "Combine AMDGPU machine instrs before legalization", false,
295                     false)
296 
297 namespace llvm {
298 FunctionPass *createAMDGPUPreLegalizeCombiner(bool IsOptNone) {
299   return new AMDGPUPreLegalizerCombiner(IsOptNone);
300 }
301 } // end namespace llvm
302