//=== AArch64PostLegalizerCombiner.cpp --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Post-legalization combines on generic MachineInstrs.
///
/// The combines here must preserve instruction legality.
///
/// Lowering combines (e.g. pseudo matching) should be handled by
/// AArch64PostLegalizerLowering.
///
/// Combines which don't rely on instruction legality should go in the
/// AArch64PreLegalizerCombiner.
///
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Support/Debug.h"

#define GET_GICOMBINER_DEPS
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef GET_GICOMBINER_DEPS

#define DEBUG_TYPE "aarch64-postlegalizer-combiner"

using namespace llvm;
using namespace MIPatternMatch;

namespace {

#define GET_GICOMBINER_TYPES
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef GET_GICOMBINER_TYPES

/// This combine tries to do what performExtractVectorEltCombine does in SDAG.
/// Rewrite for pairwise fadd pattern
///   (s32 (g_extract_vector_elt
///           (g_fadd (vXs32 Other)
///                  (g_vector_shuffle (vXs32 Other) undef <1,X,...> )) 0))
/// ->
///   (s32 (g_fadd (g_extract_vector_elt (vXs32 Other) 0)
///              (g_extract_vector_elt (vXs32 Other) 1))
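///
/// Illustrative sketch (hand-written, register names made up): with a v2s32
/// value %v,
///   %s = G_SHUFFLE_VECTOR %v, %undef, shufflemask(1, undef)
///   %a = G_FADD %v, %s
///   %e = G_EXTRACT_VECTOR_ELT %a, 0
/// is rewritten into extracts of lanes 0 and 1 of %v feeding a scalar G_FADD,
/// which later selection can typically turn into a pairwise add (FADDP).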
bool matchExtractVecEltPairwiseAdd(
    MachineInstr &MI, MachineRegisterInfo &MRI,
    std::tuple<unsigned, LLT, Register> &MatchInfo) {
  Register Src1 = MI.getOperand(1).getReg();
  Register Src2 = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  auto Cst = getIConstantVRegValWithLookThrough(Src2, MRI);
  if (!Cst || Cst->Value != 0)
    return false;
  // SDAG also checks for FullFP16, but this looks to be beneficial anyway.

  // Now check for an fadd operation. TODO: expand this for integer add?
  auto *FAddMI = getOpcodeDef(TargetOpcode::G_FADD, Src1, MRI);
  if (!FAddMI)
    return false;

  // If we add support for integer add, we must restrict these types to s64.
  unsigned DstSize = DstTy.getSizeInBits();
  if (DstSize != 16 && DstSize != 32 && DstSize != 64)
    return false;

  Register Src1Op1 = FAddMI->getOperand(1).getReg();
  Register Src1Op2 = FAddMI->getOperand(2).getReg();
  MachineInstr *Shuffle =
      getOpcodeDef(TargetOpcode::G_SHUFFLE_VECTOR, Src1Op2, MRI);
  MachineInstr *Other = MRI.getVRegDef(Src1Op1);
  if (!Shuffle) {
    Shuffle = getOpcodeDef(TargetOpcode::G_SHUFFLE_VECTOR, Src1Op1, MRI);
    Other = MRI.getVRegDef(Src1Op2);
  }

  // We're looking for a shuffle that moves the second element to index 0.
  if (Shuffle && Shuffle->getOperand(3).getShuffleMask()[0] == 1 &&
      Other == MRI.getVRegDef(Shuffle->getOperand(1).getReg())) {
    std::get<0>(MatchInfo) = TargetOpcode::G_FADD;
    std::get<1>(MatchInfo) = DstTy;
    std::get<2>(MatchInfo) = Other->getOperand(0).getReg();
    return true;
  }
  return false;
}

void applyExtractVecEltPairwiseAdd(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B,
    std::tuple<unsigned, LLT, Register> &MatchInfo) {
  unsigned Opc = std::get<0>(MatchInfo);
  assert(Opc == TargetOpcode::G_FADD && "Unexpected opcode!");
  // We want to generate two extracts of elements 0 and 1, and add them.
  LLT Ty = std::get<1>(MatchInfo);
  Register Src = std::get<2>(MatchInfo);
  LLT s64 = LLT::scalar(64);
  B.setInstrAndDebugLoc(MI);
  auto Elt0 = B.buildExtractVectorElement(Ty, Src, B.buildConstant(s64, 0));
  auto Elt1 = B.buildExtractVectorElement(Ty, Src, B.buildConstant(s64, 1));
  B.buildInstr(Opc, {MI.getOperand(0).getReg()}, {Elt0, Elt1});
  MI.eraseFromParent();
}

bool isSignExtended(Register R, MachineRegisterInfo &MRI) {
  // TODO: check if extended build vector as well.
  unsigned Opc = MRI.getVRegDef(R)->getOpcode();
  return Opc == TargetOpcode::G_SEXT || Opc == TargetOpcode::G_SEXT_INREG;
}

bool isZeroExtended(Register R, MachineRegisterInfo &MRI) {
  // TODO: check if extended build vector as well.
  return MRI.getVRegDef(R)->getOpcode() == TargetOpcode::G_ZEXT;
}

bool matchAArch64MulConstCombine(
    MachineInstr &MI, MachineRegisterInfo &MRI,
    std::function<void(MachineIRBuilder &B, Register DstReg)> &ApplyFn) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL);
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  Register Dst = MI.getOperand(0).getReg();
  const LLT Ty = MRI.getType(LHS);

  // The below optimizations require a constant RHS.
  auto Const = getIConstantVRegValWithLookThrough(RHS, MRI);
  if (!Const)
    return false;

  APInt ConstValue = Const->Value.sext(Ty.getSizeInBits());
  // The following code is ported from AArch64ISelLowering.
  // Multiplication of a power of two plus/minus one can be done more
  // cheaply as a shift+add/sub. For now, this is true unilaterally. If
  // future CPUs have a cheaper MADD instruction, this may need to be
  // gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and
  // 64-bit is 5 cycles, so this is always a win.
  // More aggressively, some multiplications N0 * C can be lowered to
  // shift+add+shift if the constant C = A * B where A = 2^N + 1 and B = 2^M,
  // e.g. 6=3*2=(2+1)*2.
  // TODO: consider lowering more cases, e.g. C = 14, -6, -14 or even 45
  // which equals (1+2)*16-(1+2).
  // TrailingZeroes is used to test if the mul can be lowered to
  // shift+add+shift.
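  // Worked example (added for illustration): for C = 6 we get
  // TrailingZeroes = 1 and ShiftedConstValue = 3, so x * 6 is rewritten below
  // as shl(add(shl(x, 1), x), 1), i.e. ((x << 1) + x) << 1.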
  unsigned TrailingZeroes = ConstValue.countr_zero();
  if (TrailingZeroes) {
    // Conservatively do not lower to shift+add+shift if the mul might be
    // folded into smul or umul.
    if (MRI.hasOneNonDBGUse(LHS) &&
        (isSignExtended(LHS, MRI) || isZeroExtended(LHS, MRI)))
      return false;
    // Conservatively do not lower to shift+add+shift if the mul might be
    // folded into madd or msub.
    if (MRI.hasOneNonDBGUse(Dst)) {
      MachineInstr &UseMI = *MRI.use_instr_begin(Dst);
      unsigned UseOpc = UseMI.getOpcode();
      if (UseOpc == TargetOpcode::G_ADD || UseOpc == TargetOpcode::G_PTR_ADD ||
          UseOpc == TargetOpcode::G_SUB)
        return false;
    }
  }
  // Use ShiftedConstValue instead of ConstValue to support both shift+add/sub
  // and shift+add+shift.
  APInt ShiftedConstValue = ConstValue.ashr(TrailingZeroes);

  unsigned ShiftAmt, AddSubOpc;
  // Is the shifted value the LHS operand of the add/sub?
  bool ShiftValUseIsLHS = true;
  // Do we need to negate the result?
  bool NegateResult = false;

  if (ConstValue.isNonNegative()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, (2^N + 1) * 2^M) => (shl (add (shl x, N), x), M)
    APInt SCVMinus1 = ShiftedConstValue - 1;
    APInt CVPlus1 = ConstValue + 1;
    if (SCVMinus1.isPowerOf2()) {
      ShiftAmt = SCVMinus1.logBase2();
      AddSubOpc = TargetOpcode::G_ADD;
    } else if (CVPlus1.isPowerOf2()) {
      ShiftAmt = CVPlus1.logBase2();
      AddSubOpc = TargetOpcode::G_SUB;
    } else
      return false;
  } else {
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
    // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
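    // Worked examples (added for illustration): C = -3 = -(2^2 - 1) becomes
    // sub(x, shl(x, 2)), and C = -5 = -(2^2 + 1) becomes
    // sub(0, add(shl(x, 2), x)).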
    APInt CVNegPlus1 = -ConstValue + 1;
    APInt CVNegMinus1 = -ConstValue - 1;
    if (CVNegPlus1.isPowerOf2()) {
      ShiftAmt = CVNegPlus1.logBase2();
      AddSubOpc = TargetOpcode::G_SUB;
      ShiftValUseIsLHS = false;
    } else if (CVNegMinus1.isPowerOf2()) {
      ShiftAmt = CVNegMinus1.logBase2();
      AddSubOpc = TargetOpcode::G_ADD;
      NegateResult = true;
    } else
      return false;
  }

  if (NegateResult && TrailingZeroes)
    return false;

  ApplyFn = [=](MachineIRBuilder &B, Register DstReg) {
    auto Shift = B.buildConstant(LLT::scalar(64), ShiftAmt);
    auto ShiftedVal = B.buildShl(Ty, LHS, Shift);

    Register AddSubLHS = ShiftValUseIsLHS ? ShiftedVal.getReg(0) : LHS;
    Register AddSubRHS = ShiftValUseIsLHS ? LHS : ShiftedVal.getReg(0);
    auto Res = B.buildInstr(AddSubOpc, {Ty}, {AddSubLHS, AddSubRHS});
    assert(!(NegateResult && TrailingZeroes) &&
           "NegateResult and TrailingZeroes cannot both be true for now.");
    // Negate the result.
    if (NegateResult) {
      B.buildSub(DstReg, B.buildConstant(Ty, 0), Res);
      return;
    }
    // Shift the result.
    if (TrailingZeroes) {
      B.buildShl(DstReg, Res, B.buildConstant(LLT::scalar(64), TrailingZeroes));
      return;
    }
    B.buildCopy(DstReg, Res.getReg(0));
  };
  return true;
}

void applyAArch64MulConstCombine(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B,
    std::function<void(MachineIRBuilder &B, Register DstReg)> &ApplyFn) {
  B.setInstrAndDebugLoc(MI);
  ApplyFn(B, MI.getOperand(0).getReg());
  MI.eraseFromParent();
}

/// Try to fold a G_MERGE_VALUES of 2 s32 sources, where the second source
/// is a zero, into a G_ZEXT of the first.
bool matchFoldMergeToZext(MachineInstr &MI, MachineRegisterInfo &MRI) {
  auto &Merge = cast<GMerge>(MI);
  LLT SrcTy = MRI.getType(Merge.getSourceReg(0));
  if (SrcTy != LLT::scalar(32) || Merge.getNumSources() != 2)
    return false;
  return mi_match(Merge.getSourceReg(1), MRI, m_SpecificICst(0));
}

void applyFoldMergeToZext(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &B, GISelChangeObserver &Observer) {
  // Mutate %d(s64) = G_MERGE_VALUES %a(s32), 0(s32)
  //  ->
  // %d(s64) = G_ZEXT %a(s32)
  Observer.changingInstr(MI);
  MI.setDesc(B.getTII().get(TargetOpcode::G_ZEXT));
  MI.removeOperand(2);
  Observer.changedInstr(MI);
}

/// \returns True if a G_ANYEXT instruction \p MI should be mutated to a G_ZEXT
/// instruction.
bool matchMutateAnyExtToZExt(MachineInstr &MI, MachineRegisterInfo &MRI) {
  // If this is coming from a scalar compare then we can use a G_ZEXT instead of
  // a G_ANYEXT:
  //
  // %cmp:_(s32) = G_[I|F]CMP ... <-- produces 0/1.
  // %ext:_(s64) = G_ANYEXT %cmp(s32)
  //
  // By doing this, we can leverage more KnownBits combines.
  assert(MI.getOpcode() == TargetOpcode::G_ANYEXT);
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  return MRI.getType(Dst).isScalar() &&
         mi_match(Src, MRI,
                  m_any_of(m_GICmp(m_Pred(), m_Reg(), m_Reg()),
                           m_GFCmp(m_Pred(), m_Reg(), m_Reg())));
}

void applyMutateAnyExtToZExt(MachineInstr &MI, MachineRegisterInfo &MRI,
                             MachineIRBuilder &B,
                             GISelChangeObserver &Observer) {
  Observer.changingInstr(MI);
  MI.setDesc(B.getTII().get(TargetOpcode::G_ZEXT));
  Observer.changedInstr(MI);
}

/// Match a 128-bit store of zero and split it into two 64-bit stores, for
/// size/performance reasons.
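///
/// Illustrative sketch (hand-written, not taken from a test): a store such as
///   %zero:_(<2 x s64>) = G_BUILD_VECTOR %c0(s64), %c0(s64) ; where %c0 = 0
///   G_STORE %zero(<2 x s64>), %ptr(p0) :: (store (<2 x s64>))
/// becomes two s64 stores of zero, at %ptr and at %ptr + 8.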
bool matchSplitStoreZero128(MachineInstr &MI, MachineRegisterInfo &MRI) {
  GStore &Store = cast<GStore>(MI);
  if (!Store.isSimple())
    return false;
  LLT ValTy = MRI.getType(Store.getValueReg());
  if (!ValTy.isVector() || ValTy.getSizeInBits() != 128)
    return false;
  if (ValTy.getSizeInBits() != Store.getMemSizeInBits())
    return false; // Don't split truncating stores.
  if (!MRI.hasOneNonDBGUse(Store.getValueReg()))
    return false;
  auto MaybeCst = isConstantOrConstantSplatVector(
      *MRI.getVRegDef(Store.getValueReg()), MRI);
  return MaybeCst && MaybeCst->isZero();
}

void applySplitStoreZero128(MachineInstr &MI, MachineRegisterInfo &MRI,
                            MachineIRBuilder &B,
                            GISelChangeObserver &Observer) {
  B.setInstrAndDebugLoc(MI);
  GStore &Store = cast<GStore>(MI);
  assert(MRI.getType(Store.getValueReg()).isVector() &&
         "Expected a vector store value");
  LLT NewTy = LLT::scalar(64);
  Register PtrReg = Store.getPointerReg();
  auto Zero = B.buildConstant(NewTy, 0);
  auto HighPtr = B.buildPtrAdd(MRI.getType(PtrReg), PtrReg,
                               B.buildConstant(LLT::scalar(64), 8));
  auto &MF = *MI.getMF();
  auto *LowMMO = MF.getMachineMemOperand(&Store.getMMO(), 0, NewTy);
  auto *HighMMO = MF.getMachineMemOperand(&Store.getMMO(), 8, NewTy);
  B.buildStore(Zero, PtrReg, *LowMMO);
  B.buildStore(Zero, HighPtr, *HighMMO);
  Store.eraseFromParent();
}

class AArch64PostLegalizerCombinerImpl : public GIMatchTableExecutor {
protected:
  CombinerHelper &Helper;
  const AArch64PostLegalizerCombinerImplRuleConfig &RuleConfig;

  const AArch64Subtarget &STI;
  MachineRegisterInfo &MRI;
  GISelChangeObserver &Observer;
  MachineIRBuilder &B;
  MachineFunction &MF;

public:
  AArch64PostLegalizerCombinerImpl(
      const AArch64PostLegalizerCombinerImplRuleConfig &RuleConfig,
      const AArch64Subtarget &STI, GISelChangeObserver &Observer,
      MachineIRBuilder &B, CombinerHelper &Helper);

  static const char *getName() { return "AArch64PostLegalizerCombiner"; }

  bool tryCombineAll(MachineInstr &I) const;

private:
#define GET_GICOMBINER_CLASS_MEMBERS
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef GET_GICOMBINER_CLASS_MEMBERS
};

#define GET_GICOMBINER_IMPL
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef GET_GICOMBINER_IMPL

AArch64PostLegalizerCombinerImpl::AArch64PostLegalizerCombinerImpl(
    const AArch64PostLegalizerCombinerImplRuleConfig &RuleConfig,
    const AArch64Subtarget &STI, GISelChangeObserver &Observer,
    MachineIRBuilder &B, CombinerHelper &Helper)
    : Helper(Helper), RuleConfig(RuleConfig), STI(STI), MRI(*B.getMRI()),
      Observer(Observer), B(B), MF(B.getMF()),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef GET_GICOMBINER_CONSTRUCTOR_INITS
{
}

class AArch64PostLegalizerCombinerInfo : public CombinerInfo {
  GISelKnownBits *KB;
  MachineDominatorTree *MDT;

public:
  AArch64PostLegalizerCombinerImplRuleConfig RuleConfig;

  AArch64PostLegalizerCombinerInfo(bool EnableOpt, bool OptSize, bool MinSize,
                                   GISelKnownBits *KB,
                                   MachineDominatorTree *MDT)
      : CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
                     /*LegalizerInfo*/ nullptr, EnableOpt, OptSize, MinSize),
        KB(KB), MDT(MDT) {
    if (!RuleConfig.parseCommandLineOption())
      report_fatal_error("Invalid rule identifier");
  }

  bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
               MachineIRBuilder &B) const override;
};

bool AArch64PostLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
                                               MachineInstr &MI,
                                               MachineIRBuilder &B) const {
  const auto &STI = MI.getMF()->getSubtarget<AArch64Subtarget>();
  const auto *LI = STI.getLegalizerInfo();
  CombinerHelper Helper(Observer, B, /*IsPreLegalize*/ false, KB, MDT, LI);
  AArch64PostLegalizerCombinerImpl Impl(RuleConfig, STI, Observer, B, Helper);
  Impl.setupMF(*MI.getMF(), KB);
  return Impl.tryCombineAll(MI);
}

class AArch64PostLegalizerCombiner : public MachineFunctionPass {
public:
  static char ID;

  AArch64PostLegalizerCombiner(bool IsOptNone = false);

  StringRef getPassName() const override {
    return "AArch64PostLegalizerCombiner";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

private:
  bool IsOptNone;
};
} // end anonymous namespace

void AArch64PostLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.setPreservesCFG();
  getSelectionDAGFallbackAnalysisUsage(AU);
  AU.addRequired<GISelKnownBitsAnalysis>();
  AU.addPreserved<GISelKnownBitsAnalysis>();
  if (!IsOptNone) {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.addRequired<GISelCSEAnalysisWrapperPass>();
    AU.addPreserved<GISelCSEAnalysisWrapperPass>();
  }
  MachineFunctionPass::getAnalysisUsage(AU);
}

AArch64PostLegalizerCombiner::AArch64PostLegalizerCombiner(bool IsOptNone)
    : MachineFunctionPass(ID), IsOptNone(IsOptNone) {
  initializeAArch64PostLegalizerCombinerPass(*PassRegistry::getPassRegistry());
}

bool AArch64PostLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;
  assert(MF.getProperties().hasProperty(
             MachineFunctionProperties::Property::Legalized) &&
         "Expected a legalized function?");
  auto *TPC = &getAnalysis<TargetPassConfig>();
  const Function &F = MF.getFunction();
  bool EnableOpt =
      MF.getTarget().getOptLevel() != CodeGenOpt::None && !skipFunction(F);
  GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
  MachineDominatorTree *MDT =
      IsOptNone ? nullptr : &getAnalysis<MachineDominatorTree>();
  AArch64PostLegalizerCombinerInfo PCInfo(EnableOpt, F.hasOptSize(),
                                          F.hasMinSize(), KB, MDT);
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  auto *CSEInfo = &Wrapper.get(TPC->getCSEConfig());
  Combiner C(PCInfo, TPC);
  return C.combineMachineInstrs(MF, CSEInfo);
}

char AArch64PostLegalizerCombiner::ID = 0;
INITIALIZE_PASS_BEGIN(AArch64PostLegalizerCombiner, DEBUG_TYPE,
                      "Combine AArch64 MachineInstrs after legalization", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
INITIALIZE_PASS_END(AArch64PostLegalizerCombiner, DEBUG_TYPE,
                    "Combine AArch64 MachineInstrs after legalization", false,
                    false)

namespace llvm {
FunctionPass *createAArch64PostLegalizerCombiner(bool IsOptNone) {
  return new AArch64PostLegalizerCombiner(IsOptNone);
}
} // end namespace llvm
493