//=== AArch64PreLegalizerCombiner.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass does combining of machine instructions at the generic MI level,
// before the legalizer.
//
//===----------------------------------------------------------------------===//

#include "AArch64GlobalISelUtils.h"
#include "AArch64TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Debug.h"

#define GET_GICOMBINER_DEPS
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_DEPS

#define DEBUG_TYPE "aarch64-prelegalizer-combiner"

using namespace llvm;
using namespace MIPatternMatch;

namespace {

#define GET_GICOMBINER_TYPES
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_TYPES

/// Return true if a G_FCONSTANT instruction is known to be better-represented
/// as a G_CONSTANT.
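///
/// e.g. when every use is a store (register names illustrative), the float
/// constant can be rewritten as the equivalent integer bit pattern:
///
///   %f:_(s32) = G_FCONSTANT float 1.0
///     -->
///   %f:_(s32) = G_CONSTANT i32 1065353216   ; 0x3f800000, same bits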
bool matchFConstantToConstant(MachineInstr &MI, MachineRegisterInfo &MRI) {
  assert(MI.getOpcode() == TargetOpcode::G_FCONSTANT);
  Register DstReg = MI.getOperand(0).getReg();
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
  if (DstSize != 32 && DstSize != 64)
    return false;

  // When we're storing a value, it doesn't matter what register bank it's on.
  // Since not all floating point constants can be materialized using a fmov,
  // it makes more sense to just use a GPR.
  return all_of(MRI.use_nodbg_instructions(DstReg),
                [](const MachineInstr &Use) { return Use.mayStore(); });
}

/// Change a G_FCONSTANT into a G_CONSTANT.
void applyFConstantToConstant(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_FCONSTANT);
  MachineIRBuilder MIB(MI);
  const APFloat &ImmValAPF = MI.getOperand(1).getFPImm()->getValueAPF();
  MIB.buildConstant(MI.getOperand(0).getReg(), ImmValAPF.bitcastToAPInt());
  MI.eraseFromParent();
}

/// Try to match a G_ICMP of a G_TRUNC with zero, in which the truncated bits
/// are sign bits. In this case, we can transform the G_ICMP to directly compare
/// the wide value with a zero.
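///
/// e.g. when %w is known to have more than 32 sign bits (types illustrative):
///
///   %t = G_TRUNC %w(s64)
///   %c = G_ICMP eq, %t(s32), 0
///     -->
///   %c = G_ICMP eq, %w(s64), 0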
bool matchICmpRedundantTrunc(MachineInstr &MI, MachineRegisterInfo &MRI,
                             GISelKnownBits *KB, Register &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP && KB);

  auto Pred = (CmpInst::Predicate)MI.getOperand(1).getPredicate();
  if (!ICmpInst::isEquality(Pred))
    return false;

  Register LHS = MI.getOperand(2).getReg();
  LLT LHSTy = MRI.getType(LHS);
  if (!LHSTy.isScalar())
    return false;

  Register RHS = MI.getOperand(3).getReg();
  Register WideReg;

  if (!mi_match(LHS, MRI, m_GTrunc(m_Reg(WideReg))) ||
      !mi_match(RHS, MRI, m_SpecificICst(0)))
    return false;

  LLT WideTy = MRI.getType(WideReg);
  if (KB->computeNumSignBits(WideReg) <=
      WideTy.getSizeInBits() - LHSTy.getSizeInBits())
    return false;

  MatchInfo = WideReg;
  return true;
}

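/// Rewrite the G_ICMP matched by matchICmpRedundantTrunc so that it compares
/// \p WideReg directly against a zero of the wide type, bypassing the G_TRUNC.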
void applyICmpRedundantTrunc(MachineInstr &MI, MachineRegisterInfo &MRI,
                             MachineIRBuilder &Builder,
                             GISelChangeObserver &Observer, Register &WideReg) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);

  LLT WideTy = MRI.getType(WideReg);
  // We're going to directly use the wide register as the LHS, and then use a
  // zero of the same width for the RHS.
  Builder.setInstrAndDebugLoc(MI);
  auto WideZero = Builder.buildConstant(WideTy, 0);
  Observer.changingInstr(MI);
  MI.getOperand(2).setReg(WideReg);
  MI.getOperand(3).setReg(WideZero.getReg(0));
  Observer.changedInstr(MI);
}

/// \returns true if it is possible to fold a constant into a G_GLOBAL_VALUE.
///
/// e.g.
///
/// %g = G_GLOBAL_VALUE @x -> %g = G_GLOBAL_VALUE @x + cst
bool matchFoldGlobalOffset(MachineInstr &MI, MachineRegisterInfo &MRI,
                           std::pair<uint64_t, uint64_t> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  MachineFunction &MF = *MI.getMF();
  auto &GlobalOp = MI.getOperand(1);
  auto *GV = GlobalOp.getGlobal();
  if (GV->isThreadLocal())
    return false;

  // Don't allow anything that could represent offsets etc.
  if (MF.getSubtarget<AArch64Subtarget>().ClassifyGlobalReference(
          GV, MF.getTarget()) != AArch64II::MO_NO_FLAG)
    return false;

  // Look for a G_GLOBAL_VALUE only used by G_PTR_ADDs against constants:
  //
  //  %g = G_GLOBAL_VALUE @x
  //  %ptr1 = G_PTR_ADD %g, cst1
  //  %ptr2 = G_PTR_ADD %g, cst2
  //  ...
  //  %ptrN = G_PTR_ADD %g, cstN
  //
  // Identify the *smallest* constant. We want to be able to form this:
  //
  //  %offset_g = G_GLOBAL_VALUE @x + min_cst
  //  %g = G_PTR_ADD %offset_g, -min_cst
  //  %ptr1 = G_PTR_ADD %g, cst1
  //  ...
  Register Dst = MI.getOperand(0).getReg();
  uint64_t MinOffset = -1ull;
  for (auto &UseInstr : MRI.use_nodbg_instructions(Dst)) {
    if (UseInstr.getOpcode() != TargetOpcode::G_PTR_ADD)
      return false;
    auto Cst = getIConstantVRegValWithLookThrough(
        UseInstr.getOperand(2).getReg(), MRI);
    if (!Cst)
      return false;
    MinOffset = std::min(MinOffset, Cst->Value.getZExtValue());
  }

  // Require that the new offset is larger than the existing one to avoid
  // infinite loops.
  uint64_t CurrOffset = GlobalOp.getOffset();
  uint64_t NewOffset = MinOffset + CurrOffset;
  if (NewOffset <= CurrOffset)
    return false;

  // Check whether folding this offset is legal. It must not go out of bounds of
  // the referenced object to avoid violating the code model, and must be
  // smaller than 2^20 because this is the largest offset expressible in all
  // object formats. (The IMAGE_REL_ARM64_PAGEBASE_REL21 relocation in COFF
  // stores an immediate signed 21 bit offset.)
  //
  // This check also prevents us from folding negative offsets, which will end
  // up being treated in the same way as large positive ones. They could also
  // cause code model violations, and aren't really common enough to matter.
  if (NewOffset >= (1 << 20))
    return false;

  Type *T = GV->getValueType();
  if (!T->isSized() ||
      NewOffset > GV->getParent()->getDataLayout().getTypeAllocSize(T))
    return false;
  MatchInfo = std::make_pair(NewOffset, MinOffset);
  return true;
}

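/// Fold the smallest G_PTR_ADD offset found by matchFoldGlobalOffset into the
/// G_GLOBAL_VALUE itself, and rebuild the original pointer by adding the
/// negated offset back. See the comment below for the full rewrite.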
void applyFoldGlobalOffset(MachineInstr &MI, MachineRegisterInfo &MRI,
                           MachineIRBuilder &B, GISelChangeObserver &Observer,
                           std::pair<uint64_t, uint64_t> &MatchInfo) {
  // Change:
  //
  //  %g = G_GLOBAL_VALUE @x
  //  %ptr1 = G_PTR_ADD %g, cst1
  //  %ptr2 = G_PTR_ADD %g, cst2
  //  ...
  //  %ptrN = G_PTR_ADD %g, cstN
  //
  // To:
  //
  //  %offset_g = G_GLOBAL_VALUE @x + min_cst
  //  %g = G_PTR_ADD %offset_g, -min_cst
  //  %ptr1 = G_PTR_ADD %g, cst1
  //  ...
  //  %ptrN = G_PTR_ADD %g, cstN
  //
  // Then, the original G_PTR_ADDs should be folded later on so that they look
  // like this:
  //
  //  %ptrN = G_PTR_ADD %offset_g, cstN - min_cst
  uint64_t Offset, MinOffset;
  std::tie(Offset, MinOffset) = MatchInfo;
  B.setInstrAndDebugLoc(MI);
  Observer.changingInstr(MI);
  auto &GlobalOp = MI.getOperand(1);
  auto *GV = GlobalOp.getGlobal();
  GlobalOp.ChangeToGA(GV, Offset, GlobalOp.getTargetFlags());
  Register Dst = MI.getOperand(0).getReg();
  Register NewGVDst = MRI.cloneVirtualRegister(Dst);
  MI.getOperand(0).setReg(NewGVDst);
  Observer.changedInstr(MI);
  B.buildPtrAdd(
      Dst, NewGVDst,
      B.buildConstant(LLT::scalar(64), -static_cast<int64_t>(MinOffset)));
}

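/// Try to rewrite a narrow (8 or 16 bit) G_UADDO whose overflow result only
/// feeds a conditional branch into a wide G_ADD plus a test of the carry-out
/// bit; the pattern and rewrite are spelled out in the comment below.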
bool tryToSimplifyUADDO(MachineInstr &MI, MachineIRBuilder &B,
                        CombinerHelper &Helper, GISelChangeObserver &Observer) {
  // Try to simplify a G_UADDO with 8 or 16 bit operands to a wide G_ADD and a
  // TBNZ if the result is only used in the no-overflow case. It is restricted
  // to cases where we know that the high bits of the operands are 0. If
  // there's an overflow, then the 9th or 17th bit must be set, which can be
  // checked using TBNZ.
  //
  // Change (for UADDOs on 8 and 16 bits):
  //
  //   %z0 = G_ASSERT_ZEXT _
  //   %op0 = G_TRUNC %z0
  //   %z1 = G_ASSERT_ZEXT _
  //   %op1 = G_TRUNC %z1
  //   %val, %cond = G_UADDO %op0, %op1
  //   G_BRCOND %cond, %error.bb
  //
  // error.bb:
  //   (no successors and no uses of %val)
  //
  // To:
  //
  //   %z0 = G_ASSERT_ZEXT _
  //   %z1 = G_ASSERT_ZEXT _
  //   %add = G_ADD %z0, %z1
  //   %val = G_TRUNC %add
  //   %bit = G_AND %add, 1 << scalar-size-in-bits(%op1)
  //   %cond = G_ICMP NE, %bit, 0
  //   G_BRCOND %cond, %error.bb

  auto &MRI = *B.getMRI();

  MachineOperand *DefOp0 = MRI.getOneDef(MI.getOperand(2).getReg());
  MachineOperand *DefOp1 = MRI.getOneDef(MI.getOperand(3).getReg());
  Register Op0Wide;
  Register Op1Wide;
  if (!mi_match(DefOp0->getParent(), MRI, m_GTrunc(m_Reg(Op0Wide))) ||
      !mi_match(DefOp1->getParent(), MRI, m_GTrunc(m_Reg(Op1Wide))))
    return false;
  LLT WideTy0 = MRI.getType(Op0Wide);
  LLT WideTy1 = MRI.getType(Op1Wide);
  Register ResVal = MI.getOperand(0).getReg();
  LLT OpTy = MRI.getType(ResVal);
  MachineInstr *Op0WideDef = MRI.getVRegDef(Op0Wide);
  MachineInstr *Op1WideDef = MRI.getVRegDef(Op1Wide);

  unsigned OpTySize = OpTy.getScalarSizeInBits();
  // First check that the G_TRUNCs feeding the G_UADDO are no-ops, because the
  // inputs have been zero-extended.
  if (Op0WideDef->getOpcode() != TargetOpcode::G_ASSERT_ZEXT ||
      Op1WideDef->getOpcode() != TargetOpcode::G_ASSERT_ZEXT ||
      OpTySize != Op0WideDef->getOperand(2).getImm() ||
      OpTySize != Op1WideDef->getOperand(2).getImm())
    return false;

  // Only scalar UADDOs with either 8 or 16 bit operands are handled.
  if (!WideTy0.isScalar() || !WideTy1.isScalar() || WideTy0 != WideTy1 ||
      OpTySize >= WideTy0.getScalarSizeInBits() ||
      (OpTySize != 8 && OpTySize != 16))
    return false;

  // The overflow-status result must be used by a branch only.
  Register ResStatus = MI.getOperand(1).getReg();
  if (!MRI.hasOneNonDBGUse(ResStatus))
    return false;
  MachineInstr *CondUser = &*MRI.use_instr_nodbg_begin(ResStatus);
  if (CondUser->getOpcode() != TargetOpcode::G_BRCOND)
    return false;

  // Make sure the computed result is only used in the no-overflow blocks.
  MachineBasicBlock *CurrentMBB = MI.getParent();
  MachineBasicBlock *FailMBB = CondUser->getOperand(1).getMBB();
  if (!FailMBB->succ_empty() || CondUser->getParent() != CurrentMBB)
    return false;
  if (any_of(MRI.use_nodbg_instructions(ResVal),
             [&MI, FailMBB, CurrentMBB](MachineInstr &I) {
               return &MI != &I &&
                      (I.getParent() == FailMBB || I.getParent() == CurrentMBB);
             }))
    return false;

  // Remove the G_UADDO.
  B.setInstrAndDebugLoc(*MI.getNextNode());
  MI.eraseFromParent();

  // Emit wide add.
  Register AddDst = MRI.cloneVirtualRegister(Op0Wide);
  B.buildInstr(TargetOpcode::G_ADD, {AddDst}, {Op0Wide, Op1Wide});

  // Emit check of the 9th or 17th bit and update users (the branch). This will
  // later be folded to TBNZ.
  Register CondBit = MRI.cloneVirtualRegister(Op0Wide);
  B.buildAnd(
      CondBit, AddDst,
      B.buildConstant(LLT::scalar(32), OpTySize == 8 ? 1 << 8 : 1 << 16));
  B.buildICmp(CmpInst::ICMP_NE, ResStatus, CondBit,
              B.buildConstant(LLT::scalar(32), 0));

  // Update G_ZEXT users of the result value. Because all uses are in the
  // no-overflow case, we know that the top bits are 0 and we can ignore ZExts.
  B.buildZExtOrTrunc(ResVal, AddDst);
  for (MachineOperand &U : make_early_inc_range(MRI.use_operands(ResVal))) {
    Register WideReg;
    if (mi_match(U.getParent(), MRI, m_GZExt(m_Reg(WideReg)))) {
      auto OldR = U.getParent()->getOperand(0).getReg();
      Observer.erasingInstr(*U.getParent());
      U.getParent()->eraseFromParent();
      Helper.replaceRegWith(MRI, OldR, AddDst);
    }
  }

  return true;
}

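/// TableGen-driven combiner implementation. The generated rules live in
/// AArch64GenPreLegalizeGICombiner.inc and are dispatched from the generated
/// tryCombineAll().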
class AArch64PreLegalizerCombinerImpl : public GIMatchTableExecutor {
protected:
  CombinerHelper &Helper;
  const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig;

  const AArch64Subtarget &STI;
  GISelChangeObserver &Observer;
  MachineIRBuilder &B;
  MachineFunction &MF;

  MachineRegisterInfo &MRI;

public:
  AArch64PreLegalizerCombinerImpl(
      const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig,
      const AArch64Subtarget &STI, GISelChangeObserver &Observer,
      MachineIRBuilder &B, CombinerHelper &Helper);

  static const char *getName() { return "AArch64PreLegalizerCombiner"; }

  bool tryCombineAll(MachineInstr &I) const;

private:
#define GET_GICOMBINER_CLASS_MEMBERS
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_CLASS_MEMBERS
};

#define GET_GICOMBINER_IMPL
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_IMPL

AArch64PreLegalizerCombinerImpl::AArch64PreLegalizerCombinerImpl(
    const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig,
    const AArch64Subtarget &STI, GISelChangeObserver &Observer,
    MachineIRBuilder &B, CombinerHelper &Helper)
    : Helper(Helper), RuleConfig(RuleConfig), STI(STI), Observer(Observer),
      B(B), MF(B.getMF()), MRI(*B.getMRI()),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_CONSTRUCTOR_INITS
{
}

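/// Combiner configuration for this pass: parses the rule-selection command
/// line option and drives AArch64PreLegalizerCombinerImpl plus a few manual
/// combines from combine().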
class AArch64PreLegalizerCombinerInfo : public CombinerInfo {
  GISelKnownBits *KB;
  MachineDominatorTree *MDT;
  AArch64PreLegalizerCombinerImplRuleConfig RuleConfig;

public:
  AArch64PreLegalizerCombinerInfo(bool EnableOpt, bool OptSize, bool MinSize,
                                  GISelKnownBits *KB, MachineDominatorTree *MDT)
      : CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
                     /*LegalizerInfo*/ nullptr, EnableOpt, OptSize, MinSize),
        KB(KB), MDT(MDT) {
    if (!RuleConfig.parseCommandLineOption())
      report_fatal_error("Invalid rule identifier");
  }

  bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
               MachineIRBuilder &B) const override;
};

bool AArch64PreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
                                              MachineInstr &MI,
                                              MachineIRBuilder &B) const {
  const auto &STI = MI.getMF()->getSubtarget<AArch64Subtarget>();
  const auto *LI = STI.getLegalizerInfo();
  CombinerHelper Helper(Observer, B, /* IsPreLegalize*/ true, KB, MDT, LI);
  AArch64PreLegalizerCombinerImpl Impl(RuleConfig, STI, Observer, B, Helper);
  Impl.setupMF(*MI.getMF(), KB);

  if (Impl.tryCombineAll(MI))
    return true;

  unsigned Opc = MI.getOpcode();
  switch (Opc) {
  case TargetOpcode::G_CONCAT_VECTORS:
    return Helper.tryCombineConcatVectors(MI);
  case TargetOpcode::G_SHUFFLE_VECTOR:
    return Helper.tryCombineShuffleVector(MI);
  case TargetOpcode::G_UADDO:
    return tryToSimplifyUADDO(MI, B, Helper, Observer);
  case TargetOpcode::G_MEMCPY_INLINE:
    return Helper.tryEmitMemcpyInline(MI);
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMSET: {
    // If we're at -O0 set a maxlen of 32 to inline, otherwise let the other
    // heuristics decide.
    unsigned MaxLen = EnableOpt ? 0 : 32;
    // Try to inline memcpy type calls if optimizations are enabled.
    if (Helper.tryCombineMemCpyFamily(MI, MaxLen))
      return true;
    if (Opc == TargetOpcode::G_MEMSET)
      return llvm::AArch64GISelUtils::tryEmitBZero(MI, B, EnableMinSize);
    return false;
  }
  }

  return false;
}

// Pass boilerplate
// ================

class AArch64PreLegalizerCombiner : public MachineFunctionPass {
public:
  static char ID;

  AArch64PreLegalizerCombiner();

  StringRef getPassName() const override {
    return "AArch64PreLegalizerCombiner";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;
};
} // end anonymous namespace

void AArch64PreLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.setPreservesCFG();
  getSelectionDAGFallbackAnalysisUsage(AU);
  AU.addRequired<GISelKnownBitsAnalysis>();
  AU.addPreserved<GISelKnownBitsAnalysis>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  AU.addPreserved<GISelCSEAnalysisWrapperPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

AArch64PreLegalizerCombiner::AArch64PreLegalizerCombiner()
    : MachineFunctionPass(ID) {
  initializeAArch64PreLegalizerCombinerPass(*PassRegistry::getPassRegistry());
}

bool AArch64PreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;
  auto &TPC = getAnalysis<TargetPassConfig>();

  // Enable CSE.
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  auto *CSEInfo = &Wrapper.get(TPC.getCSEConfig());

  const Function &F = MF.getFunction();
  bool EnableOpt =
      MF.getTarget().getOptLevel() != CodeGenOpt::None && !skipFunction(F);
  GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
  MachineDominatorTree *MDT = &getAnalysis<MachineDominatorTree>();
  AArch64PreLegalizerCombinerInfo PCInfo(EnableOpt, F.hasOptSize(),
                                         F.hasMinSize(), KB, MDT);
  Combiner C(PCInfo, &TPC);
  return C.combineMachineInstrs(MF, CSEInfo);
}

char AArch64PreLegalizerCombiner::ID = 0;
INITIALIZE_PASS_BEGIN(AArch64PreLegalizerCombiner, DEBUG_TYPE,
                      "Combine AArch64 machine instrs before legalization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(AArch64PreLegalizerCombiner, DEBUG_TYPE,
                    "Combine AArch64 machine instrs before legalization", false,
                    false)

namespace llvm {
FunctionPass *createAArch64PreLegalizerCombiner() {
  return new AArch64PreLegalizerCombiner();
}
} // end namespace llvm