1e8d8bef9SDimitry Andric //=== AArch64PostSelectOptimize.cpp ---------------------------------------===//
2e8d8bef9SDimitry Andric //
3e8d8bef9SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4e8d8bef9SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
5e8d8bef9SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6e8d8bef9SDimitry Andric //
7e8d8bef9SDimitry Andric //===----------------------------------------------------------------------===//
8e8d8bef9SDimitry Andric //
9e8d8bef9SDimitry Andric // This pass does post-instruction-selection optimizations in the GlobalISel
10e8d8bef9SDimitry Andric // pipeline, before the rest of codegen runs.
11e8d8bef9SDimitry Andric //
12e8d8bef9SDimitry Andric //===----------------------------------------------------------------------===//
13e8d8bef9SDimitry Andric
14e8d8bef9SDimitry Andric #include "AArch64.h"
15e8d8bef9SDimitry Andric #include "AArch64TargetMachine.h"
16e8d8bef9SDimitry Andric #include "MCTargetDesc/AArch64MCTargetDesc.h"
17bdd1243dSDimitry Andric #include "llvm/ADT/STLExtras.h"
18fe6060f1SDimitry Andric #include "llvm/CodeGen/GlobalISel/Utils.h"
19e8d8bef9SDimitry Andric #include "llvm/CodeGen/MachineBasicBlock.h"
20e8d8bef9SDimitry Andric #include "llvm/CodeGen/MachineFunctionPass.h"
21bdd1243dSDimitry Andric #include "llvm/CodeGen/MachineInstr.h"
22e8d8bef9SDimitry Andric #include "llvm/CodeGen/MachineOperand.h"
23e8d8bef9SDimitry Andric #include "llvm/CodeGen/TargetPassConfig.h"
24e8d8bef9SDimitry Andric #include "llvm/Support/Debug.h"
25bdd1243dSDimitry Andric #include "llvm/Support/ErrorHandling.h"
26e8d8bef9SDimitry Andric
27e8d8bef9SDimitry Andric #define DEBUG_TYPE "aarch64-post-select-optimize"
28e8d8bef9SDimitry Andric
29e8d8bef9SDimitry Andric using namespace llvm;
30e8d8bef9SDimitry Andric
namespace {
/// Post-instruction-selection pass for GlobalISel-selected AArch64 functions.
/// Runs a handful of MIR cleanups (dead NZCV defs, copy folds) before the
/// rest of codegen.
class AArch64PostSelectOptimize : public MachineFunctionPass {
public:
  static char ID;

  AArch64PostSelectOptimize();

  StringRef getPassName() const override {
    return "AArch64 Post Select Optimizer";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;

private:
  /// Convert instructions with a dead implicit-def of NZCV in \p MBB to their
  /// non-flag-setting variants, or mark the NZCV def as dead.
  bool optimizeNZCVDefs(MachineBasicBlock &MBB);
  /// Run the per-instruction peepholes below over every instruction in \p MBB.
  bool doPeepholeOpts(MachineBasicBlock &MBB);
  /// Look for cross regclass copies that can be trivially eliminated.
  bool foldSimpleCrossClassCopies(MachineInstr &MI);
  /// Fold COPY(y:GPR, DUP(x:FPR, i)) into UMOV(y:GPR, x:FPR, i).
  bool foldCopyDup(MachineInstr &MI);
};
} // end anonymous namespace
54e8d8bef9SDimitry Andric
getAnalysisUsage(AnalysisUsage & AU) const55e8d8bef9SDimitry Andric void AArch64PostSelectOptimize::getAnalysisUsage(AnalysisUsage &AU) const {
56e8d8bef9SDimitry Andric AU.addRequired<TargetPassConfig>();
57e8d8bef9SDimitry Andric AU.setPreservesCFG();
58e8d8bef9SDimitry Andric getSelectionDAGFallbackAnalysisUsage(AU);
59e8d8bef9SDimitry Andric MachineFunctionPass::getAnalysisUsage(AU);
60e8d8bef9SDimitry Andric }
61e8d8bef9SDimitry Andric
// Register the pass with the global PassRegistry on construction.
AArch64PostSelectOptimize::AArch64PostSelectOptimize()
    : MachineFunctionPass(ID) {
  initializeAArch64PostSelectOptimizePass(*PassRegistry::getPassRegistry());
}
66e8d8bef9SDimitry Andric
getNonFlagSettingVariant(unsigned Opc)67e8d8bef9SDimitry Andric unsigned getNonFlagSettingVariant(unsigned Opc) {
68e8d8bef9SDimitry Andric switch (Opc) {
69e8d8bef9SDimitry Andric default:
70e8d8bef9SDimitry Andric return 0;
71e8d8bef9SDimitry Andric case AArch64::SUBSXrr:
72e8d8bef9SDimitry Andric return AArch64::SUBXrr;
73e8d8bef9SDimitry Andric case AArch64::SUBSWrr:
74e8d8bef9SDimitry Andric return AArch64::SUBWrr;
75e8d8bef9SDimitry Andric case AArch64::SUBSXrs:
76e8d8bef9SDimitry Andric return AArch64::SUBXrs;
7706c3fb27SDimitry Andric case AArch64::SUBSWrs:
7806c3fb27SDimitry Andric return AArch64::SUBWrs;
79e8d8bef9SDimitry Andric case AArch64::SUBSXri:
80e8d8bef9SDimitry Andric return AArch64::SUBXri;
81e8d8bef9SDimitry Andric case AArch64::SUBSWri:
82e8d8bef9SDimitry Andric return AArch64::SUBWri;
8306c3fb27SDimitry Andric case AArch64::ADDSXrr:
8406c3fb27SDimitry Andric return AArch64::ADDXrr;
8506c3fb27SDimitry Andric case AArch64::ADDSWrr:
8606c3fb27SDimitry Andric return AArch64::ADDWrr;
8706c3fb27SDimitry Andric case AArch64::ADDSXrs:
8806c3fb27SDimitry Andric return AArch64::ADDXrs;
8906c3fb27SDimitry Andric case AArch64::ADDSWrs:
9006c3fb27SDimitry Andric return AArch64::ADDWrs;
9106c3fb27SDimitry Andric case AArch64::ADDSXri:
9206c3fb27SDimitry Andric return AArch64::ADDXri;
9306c3fb27SDimitry Andric case AArch64::ADDSWri:
9406c3fb27SDimitry Andric return AArch64::ADDWri;
9506c3fb27SDimitry Andric case AArch64::SBCSXr:
9606c3fb27SDimitry Andric return AArch64::SBCXr;
9706c3fb27SDimitry Andric case AArch64::SBCSWr:
9806c3fb27SDimitry Andric return AArch64::SBCWr;
9906c3fb27SDimitry Andric case AArch64::ADCSXr:
10006c3fb27SDimitry Andric return AArch64::ADCXr;
10106c3fb27SDimitry Andric case AArch64::ADCSWr:
10206c3fb27SDimitry Andric return AArch64::ADCWr;
103e8d8bef9SDimitry Andric }
104e8d8bef9SDimitry Andric }
105e8d8bef9SDimitry Andric
doPeepholeOpts(MachineBasicBlock & MBB)106bdd1243dSDimitry Andric bool AArch64PostSelectOptimize::doPeepholeOpts(MachineBasicBlock &MBB) {
107bdd1243dSDimitry Andric bool Changed = false;
108bdd1243dSDimitry Andric for (auto &MI : make_early_inc_range(make_range(MBB.begin(), MBB.end()))) {
109*0fca6ea1SDimitry Andric bool CurrentIterChanged = foldSimpleCrossClassCopies(MI);
110*0fca6ea1SDimitry Andric if (!CurrentIterChanged)
111*0fca6ea1SDimitry Andric CurrentIterChanged |= foldCopyDup(MI);
112*0fca6ea1SDimitry Andric Changed |= CurrentIterChanged;
113bdd1243dSDimitry Andric }
114bdd1243dSDimitry Andric return Changed;
115bdd1243dSDimitry Andric }
116bdd1243dSDimitry Andric
foldSimpleCrossClassCopies(MachineInstr & MI)117bdd1243dSDimitry Andric bool AArch64PostSelectOptimize::foldSimpleCrossClassCopies(MachineInstr &MI) {
118bdd1243dSDimitry Andric auto *MF = MI.getMF();
119bdd1243dSDimitry Andric auto &MRI = MF->getRegInfo();
120bdd1243dSDimitry Andric
121bdd1243dSDimitry Andric if (!MI.isCopy())
122bdd1243dSDimitry Andric return false;
123bdd1243dSDimitry Andric
124bdd1243dSDimitry Andric if (MI.getOperand(1).getSubReg())
125bdd1243dSDimitry Andric return false; // Don't deal with subreg copies
126bdd1243dSDimitry Andric
127bdd1243dSDimitry Andric Register Src = MI.getOperand(1).getReg();
128bdd1243dSDimitry Andric Register Dst = MI.getOperand(0).getReg();
129bdd1243dSDimitry Andric
130bdd1243dSDimitry Andric if (Src.isPhysical() || Dst.isPhysical())
131bdd1243dSDimitry Andric return false;
132bdd1243dSDimitry Andric
133bdd1243dSDimitry Andric const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
134bdd1243dSDimitry Andric const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
135bdd1243dSDimitry Andric
136bdd1243dSDimitry Andric if (SrcRC == DstRC)
137bdd1243dSDimitry Andric return false;
138bdd1243dSDimitry Andric
139bdd1243dSDimitry Andric
140bdd1243dSDimitry Andric if (SrcRC->hasSubClass(DstRC)) {
141bdd1243dSDimitry Andric // This is the case where the source class is a superclass of the dest, so
142bdd1243dSDimitry Andric // if the copy is the only user of the source, we can just constrain the
143bdd1243dSDimitry Andric // source reg to the dest class.
144bdd1243dSDimitry Andric
145bdd1243dSDimitry Andric if (!MRI.hasOneNonDBGUse(Src))
146bdd1243dSDimitry Andric return false; // Only constrain single uses of the source.
147bdd1243dSDimitry Andric
148bdd1243dSDimitry Andric // Constrain to dst reg class as long as it's not a weird class that only
149bdd1243dSDimitry Andric // has a few registers.
150bdd1243dSDimitry Andric if (!MRI.constrainRegClass(Src, DstRC, /* MinNumRegs */ 25))
151bdd1243dSDimitry Andric return false;
152bdd1243dSDimitry Andric } else if (DstRC->hasSubClass(SrcRC)) {
153bdd1243dSDimitry Andric // This is the inverse case, where the destination class is a superclass of
154bdd1243dSDimitry Andric // the source. Here, if the copy is the only user, we can just constrain
155bdd1243dSDimitry Andric // the user of the copy to use the smaller class of the source.
156bdd1243dSDimitry Andric } else {
157bdd1243dSDimitry Andric return false;
158bdd1243dSDimitry Andric }
159bdd1243dSDimitry Andric
160bdd1243dSDimitry Andric MRI.replaceRegWith(Dst, Src);
161bdd1243dSDimitry Andric MI.eraseFromParent();
162bdd1243dSDimitry Andric return true;
163bdd1243dSDimitry Andric }
164bdd1243dSDimitry Andric
/// Fold COPY(y:GPR, DUP(x:FPR, i)) into UMOV(y:GPR, x:FPR, i), removing the
/// intermediate FPR register and the DUP. Returns true if \p MI was replaced.
bool AArch64PostSelectOptimize::foldCopyDup(MachineInstr &MI) {
  if (!MI.isCopy())
    return false;

  auto *MF = MI.getMF();
  auto &MRI = MF->getRegInfo();
  auto *TII = MF->getSubtarget().getInstrInfo();

  // Optimize COPY(y:GPR, DUP(x:FPR, i)) -> UMOV(y:GPR, x:FPR, i).
  // Here Dst is y and Src is the result of DUP.
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  if (!Dst.isVirtual() || !Src.isVirtual())
    return false;

  // Attempt the fold for one GPR/FPR width pairing; DUP and UMOV are the
  // width-specific opcodes to match and emit.
  auto TryMatchDUP = [&](const TargetRegisterClass *GPRRegClass,
                         const TargetRegisterClass *FPRRegClass, unsigned DUP,
                         unsigned UMOV) {
    // Both sides must have exactly the expected register classes.
    if (MRI.getRegClassOrNull(Dst) != GPRRegClass ||
        MRI.getRegClassOrNull(Src) != FPRRegClass)
      return false;

    // There is a special case when one of the uses is COPY(z:FPR, y:GPR).
    // In this case, we get COPY(z:FPR, COPY(y:GPR, DUP(x:FPR, i))), which can
    // be folded by peephole-opt into just DUP(z:FPR, i), so this transform is
    // not worthwhile in that case.
    for (auto &Use : MRI.use_nodbg_instructions(Dst)) {
      if (!Use.isCopy())
        continue;

      Register UseOp0 = Use.getOperand(0).getReg();
      Register UseOp1 = Use.getOperand(1).getReg();
      if (UseOp0.isPhysical() || UseOp1.isPhysical())
        return false;

      if (MRI.getRegClassOrNull(UseOp0) == FPRRegClass &&
          MRI.getRegClassOrNull(UseOp1) == GPRRegClass)
        return false;
    }

    // The copied value must come from a single DUP whose only (non-debug)
    // user is this copy, so erasing the DUP is safe.
    MachineInstr *SrcMI = MRI.getUniqueVRegDef(Src);
    if (!SrcMI || SrcMI->getOpcode() != DUP || !MRI.hasOneNonDBGUse(Src))
      return false;

    Register DupSrc = SrcMI->getOperand(1).getReg();
    int64_t DupImm = SrcMI->getOperand(2).getImm();

    // Emit UMOV Dst, DupSrc[DupImm] in place of the copy, then delete both
    // the DUP and the copy.
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(UMOV), Dst)
        .addReg(DupSrc)
        .addImm(DupImm);
    SrcMI->eraseFromParent();
    MI.eraseFromParent();
    return true;
  };

  // Try the 32-bit pairing first, then the 64-bit one.
  return TryMatchDUP(&AArch64::GPR32RegClass, &AArch64::FPR32RegClass,
                     AArch64::DUPi32, AArch64::UMOVvi32) ||
         TryMatchDUP(&AArch64::GPR64RegClass, &AArch64::FPR64RegClass,
                     AArch64::DUPi64, AArch64::UMOVvi64);
}
226*0fca6ea1SDimitry Andric
/// Walk \p MBB backwards tracking NZCV liveness; rewrite or mark-dead any
/// instruction whose implicit NZCV def has no readers. Returns true on change.
bool AArch64PostSelectOptimize::optimizeNZCVDefs(MachineBasicBlock &MBB) {
  // If we find a dead NZCV implicit-def, we
  // - try to convert the operation to a non-flag-setting equivalent
  // - or mark the def as dead to aid later peephole optimizations.

  // Use cases:
  // 1)
  // Consider the following code:
  //  FCMPSrr %0, %1, implicit-def $nzcv
  //  %sel1:gpr32 = CSELWr %_, %_, 12, implicit $nzcv
  //  %sub:gpr32 = SUBSWrr %_, %_, implicit-def $nzcv
  //  FCMPSrr %0, %1, implicit-def $nzcv
  //  %sel2:gpr32 = CSELWr %_, %_, 12, implicit $nzcv
  // This kind of code where we have 2 FCMPs each feeding a CSEL can happen
  // when we have a single IR fcmp being used by two selects. During selection,
  // to ensure that there can be no clobbering of nzcv between the fcmp and the
  // csel, we have to generate an fcmp immediately before each csel is
  // selected.
  // However, often we can essentially CSE these together later in MachineCSE.
  // This doesn't work though if there are unrelated flag-setting instructions
  // in between the two FCMPs. In this case, the SUBS defines NZCV
  // but it doesn't have any users, being overwritten by the second FCMP.
  //
  // 2)
  // The instruction selector always emits the flag-setting variant of ADC/SBC
  // while selecting G_UADDE/G_SADDE/G_USUBE/G_SSUBE. If the carry-out of these
  // instructions is never used, we can switch to the non-flag-setting variant.

  bool Changed = false;
  auto &MF = *MBB.getParent();
  auto &Subtarget = MF.getSubtarget();
  const auto &TII = Subtarget.getInstrInfo();
  auto TRI = Subtarget.getRegisterInfo();
  auto RBI = Subtarget.getRegBankInfo();
  auto &MRI = MF.getRegInfo();

  LiveRegUnits LRU(*MBB.getParent()->getSubtarget().getRegisterInfo());
  LRU.addLiveOuts(MBB);

  // Iterate bottom-up so LRU reflects liveness *below* each instruction
  // before we inspect it.
  for (auto &II : instructionsWithoutDebug(MBB.rbegin(), MBB.rend())) {
    bool NZCVDead = LRU.available(AArch64::NZCV);
    if (NZCVDead && II.definesRegister(AArch64::NZCV, /*TRI=*/nullptr)) {
      // The instruction defines NZCV, but NZCV is dead.
      unsigned NewOpc = getNonFlagSettingVariant(II.getOpcode());
      int DeadNZCVIdx =
          II.findRegisterDefOperandIdx(AArch64::NZCV, /*TRI=*/nullptr);
      if (DeadNZCVIdx != -1) {
        if (NewOpc) {
          // If there is an equivalent non-flag-setting op, we convert.
          LLVM_DEBUG(dbgs() << "Post-select optimizer: converting flag-setting "
                               "op: "
                            << II);
          II.setDesc(TII->get(NewOpc));
          II.removeOperand(DeadNZCVIdx);
          // Changing the opcode can result in differing regclass requirements,
          // e.g. SUBSWri uses gpr32 for the dest, whereas SUBWri uses gpr32sp.
          // Constrain the regclasses, possibly introducing a copy.
          constrainOperandRegClass(MF, *TRI, MRI, *TII, *RBI, II, II.getDesc(),
                                   II.getOperand(0), 0);
          Changed |= true;
        } else {
          // Otherwise, we just set the nzcv imp-def operand to be dead, so the
          // peephole optimizations can optimize them further.
          II.getOperand(DeadNZCVIdx).setIsDead();
        }
      }
    }
    // Update liveness to account for II's own defs and uses before moving to
    // the instruction above it.
    LRU.stepBackward(II);
  }
  return Changed;
}
298e8d8bef9SDimitry Andric
runOnMachineFunction(MachineFunction & MF)299e8d8bef9SDimitry Andric bool AArch64PostSelectOptimize::runOnMachineFunction(MachineFunction &MF) {
300e8d8bef9SDimitry Andric if (MF.getProperties().hasProperty(
301e8d8bef9SDimitry Andric MachineFunctionProperties::Property::FailedISel))
302e8d8bef9SDimitry Andric return false;
303e8d8bef9SDimitry Andric assert(MF.getProperties().hasProperty(
304e8d8bef9SDimitry Andric MachineFunctionProperties::Property::Selected) &&
305e8d8bef9SDimitry Andric "Expected a selected MF");
306e8d8bef9SDimitry Andric
307e8d8bef9SDimitry Andric bool Changed = false;
308bdd1243dSDimitry Andric for (auto &BB : MF) {
309e8d8bef9SDimitry Andric Changed |= optimizeNZCVDefs(BB);
310bdd1243dSDimitry Andric Changed |= doPeepholeOpts(BB);
311bdd1243dSDimitry Andric }
312fe6060f1SDimitry Andric return Changed;
313e8d8bef9SDimitry Andric }
314e8d8bef9SDimitry Andric
// Pass identification: the address of ID uniquely identifies this pass, and
// the INITIALIZE_PASS macros register it with LLVM's pass infrastructure.
char AArch64PostSelectOptimize::ID = 0;
INITIALIZE_PASS_BEGIN(AArch64PostSelectOptimize, DEBUG_TYPE,
                      "Optimize AArch64 selected instructions",
                      false, false)
INITIALIZE_PASS_END(AArch64PostSelectOptimize, DEBUG_TYPE,
                    "Optimize AArch64 selected instructions", false,
                    false)
322e8d8bef9SDimitry Andric
namespace llvm {
/// Factory used by the AArch64 target pass pipeline to create this pass.
FunctionPass *createAArch64PostSelectOptimize() {
  return new AArch64PostSelectOptimize();
}
} // end namespace llvm
328