//===- AArch64GlobalISelUtils.cpp --------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file Implementations of AArch64-specific helper functions used in the
/// GlobalISel pipeline.
//===----------------------------------------------------------------------===//
#include "AArch64GlobalISelUtils.h"
#include "AArch64InstrInfo.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

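// Match a vector splat: either a generic splat recognized by
// getVectorSplat(), or an AArch64 G_DUP. For a G_DUP, prefer returning the
// constant value when the source register can be resolved to one.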
std::optional<RegOrConstant>
AArch64GISelUtils::getAArch64VectorSplat(const MachineInstr &MI,
                                         const MachineRegisterInfo &MRI) {
  if (auto Splat = getVectorSplat(MI, MRI))
    return Splat;
  if (MI.getOpcode() != AArch64::G_DUP)
    return std::nullopt;
  Register Src = MI.getOperand(1).getReg();
  if (auto ValAndVReg =
          getAnyConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI))
    return RegOrConstant(ValAndVReg->Value.getSExtValue());
  return RegOrConstant(Src);
}

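// As above, but only succeed when the splat value is a constant, returned
// as a scalar.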
std::optional<int64_t>
AArch64GISelUtils::getAArch64VectorSplatScalar(const MachineInstr &MI,
                                               const MachineRegisterInfo &MRI) {
  auto Splat = getAArch64VectorSplat(MI, MRI);
  if (!Splat || Splat->isReg())
    return std::nullopt;
  return Splat->getCst();
}

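// Return true if \p MaybeSub is a G_SUB from zero feeding an equality
// compare with predicate \p Pred, i.e. a pattern that can be selected as a
// CMN (compare with the negated operand).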
bool AArch64GISelUtils::isCMN(const MachineInstr *MaybeSub,
                              const CmpInst::Predicate &Pred,
                              const MachineRegisterInfo &MRI) {
  // Match:
  //
  // %sub = G_SUB 0, %y
  // %cmp = G_ICMP eq/ne, %sub, %z
  //
  // Or
  //
  // %sub = G_SUB 0, %y
  // %cmp = G_ICMP eq/ne, %z, %sub
  if (!MaybeSub || MaybeSub->getOpcode() != TargetOpcode::G_SUB ||
      !CmpInst::isEquality(Pred))
    return false;
  auto MaybeZero =
      getIConstantVRegValWithLookThrough(MaybeSub->getOperand(1).getReg(), MRI);
  return MaybeZero && MaybeZero->Value.getZExtValue() == 0;
}

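// Try to replace a G_MEMSET of zero with a G_BZERO. This only fires when a
// bzero libcall is available, and is skipped for small known sizes (<= 256
// bytes) unless we are optimizing for minimum size. Returns true if \p MI
// was replaced.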
bool AArch64GISelUtils::tryEmitBZero(MachineInstr &MI,
                                     MachineIRBuilder &MIRBuilder,
                                     bool MinSize) {
  assert(MI.getOpcode() == TargetOpcode::G_MEMSET);
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
  if (!TLI.getLibcallName(RTLIB::BZERO))
    return false;
  auto Zero =
      getIConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI);
  if (!Zero || Zero->Value.getSExtValue() != 0)
    return false;

  // It's not faster to use bzero rather than memset for sizes <= 256.
  // However, it *does* save us a mov from wzr, so if we're going for
  // minsize, use bzero even if it's slower.
  if (!MinSize) {
    // If the size is known, check it. If it is not known, assume using bzero is
    // better.
    if (auto Size = getIConstantVRegValWithLookThrough(
            MI.getOperand(2).getReg(), MRI)) {
      if (Size->Value.getSExtValue() <= 256)
        return false;
    }
  }

  MIRBuilder.setInstrAndDebugLoc(MI);
  MIRBuilder
      .buildInstr(TargetOpcode::G_BZERO, {},
                  {MI.getOperand(0), MI.getOperand(2)})
      .addImm(MI.getOperand(3).getImm())
      .addMemOperand(*MI.memoperands_begin());
  MI.eraseFromParent();
  return true;
}

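// Map an IR floating-point compare predicate onto the AArch64 condition
// code(s) that test it. Predicates that require two flag tests (e.g. ONE,
// UEQ) also set \p CondCode2; otherwise CondCode2 is left as AL.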
void AArch64GISelUtils::changeFCMPPredToAArch64CC(
    const CmpInst::Predicate P, AArch64CC::CondCode &CondCode,
    AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}

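// Vector variant of the above. The compare-mask instructions only implement
// the ordered predicates, so unordered predicates are handled by mapping the
// inverse (ordered) predicate and asking the caller to invert the result via
// \p Invert.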
void AArch64GISelUtils::changeVectorFCMPPredToAArch64CC(
    const CmpInst::Predicate P, AArch64CC::CondCode &CondCode,
    AArch64CC::CondCode &CondCode2, bool &Invert) {
  Invert = false;
  switch (P) {
  default:
    // Mostly the scalar mappings work fine.
    changeFCMPPredToAArch64CC(P, CondCode, CondCode2);
    break;
  case CmpInst::FCMP_UNO:
    Invert = true;
    [[fallthrough]];
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GE;
    break;
  case CmpInst::FCMP_UEQ:
  case CmpInst::FCMP_ULT:
  case CmpInst::FCMP_ULE:
  case CmpInst::FCMP_UGT:
  case CmpInst::FCMP_UGE:
    // All of the compare-mask comparisons are ordered, but we can switch
    // between the two by a double inversion. E.g. ULE == !OGT.
    Invert = true;
    changeFCMPPredToAArch64CC(CmpInst::getInversePredicate(P), CondCode,
                              CondCode2);
    break;
  }
}
182