//=- AArch64Combine.td - Define AArch64 Combine Rules ---------*-tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

include "llvm/Target/GlobalISel/Combine.td"

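// Replace a G_FCONSTANT with an equivalent integer G_CONSTANT (e.g. when the
// value is only ever stored), so the constant can live on the GPR bank.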
def fconstant_to_constant : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FCONSTANT):$root,
         [{ return matchFConstantToConstant(*${root}, MRI); }]),
  (apply [{ applyFConstantToConstant(*${root}); }])>;

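// Remove a G_TRUNC feeding a G_ICMP when known bits show that comparing the
// wider source gives the same result; matchinfo is the wider source register.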
def icmp_redundant_trunc_matchdata : GIDefMatchData<"Register">;
def icmp_redundant_trunc : GICombineRule<
  (defs root:$root, icmp_redundant_trunc_matchdata:$matchinfo),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return matchICmpRedundantTrunc(*${root}, MRI, Helper.getKnownBits(), ${matchinfo}); }]),
  (apply [{ applyICmpRedundantTrunc(*${root}, MRI, B, Observer, ${matchinfo}); }])>;

// AArch64-specific offset folding for G_GLOBAL_VALUE.
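// Folds constant G_PTR_ADD offsets on the global's users into the
// G_GLOBAL_VALUE itself, so the addend can be covered by the ADRP/ADD
// addressing sequence when the global is selected.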
def fold_global_offset_matchdata : GIDefMatchData<"std::pair<uint64_t, uint64_t>">;
def fold_global_offset : GICombineRule<
  (defs root:$root, fold_global_offset_matchdata:$matchinfo),
  (match (wip_match_opcode G_GLOBAL_VALUE):$root,
          [{ return matchFoldGlobalOffset(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyFoldGlobalOffset(*${root}, MRI, B, Observer, ${matchinfo});}])
>;

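// Rewrite a G_VECREDUCE_ADD of extended (and possibly multiplied) narrow
// elements into a dot product (UDOT/SDOT) followed by an add-across-vector
// reduction; only enabled when the +dotprod feature is present.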
// Boolean: 0 = G_ZEXT, 1 = G_SEXT
def ext_addv_to_udot_addv_matchinfo : GIDefMatchData<"std::tuple<Register, Register, bool>">;
let Predicates = [HasDotProd] in {
def ext_addv_to_udot_addv : GICombineRule<
  (defs root:$root, ext_addv_to_udot_addv_matchinfo:$matchinfo),
  (match (wip_match_opcode G_VECREDUCE_ADD):$root,
         [{ return matchExtAddvToUdotAddv(*${root}, MRI, STI, ${matchinfo}); }]),
  (apply [{ applyExtAddvToUdotAddv(*${root}, MRI, B, Observer, STI, ${matchinfo}); }])
>;
}

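// Rewrite a G_VECREDUCE_ADD of an extended vector into a long add-across-
// vector reduction (UADDLV and friends), avoiding the explicit extension.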
def ext_uaddv_to_uaddlv_matchinfo : GIDefMatchData<"std::pair<Register, bool>">;
def ext_uaddv_to_uaddlv : GICombineRule<
  (defs root:$root, ext_uaddv_to_uaddlv_matchinfo:$matchinfo),
  (match (wip_match_opcode G_VECREDUCE_ADD):$root,
         [{ return matchExtUaddvToUaddlv(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyExtUaddvToUaddlv(*${root}, MRI, B, Observer, ${matchinfo}); }])
>;

def AArch64PreLegalizerCombiner: GICombiner<
  "AArch64PreLegalizerCombinerImpl", [all_combines,
                                      fconstant_to_constant,
                                      icmp_redundant_trunc,
                                      fold_global_offset,
                                      shuffle_to_extract,
                                      ext_addv_to_udot_addv,
                                      ext_uaddv_to_uaddlv]> {
  let CombineAllMethodName = "tryCombineAllImpl";
}

def AArch64O0PreLegalizerCombiner: GICombiner<
  "AArch64O0PreLegalizerCombinerImpl", [optnone_combines]> {
  let CombineAllMethodName = "tryCombineAllImpl";
}

// Matchdata for combines which replace a G_SHUFFLE_VECTOR with a
// target-specific opcode.
def shuffle_matchdata : GIDefMatchData<"ShuffleVectorPseudo">;

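// Shuffle mask that reverses elements within wider element groups ==> REV pseudo.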
def rev : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchREV(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

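// Shuffle mask that interleaves the low or high halves of the two inputs
// ==> ZIP1/ZIP2.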
def zip : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchZip(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

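// Shuffle mask that picks the even or odd elements of the concatenated inputs
// ==> UZP1/UZP2.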
def uzp : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchUZP(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

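// Shuffle mask that splats a single element ==> G_DUP.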
def dup: GICombineRule <
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchDup(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

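// Shuffle mask that transposes element pairs from the two inputs ==> TRN1/TRN2.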
def trn : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchTRN(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

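// Shuffle mask that extracts a contiguous window from the concatenation of the
// two inputs ==> EXT.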
def ext: GICombineRule <
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchEXT(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyEXT(*${root}, ${matchinfo}); }])
>;

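// Shuffle that merely inserts one lane of one input into the other
// ==> INS (lane insert); matchinfo carries the registers and lane indices.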
def shuf_to_ins_matchdata : GIDefMatchData<"std::tuple<Register, int, Register, int>">;
def shuf_to_ins: GICombineRule <
  (defs root:$root, shuf_to_ins_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchINS(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyINS(*${root}, MRI, B, ${matchinfo}); }])
>;

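// Select the immediate forms of vector shifts: a vector G_ASHR/G_LSHR with an
// in-range constant shift amount becomes the VASHR/VLSHR pseudo.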
def vashr_vlshr_imm_matchdata : GIDefMatchData<"int64_t">;
def vashr_vlshr_imm : GICombineRule<
  (defs root:$root, vashr_vlshr_imm_matchdata:$matchinfo),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
          [{ return matchVAshrLshrImm(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyVAshrLshrImm(*${root}, MRI, ${matchinfo}); }])
>;

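// G_SHUFFLE_VECTOR that broadcasts one lane of an input ==> DUPLANE pseudo;
// matchinfo is the {opcode, lane} pair.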
def form_duplane_matchdata :
  GIDefMatchData<"std::pair<unsigned, int>">;
def form_duplane : GICombineRule <
  (defs root:$root, form_duplane_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
          [{ return matchDupLane(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyDupLane(*${root}, MRI, B, ${matchinfo}); }])
>;

def shuffle_vector_lowering : GICombineGroup<[dup, rev, ext, zip, uzp, trn,
                                              form_duplane,
                                              shuf_to_ins]>;

// Turn a G_UNMERGE_VALUES into a sequence of G_EXTRACT_VECTOR_ELTs.
def vector_unmerge_lowering : GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_UNMERGE_VALUES):$root,
          [{ return matchScalarizeVectorUnmerge(*${root}, MRI); }]),
  (apply [{ applyScalarizeVectorUnmerge(*${root}, MRI, B); }])
>;

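// Adjust the immediate (and predicate) of a G_ICMP, e.g. ult C ==> ule C-1,
// when the adjusted constant is cheaper to encode as an arithmetic immediate.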
def adjust_icmp_imm_matchdata :
  GIDefMatchData<"std::pair<uint64_t, CmpInst::Predicate>">;
def adjust_icmp_imm : GICombineRule <
  (defs root:$root, adjust_icmp_imm_matchdata:$matchinfo),
  (match (wip_match_opcode G_ICMP):$root,
          [{ return matchAdjustICmpImmAndPred(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyAdjustICmpImmAndPred(*${root}, ${matchinfo}, B, Observer); }])
>;

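// Swap G_ICMP operands (inverting the predicate) when the swap is expected to
// be profitable, e.g. when the other operand can be folded into the compare.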
def swap_icmp_operands : GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_ICMP):$root,
          [{ return trySwapICmpOperands(*${root}, MRI); }]),
  (apply [{ applySwapICmpOperands(*${root}, Observer); }])
>;

def icmp_lowering : GICombineGroup<[adjust_icmp_imm, swap_icmp_operands]>;

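// Intended to rewrite an extract of lane 0 of a pairwise-add-shaped addition
// into a scalar pairwise add (FADDP/ADDP); see matchExtractVecEltPairwiseAdd.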
def extractvecelt_pairwise_add_matchdata : GIDefMatchData<"std::tuple<unsigned, LLT, Register>">;
def extractvecelt_pairwise_add : GICombineRule<
  (defs root:$root, extractvecelt_pairwise_add_matchdata:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
          [{ return matchExtractVecEltPairwiseAdd(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyExtractVecEltPairwiseAdd(*${root}, MRI, B, ${matchinfo}); }])
>;

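// Replace a G_MUL by a constant with a cheaper shift/add/sub sequence where
// profitable; the matchinfo is a callback that builds the replacement.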
def mul_const_matchdata : GIDefMatchData<"std::function<void(MachineIRBuilder&, Register)>">;
def mul_const : GICombineRule<
  (defs root:$root, mul_const_matchdata:$matchinfo),
  (match (wip_match_opcode G_MUL):$root,
          [{ return matchAArch64MulConstCombine(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyAArch64MulConstCombine(*${root}, MRI, B, ${matchinfo}); }])
>;

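// Form widening multiplies: a G_MUL of sign/zero-extended narrow vectors is
// turned into the corresponding SMULL/UMULL pattern.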
def lower_mull : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
          [{ return matchExtMulToMULL(*${root}, MRI); }]),
  (apply [{ applyExtMulToMULL(*${root}, MRI, B, Observer); }])
>;

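// A G_BUILD_VECTOR that splats a single value is lowered to G_DUP.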
def build_vector_to_dup : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
          [{ return matchBuildVectorToDup(*${root}, MRI); }]),
  (apply [{ applyBuildVectorToDup(*${root}, MRI, B); }])
>;

def build_vector_lowering : GICombineGroup<[build_vector_to_dup]>;

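// Lower a vector G_FCMP into compares using only the conditions the AArch64
// vector compare instructions support natively, inverting or combining
// compares for the remaining predicates.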
def lower_vector_fcmp : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FCMP):$root,
    [{ return matchLowerVectorFCMP(*${root}, MRI, B); }]),
  (apply [{ applyLowerVectorFCMP(*${root}, MRI, B); }])>;

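// Fold a G_TRUNC into its user G_STORE, forming a truncating store; matchinfo
// is the wide source register.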
def form_truncstore_matchdata : GIDefMatchData<"Register">;
def form_truncstore : GICombineRule<
  (defs root:$root, form_truncstore_matchdata:$matchinfo),
  (match (wip_match_opcode G_STORE):$root,
          [{ return matchFormTruncstore(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyFormTruncstore(*${root}, MRI, B, Observer, ${matchinfo}); }])
>;

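// Fold a G_MERGE_VALUES whose high part is known to be zero into a G_ZEXT of
// the low part.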
def fold_merge_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_MERGE_VALUES):$d,
          [{ return matchFoldMergeToZext(*${d}, MRI); }]),
  (apply [{ applyFoldMergeToZext(*${d}, MRI, B, Observer); }])
>;

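// Turn G_ANYEXT into G_ZEXT: zero-extension is essentially free on AArch64,
// since 32-bit operations implicitly clear the upper 32 bits of the register.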
def mutate_anyext_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_ANYEXT):$d,
          [{ return matchMutateAnyExtToZExt(*${d}, MRI); }]),
  (apply [{ applyMutateAnyExtToZExt(*${d}, MRI, B, Observer); }])
>;

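// Split a 128-bit store of zero into two 64-bit stores of XZR so it can be
// selected as STP XZR, XZR.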
def split_store_zero_128 : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_STORE):$d,
          [{ return matchSplitStoreZero128(*${d}, MRI); }]),
  (apply [{ applySplitStoreZero128(*${d}, MRI, B, Observer); }])
>;

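// Lower a vector G_SEXT_INREG into a shift-left / arithmetic-shift-right pair.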
def vector_sext_inreg_to_shift : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_SEXT_INREG):$d,
          [{ return matchVectorSextInReg(*${d}, MRI); }]),
  (apply [{ applyVectorSextInReg(*${d}, MRI, B, Observer); }])
>;

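// Intended to fold a vector extend feeding a G_UNMERGE_VALUES so the unmerge
// operates on the unextended source; matchinfo is that source register.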
def unmerge_ext_to_unmerge_matchdata : GIDefMatchData<"Register">;
def unmerge_ext_to_unmerge : GICombineRule<
  (defs root:$d, unmerge_ext_to_unmerge_matchdata:$matchinfo),
  (match (wip_match_opcode G_UNMERGE_VALUES):$d,
          [{ return matchUnmergeExtToUnmerge(*${d}, MRI, ${matchinfo}); }]),
  (apply [{ applyUnmergeExtToUnmerge(*${d}, MRI, B, Observer, ${matchinfo}); }])
>;

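// Match the bitwise-select pattern G_OR (G_AND x, mask), (G_AND y, ~mask) and
// replace it with a BSP pseudo; matchinfo is the three source registers.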
def regtriple_matchdata : GIDefMatchData<"std::tuple<Register, Register, Register>">;
def or_to_bsp: GICombineRule <
  (defs root:$root, regtriple_matchdata:$matchinfo),
  (match (wip_match_opcode G_OR):$root,
         [{ return matchOrToBSP(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyOrToBSP(*${root}, MRI, B, ${matchinfo}); }])
>;

// Post-legalization combines which should happen at all optimization levels
// (e.g. ones that facilitate matching for the selector), such as forming
// target-specific pseudos.
def AArch64PostLegalizerLowering
    : GICombiner<"AArch64PostLegalizerLoweringImpl",
                       [shuffle_vector_lowering, vashr_vlshr_imm,
                        icmp_lowering, build_vector_lowering,
                        lower_vector_fcmp, form_truncstore,
                        vector_sext_inreg_to_shift,
                        unmerge_ext_to_unmerge, lower_mull,
                        vector_unmerge_lowering]> {
}

// Post-legalization combines which are primarily optimizations.
def AArch64PostLegalizerCombiner
    : GICombiner<"AArch64PostLegalizerCombinerImpl",
                       [copy_prop, combines_for_extload,
                        combine_indexed_load_store,
                        sext_trunc_sextload, mutate_anyext_to_zext,
                        hoist_logic_op_with_same_opcode_hands,
                        redundant_and, xor_of_and_with_same_reg,
                        extractvecelt_pairwise_add, redundant_or,
                        mul_const, redundant_sext_inreg,
                        form_bitfield_extract, rotate_out_of_range,
                        icmp_to_true_false_known_bits, merge_unmerge,
                        select_combines, fold_merge_to_zext,
                        constant_fold_binops, identity_combines,
                        ptr_add_immed_chain, overlapping_and,
                        split_store_zero_128, undef_combines,
                        select_to_minmax, or_to_bsp]> {
}