//===- Combine.td - Combine rule definitions ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declare GlobalISel combine rules and provide mechanisms to opt out of them.
//
//===----------------------------------------------------------------------===//

// Common base class for GICombineRule and GICombineGroup.
class GICombine {
  // See GICombineGroup. We only declare it here to make the tablegen pass
  // simpler.
  list<GICombine> Rules = ?;
}

// A group of combine rules that can be added to a GICombiner or another group.
class GICombineGroup<list<GICombine> rules> : GICombine {
  // The rules contained in this group. The rules in a group are flattened into
  // a single list and sorted into whatever order is most efficient. However,
  // they will never be re-ordered such that behaviour differs from the
  // specified order. It is therefore possible to use the order of rules in this
  // list to describe priorities.
  let Rules = rules;
}
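
// For illustration only: the group name below is hypothetical, but its members
// are real groups/rules defined later in this file. Groups may nest, and the
// order of the entries can be used to express priority:
//
//   def my_extension_combines : GICombineGroup<[combines_for_extload,
//                                               extend_through_phis]>;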

class GICombinerHelperArg<string type, string name> {
  string Type = type;
  string Name = name;
}

// Declares a combiner helper class.
class GICombinerHelper<string classname, list<GICombine> rules>
    : GICombineGroup<rules> {
  // The class name to use in the generated output.
  string Classname = classname;
  // The name of a run-time compiler option that will be generated to disable
  // specific rules within this combiner.
  string DisableRuleOption = ?;
  // The state class to inherit from (if any). The generated helper will inherit
  // from this class and will forward arguments to its constructors.
  string StateClass = "";
  // Any additional arguments that should be appended to the tryCombine*()
  // methods.
  list<GICombinerHelperArg> AdditionalArguments =
      [GICombinerHelperArg<"CombinerHelper &", "Helper">];
}
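
// For example, a target's own combiner definitions would typically declare a
// record along the lines of the sketch below; the record name, generated class
// name and option string are hypothetical, not definitions from this file:
//
//   def MyTargetPreLegalizerCombinerHelper
//       : GICombinerHelper<"MyTargetGenPreLegalizerCombinerHelper",
//                          [all_combines]> {
//     let DisableRuleOption = "mytargetprelegalizercombiner-disable-rule";
//   }
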
class GICombineRule<dag defs, dag match, dag apply> : GICombine {
  /// Defines the external interface of the match rule. This includes:
  /// * The names of the root nodes (requires at least one)
  /// See GIDefKind for details.
  dag Defs = defs;

  /// Defines the things which must be true for the pattern to match.
  /// See GIMatchKind for details.
  dag Match = match;

  /// Defines the things which happen after the decision is made to apply a
  /// combine rule.
  /// See GIApplyKind for details.
  dag Apply = apply;
}
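
// As a quick orientation (copy_prop below is the first concrete rule in this
// file), a rule that pairs an instruction pattern with C++ match/apply code
// looks roughly like the sketch below; the rule name and the Helper methods it
// calls are placeholders, not definitions from this file:
//
//   def my_example_rule : GICombineRule<
//     (defs root:$d),
//     (match (COPY $d, $s):$mi,
//            [{ return Helper.matchMyExample(*${mi}); }]),
//     (apply [{ Helper.applyMyExample(*${mi}); }])>;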

/// The operator at the root of a GICombineRule.Defs dag.
def defs;

/// All arguments of the defs operator must be subclasses of GIDefKind or
/// sub-dags whose operator is GIDefKindWithArgs.
class GIDefKind;
class GIDefKindWithArgs;
/// Declare a root node. There must be at least one of these in every combine
/// rule.
/// TODO: The plan is to elide `root` definitions and determine the root from
///       the DAG itself, with an override for situations where the usual
///       determination is incorrect.
def root : GIDefKind;

/// Declares data that is passed from the match stage to the apply stage.
class GIDefMatchData<string type> : GIDefKind {
  /// A C++ type name indicating the storage type.
  string Type = type;
}

def extending_load_matchdata : GIDefMatchData<"PreferredTuple">;
def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">;
def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">;

/// The operator at the root of a GICombineRule.Match dag.
def match;
/// All arguments of the match operator must be either:
/// * A subclass of GIMatchKind
/// * A subclass of GIMatchKindWithArgs
/// * A subclass of Instruction
/// * A MIR code block (deprecated)
/// The GIMatchKind and GIMatchKindWithArgs cases are described in more detail
/// in their definitions below.
/// For the Instruction case, these are collected into a DAG where operand names
/// that occur multiple times introduce edges.
class GIMatchKind;
class GIMatchKindWithArgs;
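
// To illustrate the Instruction case, reusing an operand name across two
// instruction patterns ties them together into one DAG; in the sketch below
// $tmp links the G_TRUNC result to the G_ANYEXT source. The rule and the
// matchMyAnyExtOfTrunc helper are placeholders, not definitions in this file:
//
//   def my_anyext_of_trunc : GICombineRule<
//     (defs root:$dst, register_matchinfo:$matchinfo),
//     (match (G_TRUNC $tmp, $src),
//            (G_ANYEXT $dst, $tmp):$mi,
//            [{ return Helper.matchMyAnyExtOfTrunc(*${mi}, ${matchinfo}); }]),
//     (apply [{ Helper.replaceSingleDefInstWithReg(*${mi}, ${matchinfo}); }])>;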

/// In lieu of proper macro support, trivial one-off opcode checks can be
/// performed with this.
def wip_match_opcode : GIMatchKindWithArgs;

/// The operator at the root of a GICombineRule.Apply dag.
def apply;
/// All arguments of the apply operator must be subclasses of GIApplyKind, or
/// sub-dags whose operator is GIApplyKindWithArgs, or an MIR block
/// (deprecated).
class GIApplyKind;
class GIApplyKindWithArgs;

def register_matchinfo: GIDefMatchData<"Register">;
def int64_matchinfo: GIDefMatchData<"int64_t">;
def apint_matchinfo : GIDefMatchData<"APInt">;
def build_fn_matchinfo :
    GIDefMatchData<"std::function<void(MachineIRBuilder &)>">;

def copy_prop : GICombineRule<
  (defs root:$d),
  (match (COPY $d, $s):$mi,
         [{ return Helper.matchCombineCopy(*${mi}); }]),
  (apply [{ Helper.applyCombineCopy(*${mi}); }])>;

def extending_loads : GICombineRule<
  (defs root:$root, extending_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
         [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;
def combines_for_extload: GICombineGroup<[extending_loads]>;

def sext_trunc_sextload : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_SEXT_INREG):$d,
         [{ return Helper.matchSextTruncSextLoad(*${d}); }]),
  (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>;

def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">;
def sext_inreg_of_load : GICombineRule<
  (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_SEXT_INREG):$root,
         [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>;

def combine_indexed_load_store : GICombineRule<
  (defs root:$root, indexed_load_store_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root,
         [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>;

def opt_brcond_by_inverting_cond_matchdata : GIDefMatchData<"MachineInstr *">;
def opt_brcond_by_inverting_cond : GICombineRule<
  (defs root:$root, opt_brcond_by_inverting_cond_matchdata:$matchinfo),
  (match (wip_match_opcode G_BR):$root,
         [{ return Helper.matchOptBrCondByInvertingCond(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}, ${matchinfo}); }])>;

def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">;
def ptr_add_immed_chain : GICombineRule<
  (defs root:$d, ptr_add_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$d,
         [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>;

// Fold shift (shift base, x), y -> shift base, (x+y), if both shifts have the
// same opcode.
def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">;
def shift_immed_chain : GICombineRule<
  (defs root:$d, shift_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d,
         [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>;

// Transform shift (logic (shift X, C0), Y), C1
//        -> logic (shift X, (C0+C1)), (shift Y, C1),
//           if both shifts have the same opcode.
def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">;
def shift_of_shifted_logic_chain : GICombineRule<
  (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d,
         [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>;

def mul_to_shl_matchdata : GIDefMatchData<"unsigned">;
def mul_to_shl : GICombineRule<
  (defs root:$d, mul_to_shl_matchdata:$matchinfo),
  (match (G_MUL $d, $op1, $op2):$mi,
         [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>;

// shl ([asz]ext x), y => zext (shl x, y), if shift does not overflow int
def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">;
def reduce_shl_of_extend : GICombineRule<
  (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo),
  (match (G_SHL $dst, $src0, $src1):$mi,
         [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>;

// [us]itofp(undef) = 0, because the result value is bounded.
def undef_to_fp_zero : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UITOFP, G_SITOFP):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithFConstant(*${root}, 0.0); }])>;

def undef_to_int_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_AND, G_MUL):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

def undef_to_negative_one: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>;

def binop_left_undef_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL):$root,
         [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

// Instructions that can be replaced with undef if any source operand is undef.
def propagate_undef_any_op: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Instructions that can be replaced with undef if all source operands are
// undef.
def propagate_undef_all_ops: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
def propagate_undef_shuffle_mask: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return Helper.matchUndefShuffleVectorMask(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Fold (cond ? x : x) -> x
def select_same_val: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchSelectSameVal(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Fold (undef ? x : y) -> y
def select_undef_cmp: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchUndefSelectCmp(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Fold (true ? x : y) -> x
// Fold (false ? x : y) -> y
def select_constant_cmp_matchdata : GIDefMatchData<"unsigned">;
def select_constant_cmp: GICombineRule<
  (defs root:$root, select_constant_cmp_matchdata:$matchinfo),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }])
>;

// Fold x op 0 -> x
def right_identity_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SUB, G_ADD, G_OR, G_XOR, G_SHL, G_ASHR, G_LSHR,
                           G_PTR_ADD):$root,
    [{ return Helper.matchConstantOp(${root}->getOperand(2), 0); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold x op 1 -> x
def right_identity_one: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
    [{ return Helper.matchConstantOp(${root}->getOperand(2), 1); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (x op x) -> x
def binop_same_val: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_AND, G_OR):$root,
    [{ return Helper.matchBinOpSameVal(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (0 op x) -> 0
def binop_left_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchOperandIsZero(*${root}, 1); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

def urem_pow2_to_mask : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UREM):$root,
    [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]),
  (apply [{ Helper.applySimplifyURemByPow2(*${root}); }])
>;

// Transform d = [su]div(x, y) and r = [su]rem(x, y) -> d, r = [su]divrem(x, y)
def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">;
def div_rem_to_divrem : GICombineRule<
  (defs root:$root, div_rem_to_divrem_matchdata:$matchinfo),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchCombineDivRem(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineDivRem(*${root}, ${matchinfo}); }])
>;

// Fold (x op 0) -> 0
def binop_right_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
    [{ return Helper.matchOperandIsZero(*${root}, 2); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Erase stores of undef values.
def erase_undef_store : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_STORE):$root,
    [{ return Helper.matchUndefStore(*${root}); }]),
  (apply [{ return Helper.eraseInst(*${root}); }])
>;

def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
def simplify_add_to_sub: GICombineRule <
  (defs root:$root, simplify_add_to_sub_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchSimplifyAddToSub(*${root}, ${info}); }]),
  (apply [{ Helper.applySimplifyAddToSub(*${root}, ${info});}])
>;

// Fold fp_op(cst) to the constant result of the floating point operation.
def constant_fp_op_matchinfo: GIDefMatchData<"Optional<APFloat>">;
def constant_fp_op: GICombineRule <
  (defs root:$root, constant_fp_op_matchinfo:$info),
  (match (wip_match_opcode G_FNEG, G_FABS, G_FPTRUNC, G_FSQRT, G_FLOG2):$root,
    [{ return Helper.matchCombineConstantFoldFpUnary(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineConstantFoldFpUnary(*${root}, ${info}); }])
>;

// Fold int2ptr(ptr2int(x)) -> x
def p2i_to_i2p: GICombineRule<
  (defs root:$root, register_matchinfo:$info),
  (match (wip_match_opcode G_INTTOPTR):$root,
    [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineI2PToP2I(*${root}, ${info}); }])
>;

// Fold ptr2int(int2ptr(x)) -> x
def i2p_to_p2i: GICombineRule<
  (defs root:$root, register_matchinfo:$info),
  (match (wip_match_opcode G_PTRTOINT):$root,
    [{ return Helper.matchCombineP2IToI2P(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineP2IToI2P(*${root}, ${info}); }])
>;

// Fold add ptrtoint(x), y -> ptrtoint (ptr_add x), y
def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">;
def add_p2i_to_ptradd : GICombineRule<
  (defs root:$root, add_p2i_to_ptradd_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }])
>;

// Fold (ptr_add (int2ptr C1), C2) -> C1 + C2
def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"int64_t">;
def const_ptradd_to_i2p: GICombineRule<
  (defs root:$root, const_ptradd_to_i2p_matchinfo:$info),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }])
>;

// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
def hoist_logic_op_with_same_opcode_hands: GICombineRule <
  (defs root:$root, instruction_steps_matchdata:$info),
  (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
    [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildInstructionSteps(*${root}, ${info});}])
>;

// Fold ashr (shl x, C), C -> sext_inreg x, (bitwidth - C)
def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">;
def shl_ashr_to_sext_inreg : GICombineRule<
  (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info),
  (match (wip_match_opcode G_ASHR): $root,
    [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
  (apply [{ Helper.applyAshShlToSextInreg(*${root}, ${info});}])
>;

// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
def overlapping_and: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchOverlappingAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y.
def redundant_and: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y.
def redundant_or: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// If the input is already sign extended, just drop the extension.
// sext_inreg x, K -> x
//   if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1)
def redundant_sext_inreg: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_SEXT_INREG):$root,
         [{ return Helper.matchRedundantSExtInReg(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (anyext (trunc x)) -> x if the source type is the same as
// the destination type.
def anyext_trunc_fold: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT):$root,
         [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (zext (trunc x)) -> x if the source type is the same as the destination
// type and the truncated bits are known to be zero.
def zext_trunc_fold_matchinfo : GIDefMatchData<"Register">;
def zext_trunc_fold: GICombineRule <
  (defs root:$root, zext_trunc_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ZEXT):$root,
         [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">;
def ext_ext_fold: GICombineRule <
  (defs root:$root, ext_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT, G_SEXT, G_ZEXT):$root,
         [{ return Helper.matchCombineExtOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }])
>;

def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">;
def not_cmp_fold : GICombineRule<
  (defs root:$d, not_cmp_fold_matchinfo:$info),
  (match (wip_match_opcode G_XOR): $d,
  [{ return Helper.matchNotCmp(*${d}, ${info}); }]),
  (apply [{ Helper.applyNotCmp(*${d}, ${info}); }])
>;

// Fold (fneg (fneg x)) -> x.
def fneg_fneg_fold: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FNEG):$root,
         [{ return Helper.matchCombineFNegOfFNeg(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (unmerge(merge x, y, z)) -> x, y, z.
def unmerge_merge_matchinfo : GIDefMatchData<"SmallVector<Register, 8>">;
def unmerge_merge : GICombineRule<
  (defs root:$d, unmerge_merge_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }])
>;

// Fold merge(unmerge).
def merge_unmerge : GICombineRule<
  (defs root:$d, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_MERGE_VALUES):$d,
  [{ return Helper.matchCombineMergeUnmerge(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${d}, ${matchinfo}); }])
>;

// Fold (fabs (fabs x)) -> (fabs x).
def fabs_fabs_fold: GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FABS):$root,
         [{ return Helper.matchCombineFAbsOfFAbs(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (unmerge cst) -> cst1, cst2, ...
def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">;
def unmerge_cst : GICombineRule<
  (defs root:$d, unmerge_cst_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }])
>;

// Transform x,y<dead> = unmerge z -> x = trunc z.
def unmerge_dead_to_trunc : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }])
>;

// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0.
def unmerge_zext_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }])
>;

// Fold trunc ([asz]ext x) -> x or ([asz]ext x) or (trunc x).
def trunc_ext_fold_matchinfo : GIDefMatchData<"std::pair<Register, unsigned>">;
def trunc_ext_fold: GICombineRule <
  (defs root:$root, trunc_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }])
>;

// Fold trunc (shl x, K) -> shl (trunc x), K, if K < VT.getScalarSizeInBits().
def trunc_shl_matchinfo : GIDefMatchData<"std::pair<Register, Register>">;
def trunc_shl: GICombineRule <
  (defs root:$root, trunc_shl_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfShl(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfShl(*${root}, ${matchinfo}); }])
>;

// Transform (mul x, -1) -> (sub 0, x)
def mul_by_neg_one: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
         [{ return Helper.matchConstantOp(${root}->getOperand(2), -1); }]),
  (apply [{ Helper.applyCombineMulByNegativeOne(*${root}); }])
>;

// Fold (xor (and x, y), y) -> (and (not x), y)
def xor_of_and_with_same_reg_matchinfo :
    GIDefMatchData<"std::pair<Register, Register>">;
def xor_of_and_with_same_reg: GICombineRule <
  (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo),
  (match (wip_match_opcode G_XOR):$root,
         [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }])
>;

// Transform (ptr_add 0, x) -> (int_to_ptr x)
def ptr_add_with_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_PTR_ADD):$root,
         [{ return Helper.matchPtrAddZero(*${root}); }]),
  (apply [{ Helper.applyPtrAddZero(*${root}); }])>;

def regs_small_vec : GIDefMatchData<"SmallVector<Register, 4>">;
def combine_insert_vec_elts_build_vector : GICombineRule<
  (defs root:$root, regs_small_vec:$info),
  (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
    [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>;

def load_or_combine : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def extend_through_phis_matchdata: GIDefMatchData<"MachineInstr*">;
def extend_through_phis : GICombineRule<
  (defs root:$root, extend_through_phis_matchdata:$matchinfo),
  (match (wip_match_opcode G_PHI):$root,
    [{ return Helper.matchExtendThroughPhis(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtendThroughPhis(*${root}, ${matchinfo}); }])>;

// Currently only contains combine_insert_vec_elts_build_vector.
def insert_vec_elt_combines : GICombineGroup<
                            [combine_insert_vec_elts_build_vector]>;

def extract_vec_elt_build_vec : GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
    [{ return Helper.matchExtractVecEltBuildVec(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractVecEltBuildVec(*${root}, ${matchinfo}); }])>;

// Fold away full elt extracts from a build_vector.
def extract_all_elts_from_build_vector_matchinfo :
  GIDefMatchData<"SmallVector<std::pair<Register, MachineInstr*>>">;
def extract_all_elts_from_build_vector : GICombineRule<
  (defs root:$root, extract_all_elts_from_build_vector_matchinfo:$matchinfo),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
    [{ return Helper.matchExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }])>;

def extract_vec_elt_combines : GICombineGroup<[
  extract_vec_elt_build_vec,
  extract_all_elts_from_build_vector]>;

def funnel_shift_to_rotate : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FSHL, G_FSHR):$root,
    [{ return Helper.matchFunnelShiftToRotate(*${root}); }]),
  (apply [{ Helper.applyFunnelShiftToRotate(*${root}); }])
>;

def rotate_out_of_range : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ROTR, G_ROTL):$root,
    [{ return Helper.matchRotateOutOfRange(*${root}); }]),
  (apply [{ Helper.applyRotateOutOfRange(*${root}); }])
>;

def icmp_to_true_false_known_bits : GICombineRule<
  (defs root:$d, int64_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ICMP):$d,
         [{ return Helper.matchICmpToTrueFalseKnownBits(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

def bitfield_extract_from_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
    [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def funnel_shift_combines : GICombineGroup<[funnel_shift_to_rotate]>;

def bitfield_extract_from_sext_inreg : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_SEXT_INREG):$root,
    [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg,
                                            bitfield_extract_from_and]>;

def reassoc_ptradd : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchReassocPtrAdd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def reassocs : GICombineGroup<[reassoc_ptradd]>;

// Constant fold operations.
def constant_fold : GICombineRule<
  (defs root:$d, apint_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR):$d,
   [{ return Helper.matchConstantFold(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
                                     undef_to_negative_one,
                                     binop_left_undef_to_zero,
                                     propagate_undef_any_op,
                                     propagate_undef_all_ops,
                                     propagate_undef_shuffle_mask,
                                     erase_undef_store]>;

def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
                                        binop_same_val, binop_left_to_zero,
                                        binop_right_to_zero, p2i_to_i2p,
                                        i2p_to_p2i, anyext_trunc_fold,
                                        fneg_fneg_fold, right_identity_one]>;

def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p,
                                     overlapping_and]>;

def known_bits_simplifications : GICombineGroup<[
  redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
  zext_trunc_fold, icmp_to_true_false_known_bits]>;

def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend]>;

def phi_combines : GICombineGroup<[extend_through_phis]>;

def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp]>;

def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd,
                                       mul_by_neg_one]>;

def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
    extract_vec_elt_combines, combines_for_extload,
    combine_indexed_load_store, undef_combines, identity_combines, phi_combines,
    simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands,
    reassocs, ptr_add_immed_chain,
    shl_ashr_to_sext_inreg, sext_inreg_of_load,
    width_reduction_combines, select_combines,
    known_bits_simplifications, ext_ext_fold,
    not_cmp_fold, opt_brcond_by_inverting_cond,
    unmerge_merge, fabs_fabs_fold, unmerge_cst, unmerge_dead_to_trunc,
    unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shl,
    const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
    shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
    div_rem_to_divrem, funnel_shift_combines, form_bitfield_extract,
    constant_fold]>;

// A combine group used for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
// compile-time performance.
def optnone_combines : GICombineGroup<[trivial_combines,
    ptr_add_immed_chain, combines_for_extload,
    not_cmp_fold, opt_brcond_by_inverting_cond]>;
725