//===- Combine.td - Combine rule definitions ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declare GlobalISel combine rules and provide mechanisms to opt-out.
//
//===----------------------------------------------------------------------===//

// Common base class for GICombineRule and GICombineGroup.
class GICombine {
  // See GICombineGroup. We only declare it here to make the tablegen pass
  // simpler.
  list<GICombine> Rules = ?;
}

// A group of combine rules that can be added to a GICombiner or another group.
class GICombineGroup<list<GICombine> rules> : GICombine {
  // The rules contained in this group. The rules in a group are flattened into
  // a single list and sorted into whatever order is most efficient. However,
  // they will never be re-ordered such that behaviour differs from the
  // specified order. It is therefore possible to use the order of rules in this
  // list to describe priorities.
  let Rules = rules;
}
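
// Illustrative sketch only (hypothetical names, not definitions made in this
// file): a group may list rules and/or other groups, and earlier entries take
// priority over later ones, e.g.
//
//   def my_example_group : GICombineGroup<[some_rule, some_other_group]>;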

class GICombinerHelperArg<string type, string name> {
  string Type = type;
  string Name = name;
}

// Declares a combiner helper class
class GICombinerHelper<string classname, list<GICombine> rules>
    : GICombineGroup<rules> {
  // The class name to use in the generated output.
  string Classname = classname;
  // The name of a run-time compiler option that will be generated to disable
  // specific rules within this combiner.
  string DisableRuleOption = ?;
  // The state class to inherit from (if any). The generated helper will inherit
  // from this class and will forward arguments to its constructors.
  string StateClass = "";
  // Any additional arguments that should be appended to the tryCombine*()
  // methods.
  list<GICombinerHelperArg> AdditionalArguments =
      [GICombinerHelperArg<"CombinerHelper &", "Helper">];
}
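
// Illustrative sketch only (the target, class and option names below are
// hypothetical): a target-specific .td file would typically instantiate
// GICombinerHelper along these lines:
//
//   def MyTargetPreLegalizerCombinerHelper : GICombinerHelper<
//       "MyTargetGenPreLegalizerCombinerHelper", [all_combines]> {
//     let DisableRuleOption = "mytarget-prelegalizer-combiner-disable-rule";
//   }
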
class GICombineRule<dag defs, dag match, dag apply> : GICombine {
  /// Defines the external interface of the match rule. This includes:
  /// * The names of the root nodes (requires at least one)
  /// See GIDefKind for details.
  dag Defs = defs;

  /// Defines the things which must be true for the pattern to match
  /// See GIMatchKind for details.
  dag Match = match;

  /// Defines the things which happen after the decision is made to apply a
  /// combine rule.
  /// See GIApplyKind for details.
  dag Apply = apply;

  /// Defines the predicates that are checked before the match function
  /// is called. Targets can use this to, for instance, check Subtarget
  /// features.
  list<Predicate> Predicates = [];
}
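
// Illustrative sketch only (the rule, matchdata and helper names below are
// hypothetical): a typical rule declares a root and optional matchdata in
// `defs`, tests the candidate instruction in `match`, and rewrites it in
// `apply`:
//
//   def my_example_matchdata : GIDefMatchData<"unsigned">;
//   def my_example_rule : GICombineRule<
//     (defs root:$root, my_example_matchdata:$info),
//     (match (wip_match_opcode G_ADD):$root,
//            [{ return Helper.matchMyExample(*${root}, ${info}); }]),
//     (apply [{ Helper.applyMyExample(*${root}, ${info}); }])>;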

/// The operator at the root of a GICombineRule.Defs dag.
def defs;

/// All arguments of the defs operator must be subclasses of GIDefKind or
/// sub-dags whose operator is GIDefKindWithArgs.
class GIDefKind;
class GIDefKindWithArgs;
/// Declare a root node. There must be at least one of these in every combine
/// rule.
/// TODO: The plan is to elide `root` definitions and determine the root from
///       the DAG itself, with an override for situations where the usual
///       determination is incorrect.
def root : GIDefKind;

/// Declares data that is passed from the match stage to the apply stage.
class GIDefMatchData<string type> : GIDefKind {
  /// A C++ type name indicating the storage type.
  string Type = type;
}

def extending_load_matchdata : GIDefMatchData<"PreferredTuple">;
def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">;
def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">;

/// The operator at the root of a GICombineRule.Match dag.
def match;
/// All arguments of the match operator must be either:
/// * A subclass of GIMatchKind
/// * A subclass of GIMatchKindWithArgs
/// * A subclass of Instruction
/// * A MIR code block (deprecated)
/// The GIMatchKind and GIMatchKindWithArgs cases are described in more detail
/// in their definitions below.
/// For the Instruction case, these are collected into a DAG where operand names
/// that occur multiple times introduce edges.
class GIMatchKind;
class GIMatchKindWithArgs;

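// Illustrative sketch of the Instruction case (the rule name is hypothetical;
// the pattern mirrors i2p_to_p2i defined later in this file): reusing the
// operand name $t in both instruction patterns introduces an edge, so the two
// patterns are matched as a single connected DAG:
//
//   def my_p2i_to_i2p_example : GICombineRule<
//     (defs root:$dst, register_matchinfo:$info),
//     (match (G_INTTOPTR $t, $ptr),
//            (G_PTRTOINT $dst, $t):$mi,
//            [{ ${info} = ${ptr}.getReg(); }]),
//     (apply [{ Helper.applyCombineP2IToI2P(*${mi}, ${info}); }])>;
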
/// In lieu of proper macro support, trivial one-off opcode checks can be
/// performed with this.
def wip_match_opcode : GIMatchKindWithArgs;

/// The operator at the root of a GICombineRule.Apply dag.
def apply;
/// All arguments of the apply operator must be subclasses of GIApplyKind, or
/// sub-dags whose operator is GIApplyKindWithArgs, or an MIR block
/// (deprecated).
class GIApplyKind;
class GIApplyKindWithArgs;

def register_matchinfo: GIDefMatchData<"Register">;
def int64_matchinfo: GIDefMatchData<"int64_t">;
def apint_matchinfo : GIDefMatchData<"APInt">;
def build_fn_matchinfo :
GIDefMatchData<"std::function<void(MachineIRBuilder &)>">;
def unsigned_matchinfo: GIDefMatchData<"unsigned">;

def copy_prop : GICombineRule<
  (defs root:$d),
  (match (COPY $d, $s):$mi,
         [{ return Helper.matchCombineCopy(*${mi}); }]),
  (apply [{ Helper.applyCombineCopy(*${mi}); }])>;

// idempotent operations
// Fold (freeze (freeze x)) -> (freeze x).
// Fold (fabs (fabs x)) -> (fabs x).
// Fold (fcanonicalize (fcanonicalize x)) -> (fcanonicalize x).
def idempotent_prop : GICombineRule<
   (defs root:$mi),
   (match (wip_match_opcode G_FREEZE, G_FABS, G_FCANONICALIZE):$mi,
          [{ return MRI.getVRegDef(${mi}->getOperand(1).getReg())->getOpcode() ==
                    ${mi}->getOpcode(); }]),
   (apply [{ Helper.replaceSingleDefInstWithOperand(*${mi}, 1); }])>;


def extending_loads : GICombineRule<
  (defs root:$root, extending_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
         [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;

def load_and_mask : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
        [{ return Helper.matchCombineLoadWithAndMask(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
def combines_for_extload: GICombineGroup<[extending_loads, load_and_mask]>;

def sext_trunc_sextload : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_SEXT_INREG):$d,
         [{ return Helper.matchSextTruncSextLoad(*${d}); }]),
  (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>;

def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">;
def sext_inreg_of_load : GICombineRule<
  (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_SEXT_INREG):$root,
         [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>;

def sext_inreg_to_zext_inreg : GICombineRule<
  (defs root:$dst),
  (match
    (G_SEXT_INREG $dst, $src, $imm):$root,
      [{
        unsigned BitWidth = MRI.getType(${src}.getReg()).getScalarSizeInBits();
        return Helper.getKnownBits()->maskedValueIsZero(${src}.getReg(),
                 APInt::getOneBitSet(BitWidth, ${imm}.getImm() - 1)); }]),
    (apply [{
      Helper.getBuilder().setInstrAndDebugLoc(*${root});
      Helper.getBuilder().buildZExtInReg(${dst}, ${src}, ${imm}.getImm());
      ${root}->eraseFromParent();
      return true;
  }])
>;

def combine_indexed_load_store : GICombineRule<
  (defs root:$root, indexed_load_store_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root,
         [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>;

def opt_brcond_by_inverting_cond_matchdata : GIDefMatchData<"MachineInstr *">;
def opt_brcond_by_inverting_cond : GICombineRule<
  (defs root:$root, opt_brcond_by_inverting_cond_matchdata:$matchinfo),
  (match (wip_match_opcode G_BR):$root,
         [{ return Helper.matchOptBrCondByInvertingCond(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}, ${matchinfo}); }])>;

def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">;
def ptr_add_immed_chain : GICombineRule<
  (defs root:$d, ptr_add_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$d,
         [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>;

// Fold shift (shift base, x), y -> shift base, (x+y), if shifts are same
def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">;
def shift_immed_chain : GICombineRule<
  (defs root:$d, shift_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d,
         [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>;

// Transform shift (logic (shift X, C0), Y), C1
//        -> logic (shift X, (C0+C1)), (shift Y, C1), if shifts are same
def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">;
def shift_of_shifted_logic_chain : GICombineRule<
  (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d,
         [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>;

def mul_to_shl_matchdata : GIDefMatchData<"unsigned">;
def mul_to_shl : GICombineRule<
  (defs root:$d, mul_to_shl_matchdata:$matchinfo),
  (match (G_MUL $d, $op1, $op2):$mi,
         [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>;

// shl ([asz]ext x), y => zext (shl x, y), if shift does not overflow int
def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">;
def reduce_shl_of_extend : GICombineRule<
  (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo),
  (match (G_SHL $dst, $src0, $src1):$mi,
         [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>;

def narrow_binop_feeding_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchNarrowBinopFeedingAnd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// [us]itofp(undef) = 0, because the result value is bounded.
def undef_to_fp_zero : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UITOFP, G_SITOFP):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithFConstant(*${root}, 0.0); }])>;

def undef_to_int_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_AND, G_MUL):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

def undef_to_negative_one: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>;

def binop_left_undef_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL, G_UDIV, G_UREM):$root,
         [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

def binop_right_undef_to_undef: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root,
         [{ return Helper.matchOperandIsUndef(*${root}, 2); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

def unary_undef_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ABS):$root,
         [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

// Instructions where if any source operand is undef, the instruction can be
// replaced with undef.
def propagate_undef_any_op: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Instructions where if all source operands are undef, the instruction can be
// replaced with undef.
def propagate_undef_all_ops: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
          [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
def propagate_undef_shuffle_mask: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return Helper.matchUndefShuffleVectorMask(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Replace an insert/extract element of an out of bounds index with undef.
def insert_extract_vec_elt_out_of_bounds : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_INSERT_VECTOR_ELT, G_EXTRACT_VECTOR_ELT):$root,
         [{ return Helper.matchInsertExtractVecEltOutOfBounds(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Fold (cond ? x : x) -> x
def select_same_val: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchSelectSameVal(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Fold (undef ? x : y) -> y
def select_undef_cmp: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchUndefSelectCmp(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Fold (true ? x : y) -> x
// Fold (false ? x : y) -> y
def select_constant_cmp_matchdata : GIDefMatchData<"unsigned">;
def select_constant_cmp: GICombineRule<
  (defs root:$root, select_constant_cmp_matchdata:$matchinfo),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }])
>;

def select_to_logical : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchSelectToLogical(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])
>;

// Fold (C op x) -> (x op C)
// TODO: handle more isCommutable opcodes
// TODO: handle compares (currently not marked as isCommutable)
def commute_constant_to_rhs : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ADD, G_MUL, G_AND, G_OR, G_XOR):$root, [{
    return getIConstantVRegVal(${root}->getOperand(1).getReg(), MRI).has_value();
  }]),
  (apply [{
    Observer.changingInstr(*${root});
    Register LHSReg = ${root}->getOperand(1).getReg();
    Register RHSReg = ${root}->getOperand(2).getReg();
    ${root}->getOperand(1).setReg(RHSReg);
    ${root}->getOperand(2).setReg(LHSReg);
    Observer.changedInstr(*${root});
  }])
>;

// Fold x op 0 -> x
def right_identity_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SUB, G_ADD, G_OR, G_XOR, G_SHL, G_ASHR, G_LSHR,
                           G_PTR_ADD, G_ROTL, G_ROTR):$root,
    [{ return Helper.matchConstantOp(${root}->getOperand(2), 0); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold x op 1 -> x
def right_identity_one: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
    [{ return Helper.matchConstantOp(${root}->getOperand(2), 1); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (x op x) -> x
def binop_same_val: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_AND, G_OR):$root,
    [{ return Helper.matchBinOpSameVal(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (0 op x) -> 0
def binop_left_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchOperandIsZero(*${root}, 1); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

def urem_pow2_to_mask : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UREM):$root,
    [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]),
  (apply [{ Helper.applySimplifyURemByPow2(*${root}); }])
>;

// Push a binary operator through a select on constants.
//
// binop (select cond, K0, K1), K2 ->
//   select cond, (binop K0, K2), (binop K1, K2)

// Every binary operator that has constant folding. We currently do
// not have constant folding for G_FPOW, G_FMAXNUM_IEEE or
// G_FMINNUM_IEEE.
def fold_binop_into_select : GICombineRule<
  (defs root:$root, unsigned_matchinfo:$select_op_no),
  (match (wip_match_opcode
    G_ADD, G_SUB, G_PTR_ADD, G_AND, G_OR, G_XOR,
    G_SDIV, G_SREM, G_UDIV, G_UREM, G_LSHR, G_ASHR, G_SHL,
    G_SMIN, G_SMAX, G_UMIN, G_UMAX,
    G_FMUL, G_FADD, G_FSUB, G_FDIV, G_FREM,
    G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
    [{ return Helper.matchFoldBinOpIntoSelect(*${root}, ${select_op_no}); }]),
  (apply [{ return Helper.applyFoldBinOpIntoSelect(*${root}, ${select_op_no}); }])
>;

// Transform d = [su]div(x, y) and r = [su]rem(x, y) -> d, r = [su]divrem(x, y)
def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">;
def div_rem_to_divrem : GICombineRule<
  (defs root:$root, div_rem_to_divrem_matchdata:$matchinfo),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchCombineDivRem(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineDivRem(*${root}, ${matchinfo}); }])
>;

// Fold (x op 0) -> 0
def binop_right_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
    [{ return Helper.matchOperandIsZero(*${root}, 2); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Erase stores of undef values.
def erase_undef_store : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_STORE):$root,
    [{ return Helper.matchUndefStore(*${root}); }]),
  (apply [{ return Helper.eraseInst(*${root}); }])
>;

def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
def simplify_add_to_sub: GICombineRule <
  (defs root:$root, simplify_add_to_sub_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchSimplifyAddToSub(*${root}, ${info}); }]),
  (apply [{ Helper.applySimplifyAddToSub(*${root}, ${info});}])
>;

// Fold fp_op(cst) to the constant result of the floating point operation.
def constant_fp_op_matchinfo: GIDefMatchData<"std::optional<APFloat>">;
def constant_fp_op: GICombineRule <
  (defs root:$root, constant_fp_op_matchinfo:$info),
  (match (wip_match_opcode G_FNEG, G_FABS, G_FPTRUNC, G_FSQRT, G_FLOG2):$root,
    [{ return Helper.matchCombineConstantFoldFpUnary(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineConstantFoldFpUnary(*${root}, ${info}); }])
>;

// Fold int2ptr(ptr2int(x)) -> x
def p2i_to_i2p: GICombineRule<
  (defs root:$root, register_matchinfo:$info),
  (match (wip_match_opcode G_INTTOPTR):$root,
    [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineI2PToP2I(*${root}, ${info}); }])
>;

// Fold ptr2int(int2ptr(x)) -> x
def i2p_to_p2i: GICombineRule<
  (defs root:$dst, register_matchinfo:$info),
  (match (G_INTTOPTR $t, $ptr),
         (G_PTRTOINT $dst, $t):$mi,
    [{ ${info} = ${ptr}.getReg(); }]),
  (apply [{ Helper.applyCombineP2IToI2P(*${mi}, ${info}); }])
>;

// Fold add ptrtoint(x), y -> ptrtoint (ptr_add x), y
def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">;
def add_p2i_to_ptradd : GICombineRule<
  (defs root:$root, add_p2i_to_ptradd_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }])
>;

// Fold (ptr_add (int2ptr C1), C2) -> C1 + C2
def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"APInt">;
def const_ptradd_to_i2p: GICombineRule<
  (defs root:$root, const_ptradd_to_i2p_matchinfo:$info),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }])
>;

// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
def hoist_logic_op_with_same_opcode_hands: GICombineRule <
  (defs root:$root, instruction_steps_matchdata:$info),
  (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
    [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildInstructionSteps(*${root}, ${info});}])
>;

// Fold ashr (shl x, C), C -> sext_inreg (C)
def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">;
def shl_ashr_to_sext_inreg : GICombineRule<
  (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info),
  (match (wip_match_opcode G_ASHR): $root,
    [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
  (apply [{ Helper.applyAshShlToSextInreg(*${root}, ${info});}])
>;

// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
def overlapping_and: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchOverlappingAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y.
def redundant_and: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y.
def redundant_or: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// If the input is already sign extended, just drop the extension.
// sext_inreg x, K -> x
//   if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1)
def redundant_sext_inreg: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_SEXT_INREG):$root,
         [{ return Helper.matchRedundantSExtInReg(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (anyext (trunc x)) -> x if the source type is the same as
// the destination type.
def anyext_trunc_fold: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT):$root,
         [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (zext (trunc x)) -> x if the source type is the same as the destination
// type and the truncated bits are known to be zero.
def zext_trunc_fold_matchinfo : GIDefMatchData<"Register">;
def zext_trunc_fold: GICombineRule <
  (defs root:$root, zext_trunc_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ZEXT):$root,
         [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">;
def ext_ext_fold: GICombineRule <
  (defs root:$root, ext_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT, G_SEXT, G_ZEXT):$root,
         [{ return Helper.matchCombineExtOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }])
>;

def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">;
def not_cmp_fold : GICombineRule<
  (defs root:$d, not_cmp_fold_matchinfo:$info),
  (match (wip_match_opcode G_XOR): $d,
  [{ return Helper.matchNotCmp(*${d}, ${info}); }]),
  (apply [{ Helper.applyNotCmp(*${d}, ${info}); }])
>;

// Fold (fneg (fneg x)) -> x.
def fneg_fneg_fold: GICombineRule <
  (defs root:$dst, register_matchinfo:$matchinfo),
  (match (G_FNEG $t, $src),
         (G_FNEG $dst, $t):$mi,
         [{ ${matchinfo} = ${src}.getReg(); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${mi}, ${matchinfo}); }])
>;

// Fold (unmerge(merge x, y, z)) -> x, y, z.
def unmerge_merge_matchinfo : GIDefMatchData<"SmallVector<Register, 8>">;
def unmerge_merge : GICombineRule<
  (defs root:$d, unmerge_merge_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }])
>;

// Fold merge(unmerge).
def merge_unmerge : GICombineRule<
  (defs root:$d, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_MERGE_VALUES):$d,
  [{ return Helper.matchCombineMergeUnmerge(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${d}, ${matchinfo}); }])
>;

// Fold (fabs (fneg x)) -> (fabs x).
def fabs_fneg_fold: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FABS):$root,
         [{ return Helper.matchCombineFAbsOfFNeg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Fold (unmerge cst) -> cst1, cst2, ...
def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">;
def unmerge_cst : GICombineRule<
  (defs root:$d, unmerge_cst_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }])
>;

// Fold (unmerge undef) -> undef, undef, ...
def unmerge_undef : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $root,
         [{ return Helper.matchCombineUnmergeUndef(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Transform x,y<dead> = unmerge z -> x = trunc z.
def unmerge_dead_to_trunc : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }])
>;

// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0.
def unmerge_zext_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }])
>;

// Fold trunc ([asz]ext x) -> x or ([asz]ext x) or (trunc x).
def trunc_ext_fold_matchinfo : GIDefMatchData<"std::pair<Register, unsigned>">;
def trunc_ext_fold: GICombineRule <
  (defs root:$root, trunc_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }])
>;

// Under certain conditions, transform:
//  trunc (shl x, K)     -> shl (trunc x), K
//  trunc ([al]shr x, K) -> (trunc ([al]shr (trunc x), K))
def trunc_shift_matchinfo : GIDefMatchData<"std::pair<MachineInstr*, LLT>">;
def trunc_shift: GICombineRule <
  (defs root:$root, trunc_shift_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfShift(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfShift(*${root}, ${matchinfo}); }])
>;

// Transform (mul x, -1) -> (sub 0, x)
def mul_by_neg_one: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
         [{ return Helper.matchConstantOp(${root}->getOperand(2), -1); }]),
  (apply [{ Helper.applyCombineMulByNegativeOne(*${root}); }])
>;

// Fold (xor (and x, y), y) -> (and (not x), y)
def xor_of_and_with_same_reg_matchinfo :
    GIDefMatchData<"std::pair<Register, Register>">;
def xor_of_and_with_same_reg: GICombineRule <
  (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo),
  (match (wip_match_opcode G_XOR):$root,
         [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }])
>;

// Transform (ptr_add 0, x) -> (int_to_ptr x)
def ptr_add_with_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_PTR_ADD):$root,
         [{ return Helper.matchPtrAddZero(*${root}); }]),
  (apply [{ Helper.applyPtrAddZero(*${root}); }])>;

def regs_small_vec : GIDefMatchData<"SmallVector<Register, 4>">;
def combine_insert_vec_elts_build_vector : GICombineRule<
  (defs root:$root, regs_small_vec:$info),
  (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
    [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>;

def load_or_combine : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;


def truncstore_merge_matchdata : GIDefMatchData<"MergeTruncStoresInfo">;
def truncstore_merge : GICombineRule<
  (defs root:$root, truncstore_merge_matchdata:$info),
  (match (wip_match_opcode G_STORE):$root,
   [{ return Helper.matchTruncStoreMerge(*${root}, ${info}); }]),
  (apply [{ Helper.applyTruncStoreMerge(*${root}, ${info}); }])>;

def extend_through_phis_matchdata: GIDefMatchData<"MachineInstr*">;
def extend_through_phis : GICombineRule<
  (defs root:$root, extend_through_phis_matchdata:$matchinfo),
  (match (wip_match_opcode G_PHI):$root,
    [{ return Helper.matchExtendThroughPhis(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtendThroughPhis(*${root}, ${matchinfo}); }])>;

// Currently only the one combine above.
def insert_vec_elt_combines : GICombineGroup<
                            [combine_insert_vec_elts_build_vector]>;

def extract_vec_elt_build_vec : GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
    [{ return Helper.matchExtractVecEltBuildVec(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractVecEltBuildVec(*${root}, ${matchinfo}); }])>;

// Fold away full elt extracts from a build_vector.
def extract_all_elts_from_build_vector_matchinfo :
  GIDefMatchData<"SmallVector<std::pair<Register, MachineInstr*>>">;
def extract_all_elts_from_build_vector : GICombineRule<
  (defs root:$root, extract_all_elts_from_build_vector_matchinfo:$matchinfo),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
    [{ return Helper.matchExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }])>;

def extract_vec_elt_combines : GICombineGroup<[
  extract_vec_elt_build_vec,
  extract_all_elts_from_build_vector]>;

def funnel_shift_from_or_shift : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchOrShiftToFunnelShift(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

def funnel_shift_to_rotate : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FSHL, G_FSHR):$root,
    [{ return Helper.matchFunnelShiftToRotate(*${root}); }]),
  (apply [{ Helper.applyFunnelShiftToRotate(*${root}); }])
>;

def rotate_out_of_range : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ROTR, G_ROTL):$root,
    [{ return Helper.matchRotateOutOfRange(*${root}); }]),
  (apply [{ Helper.applyRotateOutOfRange(*${root}); }])
>;

def icmp_to_true_false_known_bits : GICombineRule<
  (defs root:$d, int64_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ICMP):$d,
         [{ return Helper.matchICmpToTrueFalseKnownBits(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

def icmp_to_lhs_known_bits : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return Helper.matchICmpToLHSKnownBits(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def redundant_binop_in_equality : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return Helper.matchRedundantBinOpInEquality(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def and_or_disjoint_mask : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchAndOrDisjointMask(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${info}); }])>;

def bitfield_extract_from_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
    [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def funnel_shift_combines : GICombineGroup<[funnel_shift_from_or_shift,
                                            funnel_shift_to_rotate]>;

def bitfield_extract_from_sext_inreg : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_SEXT_INREG):$root,
    [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def bitfield_extract_from_shr : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
    [{ return Helper.matchBitfieldExtractFromShr(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def bitfield_extract_from_shr_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
    [{ return Helper.matchBitfieldExtractFromShrAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg,
                                            bitfield_extract_from_and,
                                            bitfield_extract_from_shr,
                                            bitfield_extract_from_shr_and]>;

def udiv_by_const : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UDIV):$root,
   [{ return Helper.matchUDivByConst(*${root}); }]),
  (apply [{ Helper.applyUDivByConst(*${root}); }])>;

def sdiv_by_const : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SDIV):$root,
   [{ return Helper.matchSDivByConst(*${root}); }]),
  (apply [{ Helper.applySDivByConst(*${root}); }])>;

def intdiv_combines : GICombineGroup<[udiv_by_const, sdiv_by_const]>;

def reassoc_ptradd : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchReassocPtrAdd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def reassocs : GICombineGroup<[reassoc_ptradd]>;

// Constant fold operations.
def constant_fold : GICombineRule<
  (defs root:$d, apint_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR):$d,
   [{ return Helper.matchConstantFold(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

def mulo_by_2: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UMULO, G_SMULO):$root,
         [{ return Helper.matchMulOBy2(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def mulo_by_0: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UMULO, G_SMULO):$root,
         [{ return Helper.matchMulOBy0(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

def addo_by_0: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UADDO, G_SADDO):$root,
         [{ return Helper.matchAddOBy0(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

// Transform (uadde x, y, 0) -> (uaddo x, y)
//           (sadde x, y, 0) -> (saddo x, y)
//           (usube x, y, 0) -> (usubo x, y)
//           (ssube x, y, 0) -> (ssubo x, y)
def adde_to_addo: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UADDE, G_SADDE, G_USUBE, G_SSUBE):$root,
         [{ return Helper.matchAddEToAddO(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def mulh_to_lshr : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UMULH):$root,
         [{ return Helper.matchUMulHToLShr(*${root}); }]),
  (apply [{ Helper.applyUMulHToLShr(*${root}); }])>;

def mulh_combines : GICombineGroup<[mulh_to_lshr]>;

def redundant_neg_operands: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMAD, G_FMA):$root,
    [{ return Helper.matchRedundantNegOperands(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Transform (fsub +-0.0, X) -> (fneg X)
def fsub_to_fneg: GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FSUB):$root,
    [{ return Helper.matchFsubToFneg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyFsubToFneg(*${root}, ${matchinfo}); }])>;

// Transform (fadd x, (fmul y, z)) -> (fma y, z, x)
//           (fadd x, (fmul y, z)) -> (fmad y, z, x)
// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
//           (fadd (fmul x, y), z) -> (fmad x, y, z)
def combine_fadd_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
//                                         -> (fmad (fpext x), (fpext y), z)
// Transform (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
//                                         -> (fmad (fpext y), (fpext z), x)
def combine_fadd_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMA(*${root},
                                                               ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fmul z, u)), v)  -> (fma x, y, (fma z, u, v))
//           (fadd (fmad x, y, (fmul z, u)), v) -> (fmad x, y, (fmad z, u, v))
// Transform (fadd v, (fma x, y, (fmul z, u)))  -> (fma x, y, (fma z, u, v))
//           (fadd v, (fmad x, y, (fmul z, u))) -> (fmad x, y, (fmad z, u, v))
def combine_fadd_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFMAFMulToFMadOrFMA(*${root},
                                                             ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fpext (fmul u, v))), z) ->
//           (fma x, y, (fma (fpext u), (fpext v), z))
def combine_fadd_fpext_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
                                                  *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
//                                 -> (fmad x, y, -z)
def combine_fsub_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
//           (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x)
def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root},
                                                              ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fpext (fmul x, y)), z) ->
//           (fma (fpext x), (fpext y), (fneg z))
def combine_fsub_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFpExtFMulToFMadOrFMA(*${root},
                                                               ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fpext (fmul x, y))), z) ->
//           (fneg (fma (fpext x), (fpext y), z))
def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA(
                                            *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def combine_minmax_nan: GICombineRule<
  (defs root:$root, unsigned_matchinfo:$info),
  (match (wip_match_opcode G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
         [{ return Helper.matchCombineFMinMaxNaN(*${root}, ${info}); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${info}); }])>;

// Transform (add x, (sub y, x)) -> y
// Transform (add (sub y, x), x) -> y
def add_sub_reg: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ADD):$root,
         [{ return Helper.matchAddSubSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root},
                                                      ${matchinfo}); }])>;

def buildvector_identity_fold : GICombineRule<
  (defs root:$build_vector, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_BUILD_VECTOR_TRUNC, G_BUILD_VECTOR):$build_vector,
         [{ return Helper.matchBuildVectorIdentityFold(*${build_vector}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${build_vector}, ${matchinfo}); }])>;

def trunc_buildvector_fold : GICombineRule<
  (defs root:$op, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$op,
      [{ return Helper.matchTruncBuildVectorFold(*${op}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>;

def trunc_lshr_buildvector_fold : GICombineRule<
  (defs root:$op, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$op,
      [{ return Helper.matchTruncLshrBuildVectorFold(*${op}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>;

// Transform:
//   (x + y) - y -> x
//   (x + y) - x -> y
//   x - (y + x) -> 0 - y
//   x - (x + z) -> 0 - z
def sub_add_reg: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_SUB):$root,
         [{ return Helper.matchSubAddSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

def bitcast_bitcast_fold : GICombineRule<
  (defs root:$dst),
  (match (G_BITCAST $dst, $src1):$op, (G_BITCAST $src1, $src0),
      [{ return MRI.getType(${src0}.getReg()) == MRI.getType(${dst}.getReg()); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${src0}.getReg()); }])>;

def select_to_minmax: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_SELECT):$root,
         [{ return Helper.matchSimplifySelectToMinMax(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
                                     undef_to_negative_one,
                                     binop_left_undef_to_zero,
                                     binop_right_undef_to_undef,
                                     unary_undef_to_zero,
                                     propagate_undef_any_op,
                                     propagate_undef_all_ops,
                                     propagate_undef_shuffle_mask,
                                     erase_undef_store,
                                     unmerge_undef,
                                     insert_extract_vec_elt_out_of_bounds]>;

def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
                                        binop_same_val, binop_left_to_zero,
                                        binop_right_to_zero, p2i_to_i2p,
                                        i2p_to_p2i, anyext_trunc_fold,
                                        fneg_fneg_fold, right_identity_one,
                                        add_sub_reg, buildvector_identity_fold,
                                        trunc_buildvector_fold,
                                        trunc_lshr_buildvector_fold,
                                        bitcast_bitcast_fold]>;

def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p,
                                     overlapping_and, mulo_by_2, mulo_by_0,
                                     addo_by_0, adde_to_addo,
                                     combine_minmax_nan]>;

def known_bits_simplifications : GICombineGroup<[
  redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
  zext_trunc_fold, icmp_to_true_false_known_bits, icmp_to_lhs_known_bits,
  sext_inreg_to_zext_inreg]>;

def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend,
                                               narrow_binop_feeding_and]>;

def phi_combines : GICombineGroup<[extend_through_phis]>;

def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp,
                                      select_to_logical]>;

def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd,
                                       mul_by_neg_one, idempotent_prop]>;

def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
  combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma,
  combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>;

def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
    extract_vec_elt_combines, combines_for_extload,
    combine_indexed_load_store, undef_combines, identity_combines, phi_combines,
    simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands,
    reassocs, ptr_add_immed_chain,
    shl_ashr_to_sext_inreg, sext_inreg_of_load,
    width_reduction_combines, select_combines,
    known_bits_simplifications, ext_ext_fold,
    not_cmp_fold, opt_brcond_by_inverting_cond,
    unmerge_merge, unmerge_cst, unmerge_dead_to_trunc,
    unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shift,
    const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
    shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
    truncstore_merge, div_rem_to_divrem, funnel_shift_combines,
    form_bitfield_extract, constant_fold, fabs_fneg_fold,
    intdiv_combines, mulh_combines, redundant_neg_operands,
    and_or_disjoint_mask, fma_combines, fold_binop_into_select,
    sub_add_reg, select_to_minmax, redundant_binop_in_equality,
    fsub_to_fneg, commute_constant_to_rhs]>;

// A combine group used for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
// compile time performance.
def optnone_combines : GICombineGroup<[trivial_combines,
    ptr_add_immed_chain, combines_for_extload,
    not_cmp_fold, opt_brcond_by_inverting_cond]>;