// xref: /freebsd/contrib/llvm-project/llvm/include/llvm/Target/GlobalISel/Combine.td
// (revision 5f757f3ff9144b609b3c433dfd370cc6bdc191ad)
//===- Combine.td - Combine rule definitions ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declare GlobalISel combine rules and provide mechanisms to opt-out.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// Base Classes
//
// These are the core classes that the combiner backend relies on.
//===----------------------------------------------------------------------===//

/// All arguments of the defs operator must be subclasses of GIDefKind or
/// sub-dags whose operator is GIDefKindWithArgs.
class GIDefKind;
class GIDefKindWithArgs;

/// Declare a root node. There must be at least one of these in every combine
/// rule.
def root : GIDefKind;

// Operator of the 'defs' dag of a GICombineRule.
def defs;

// Operators for the 'match'/'apply' dags of a GICombineRule and for the
// alternative patterns of a GICombinePatFrag.
def pattern;
def match;
def apply;

// Matches any one of a list of opcodes; used by rules whose matching logic
// lives entirely in C++ helper code.
def wip_match_opcode;

// Common base class for GICombineRule and GICombineGroup.
class GICombine {
  // See GICombineGroup. We only declare it here to make the tablegen pass
  // simpler.
  list<GICombine> Rules = ?;
}

// A group of combine rules that can be added to a GICombiner or another group.
class GICombineGroup<list<GICombine> rules> : GICombine {
  // The rules contained in this group. The rules in a group are flattened into
  // a single list and sorted into whatever order is most efficient. However,
  // they will never be re-ordered such that behaviour differs from the
  // specified order. It is therefore possible to use the order of rules in this
  // list to describe priorities.
  let Rules = rules;
}

// Declares a combiner implementation class
class GICombiner<string classname, list<GICombine> rules>
    : GICombineGroup<rules> {
  // The class name to use in the generated output.
  string Classname = classname;
  // Combiners can use this so they're free to define tryCombineAll themselves
  // and do extra work before/after calling the TableGen-erated code.
  string CombineAllMethodName = "tryCombineAll";
}

/// Declares data that is passed from the match stage to the apply stage.
class GIDefMatchData<string type>  {
  /// A C++ type name indicating the storage type.
  string Type = type;
}

/// A single combine rule, built from a 'defs' interface dag, a 'match' dag
/// and an 'apply' dag.
class GICombineRule<dag defs, dag match, dag apply> : GICombine {
  /// Defines the external interface of the match rule. This includes:
  /// * The names of the root nodes (requires at least one)
  /// See GIDefKind for details.
  dag Defs = defs;

  /// Defines the things which must be true for the pattern to match
  dag Match = match;

  /// Defines the things which happen after the decision is made to apply a
  /// combine rule.
  dag Apply = apply;

  /// Defines the predicates that are checked before the match function
  /// is called. Targets can use this to, for instance, check Subtarget
  /// features.
  list<Predicate> Predicates = [];

  // Maximum number of permutations of this rule that can be emitted.
  // Set to -1 to disable the limit.
  int MaxPermutations = 16;
}

// Operand kinds usable in GICombinePatFrag in/out operand lists.
def gi_mo;
def gi_imm;

// This is an equivalent of PatFrags but for MIR Patterns.
//
// GICombinePatFrags can be used in place of instructions for 'match' patterns.
// Much like normal instructions, the defs (outs) come first, and the ins second.
//
// Out operands can only be of type "root" or "gi_mo", and they must be defined
// by an instruction pattern in all alternatives.
//
// In operands can be gi_imm or gi_mo. They cannot be redefined in any alternative
// pattern and may only appear in the C++ code, or in the output operand of an
// instruction pattern.
class GICombinePatFrag<dag outs, dag ins, list<dag> alts> {
  dag InOperands = ins;
  dag OutOperands = outs;
  list<dag> Alternatives = alts;
}

//===----------------------------------------------------------------------===//
// Pattern Special Types
//===----------------------------------------------------------------------===//

class GISpecialType;

// In an apply pattern, GITypeOf can be used to set the type of a new temporary
// register to match the type of a matched register.
//
// This can only be used on temporary registers defined by the apply pattern.
//
// TODO: Make this work in matchers as well?
//
// FIXME: Syntax is very ugly.
class GITypeOf<string opName> : GISpecialType {
  string OpName = opName;
}

//===----------------------------------------------------------------------===//
// Pattern Builtins
//===----------------------------------------------------------------------===//

// "Magic" Builtin instructions for MIR patterns.
// The definitions that implement
class GIBuiltinInst;

// Replace all references to a register with another one.
//
// Usage:
//    (apply (GIReplaceReg $old, $new))
//
// Operands:
// - $old (out) register defined by a matched instruction
// - $new (in)  register
//
// Semantics:
// - Can only appear in an 'apply' pattern.
// - If both old/new are operands of matched instructions,
//   "canReplaceReg" is checked before applying the rule.
def GIReplaceReg : GIBuiltinInst;

// Apply action that erases the match root.
//
// Usage:
//    (apply (GIEraseRoot))
//
// Semantics:
// - Can only appear as the only pattern of an 'apply' pattern list.
// - The root cannot have any output operands.
// - The root must be a CodeGenInstruction
//
// TODO: Allow using this directly, like (apply GIEraseRoot)
def GIEraseRoot : GIBuiltinInst;

//===----------------------------------------------------------------------===//
// Pattern MIFlags
//===----------------------------------------------------------------------===//

// Wraps a MachineInstr::MIFlag enumerator name for use in patterns.
class MIFlagEnum<string enumName> {
  string EnumName = "MachineInstr::" # enumName;
}

def FmNoNans    : MIFlagEnum<"FmNoNans">;
def FmNoInfs    : MIFlagEnum<"FmNoInfs">;
def FmNsz       : MIFlagEnum<"FmNsz">;
def FmArcp      : MIFlagEnum<"FmArcp">;
def FmContract  : MIFlagEnum<"FmContract">;
def FmAfn       : MIFlagEnum<"FmAfn">;
def FmReassoc   : MIFlagEnum<"FmReassoc">;

def MIFlags;
// def not; -> Already defined as a SDNode

//===----------------------------------------------------------------------===//

// MatchData definitions: C++ types used to carry information from a rule's
// match function to its apply function (see GIDefMatchData).
def extending_load_matchdata : GIDefMatchData<"PreferredTuple">;
def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">;
def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">;

def register_matchinfo: GIDefMatchData<"Register">;
def int64_matchinfo: GIDefMatchData<"int64_t">;
def apint_matchinfo : GIDefMatchData<"APInt">;
def constantfp_matchinfo : GIDefMatchData<"ConstantFP*">;
def build_fn_matchinfo :
GIDefMatchData<"std::function<void(MachineIRBuilder &)>">;
def unsigned_matchinfo: GIDefMatchData<"unsigned">;

// Fold COPY instructions (see CombinerHelper::matchCombineCopy).
def copy_prop : GICombineRule<
  (defs root:$d),
  (match (COPY $d, $s):$mi,
         [{ return Helper.matchCombineCopy(*${mi}); }]),
  (apply [{ Helper.applyCombineCopy(*${mi}); }])>;

// idempotent operations
// Fold (freeze (freeze x)) -> (freeze x).
// Fold (fabs (fabs x)) -> (fabs x).
// Fold (fcanonicalize (fcanonicalize x)) -> (fcanonicalize x).
def idempotent_prop_frags : GICombinePatFrag<
  (outs root:$dst, $src), (ins),
  !foreach(op, [G_FREEZE, G_FABS, G_FCANONICALIZE],
           (pattern (op $dst, $src), (op $src, $x)))>;

def idempotent_prop : GICombineRule<
   (defs root:$dst),
   (match (idempotent_prop_frags $dst, $src)),
   (apply (GIReplaceReg $dst, $src))>;


// Combine (zero/sign/any-)extending loads
// (see CombinerHelper::matchCombineExtendingLoads).
def extending_loads : GICombineRule<
  (defs root:$root, extending_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
         [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;

// Combine a G_AND masking a load (see matchCombineLoadWithAndMask).
def load_and_mask : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
        [{ return Helper.matchCombineLoadWithAndMask(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
def combines_for_extload: GICombineGroup<[extending_loads, load_and_mask]>;

// Fold a G_SEXT_INREG fed by a sign-extending load
// (see CombinerHelper::matchSextTruncSextLoad).
def sext_trunc_sextload : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_SEXT_INREG):$d,
         [{ return Helper.matchSextTruncSextLoad(*${d}); }]),
  (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>;

// Combine G_SEXT_INREG of a load (see CombinerHelper::matchSextInRegOfLoad).
def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">;
def sext_inreg_of_load : GICombineRule<
  (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_SEXT_INREG):$root,
         [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>;

// Turn a G_SEXT_INREG into a zero-extension-in-register when the sign bit of
// the extended region is known to be zero.
def sext_inreg_to_zext_inreg : GICombineRule<
  (defs root:$dst),
  (match
    (G_SEXT_INREG $dst, $src, $imm):$root,
      [{
        unsigned BitWidth = MRI.getType(${src}.getReg()).getScalarSizeInBits();
        return Helper.getKnownBits()->maskedValueIsZero(${src}.getReg(),
                 APInt::getOneBitSet(BitWidth, ${imm}.getImm() - 1)); }]),
    (apply [{
      Helper.getBuilder().setInstrAndDebugLoc(*${root});
      Helper.getBuilder().buildZExtInReg(${dst}, ${src}, ${imm}.getImm());
      ${root}->eraseFromParent();
  }])
>;

// Combine a G_EXTRACT_VECTOR_ELT of a loaded vector
// (see CombinerHelper::matchCombineExtractedVectorLoad).
def combine_extracted_vector_load : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
        [{ return Helper.matchCombineExtractedVectorLoad(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

// Form pre/post-indexed loads and stores
// (see CombinerHelper::matchCombineIndexedLoadStore).
def combine_indexed_load_store : GICombineRule<
  (defs root:$root, indexed_load_store_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root,
         [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>;

// Optimize a G_BR by inverting the condition of a preceding branch
// (see CombinerHelper::matchOptBrCondByInvertingCond).
def opt_brcond_by_inverting_cond_matchdata : GIDefMatchData<"MachineInstr *">;
def opt_brcond_by_inverting_cond : GICombineRule<
  (defs root:$root, opt_brcond_by_inverting_cond_matchdata:$matchinfo),
  (match (wip_match_opcode G_BR):$root,
         [{ return Helper.matchOptBrCondByInvertingCond(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}, ${matchinfo}); }])>;

// Combine chains of G_PTR_ADD with immediate offsets
// (see CombinerHelper::matchPtrAddImmedChain).
def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">;
def ptr_add_immed_chain : GICombineRule<
  (defs root:$d, ptr_add_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$d,
         [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>;

// Replace out-of-range shifts with undef (see matchShiftsTooBig).
def shifts_too_big : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root,
         [{ return Helper.matchShiftsTooBig(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Fold shift (shift base x), y -> shift base, (x+y), if shifts are same
def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">;
def shift_immed_chain : GICombineRule<
  (defs root:$d, shift_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d,
         [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>;

// Transform shift (logic (shift X, C0), Y), C1
//        -> logic (shift X, (C0+C1)), (shift Y, C1), if shifts are same
def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">;
def shift_of_shifted_logic_chain : GICombineRule<
  (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d,
         [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>;

// Turn a G_MUL into a shift where profitable
// (see CombinerHelper::matchCombineMulToShl).
def mul_to_shl_matchdata : GIDefMatchData<"unsigned">;
def mul_to_shl : GICombineRule<
  (defs root:$d, mul_to_shl_matchdata:$matchinfo),
  (match (G_MUL $d, $op1, $op2):$mi,
         [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>;

// shl ([asz]ext x), y => zext (shl x, y), if shift does not overflow int
def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">;
def reduce_shl_of_extend : GICombineRule<
  (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo),
  (match (G_SHL $dst, $src0, $src1):$mi,
         [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>;

// Combine (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
// Combine (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
def commute_shift : GICombineRule<
  (defs root:$d, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_SHL):$d,
         [{ return Helper.matchCommuteShift(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${d}, ${matchinfo}); }])>;

// Narrow a binary op feeding a G_AND mask
// (see CombinerHelper::matchNarrowBinopFeedingAnd).
def narrow_binop_feeding_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchNarrowBinopFeedingAnd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// [us]itofp(undef) = 0, because the result value is bounded.
def undef_to_fp_zero : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UITOFP, G_SITOFP):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithFConstant(*${root}, 0.0); }])>;

// and/mul with an undef operand -> 0.
def undef_to_int_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_AND, G_MUL):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

// or with an undef operand -> -1 (all ones).
def undef_to_negative_one: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>;

// shl/udiv/urem with an undef first source operand -> 0.
def binop_left_undef_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL, G_UDIV, G_UREM):$root,
         [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

// Shift by an undef amount -> undef.
def binop_right_undef_to_undef: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root,
         [{ return Helper.matchOperandIsUndef(*${root}, 2); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// abs(undef) -> 0.
def unary_undef_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ABS):$root,
         [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

// Instructions where if any source operand is undef, the instruction can be
// replaced with undef.
def propagate_undef_any_op: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Instructions where if all source operands are undef, the instruction can be
// replaced with undef.
def propagate_undef_all_ops: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
          [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
def propagate_undef_shuffle_mask: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return Helper.matchUndefShuffleVectorMask(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Replace a G_SHUFFLE_VECTOR with a G_EXTRACT_VECTOR_ELT.
def shuffle_to_extract: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return Helper.matchShuffleToExtract(*${root}); }]),
  (apply [{ Helper.applyShuffleToExtract(*${root}); }])>;

// Replace an insert/extract element of an out of bounds index with undef.
def insert_extract_vec_elt_out_of_bounds : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_INSERT_VECTOR_ELT, G_EXTRACT_VECTOR_ELT):$root,
         [{ return Helper.matchInsertExtractVecEltOutOfBounds(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Fold (cond ? x : x) -> x
def select_same_val: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchSelectSameVal(*${root}); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Fold (undef ? x : y) -> y
def select_undef_cmp: GICombineRule<
  (defs root:$dst),
  (match (G_IMPLICIT_DEF $undef),
         (G_SELECT $dst, $undef, $x, $y)),
  (apply (GIReplaceReg $dst, $y))
>;

// Fold (true ? x : y) -> x
// Fold (false ? x : y) -> y
def select_constant_cmp_matchdata : GIDefMatchData<"unsigned">;
def select_constant_cmp: GICombineRule<
  (defs root:$root, select_constant_cmp_matchdata:$matchinfo),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }])
>;

// Turn a select into logical operations where possible
// (see CombinerHelper::matchSelectToLogical).
def select_to_logical : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchSelectToLogical(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])
>;

// Fold (C op x) -> (x op C)
// TODO: handle more isCommutable opcodes
// TODO: handle compares (currently not marked as isCommutable)
def commute_int_constant_to_rhs : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ADD, G_MUL, G_AND, G_OR, G_XOR):$root,
    [{ return Helper.matchCommuteConstantToRHS(*${root}); }]),
  (apply [{ Helper.applyCommuteBinOpOperands(*${root}); }])
>;

def commute_fp_constant_to_rhs : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FADD, G_FMUL):$root,
    [{ return Helper.matchCommuteFPConstantToRHS(*${root}); }]),
  (apply [{ Helper.applyCommuteBinOpOperands(*${root}); }])
>;

def commute_constant_to_rhs : GICombineGroup<[
  commute_int_constant_to_rhs,
  commute_fp_constant_to_rhs
]>;

// Fold x op 0 -> x
def right_identity_zero_frags : GICombinePatFrag<
  (outs root:$dst), (ins $x),
  !foreach(op,
           [G_SUB, G_ADD, G_OR, G_XOR, G_SHL, G_ASHR,
            G_LSHR, G_PTR_ADD, G_ROTL, G_ROTR],
           (pattern (op $dst, $x, 0)))>;
def right_identity_zero: GICombineRule<
  (defs root:$dst),
  (match (right_identity_zero_frags $dst, $lhs)),
  (apply (GIReplaceReg $dst, $lhs))
>;

// Fold (fadd x, -0.0) -> x
def right_identity_neg_zero_fp: GICombineRule<
  (defs root:$dst),
  (match (G_FADD $dst, $x, $y):$root,
    [{ return Helper.matchConstantFPOp(${y}, -0.0); }]),
  (apply (GIReplaceReg $dst, $x))
>;

// Fold x op 1 -> x
def right_identity_one_int: GICombineRule<
  (defs root:$dst),
  (match (G_MUL $dst, $x, 1)),
  (apply (GIReplaceReg $dst, $x))
>;

// Fold (fmul x, 1.0) -> x
def right_identity_one_fp: GICombineRule<
  (defs root:$dst),
  (match (G_FMUL $dst, $x, $y):$root,
    [{ return Helper.matchConstantFPOp(${y}, 1.0); }]),
  (apply (GIReplaceReg $dst, $x))
>;

def right_identity_one : GICombineGroup<[right_identity_one_int, right_identity_one_fp]>;

// Fold (x op x) -> x
def binop_same_val_frags : GICombinePatFrag<
  (outs root:$dst), (ins $x),
  [
    (pattern (G_AND $dst, $x, $x)),
    (pattern (G_OR $dst, $x, $x)),
  ]
>;
def binop_same_val: GICombineRule<
  (defs root:$dst),
  (match (binop_same_val_frags $dst, $src)),
  (apply (GIReplaceReg $dst, $src))
>;

// Fold (0 op x) -> 0
def binop_left_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchOperandIsZero(*${root}, 1); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Simplify urem by a divisor known to be a power of two
// (see CombinerHelper::applySimplifyURemByPow2).
def urem_pow2_to_mask : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UREM):$root,
    [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]),
  (apply [{ Helper.applySimplifyURemByPow2(*${root}); }])
>;

// Push a binary operator through a select on constants.
//
// binop (select cond, K0, K1), K2 ->
//   select cond, (binop K0, K2), (binop K1, K2)

// Every binary operator that has constant folding. We currently do
// not have constant folding for G_FPOW, G_FMAXNUM_IEEE or
// G_FMINNUM_IEEE.
def fold_binop_into_select : GICombineRule<
  (defs root:$root, unsigned_matchinfo:$select_op_no),
  (match (wip_match_opcode
    G_ADD, G_SUB, G_PTR_ADD, G_AND, G_OR, G_XOR,
    G_SDIV, G_SREM, G_UDIV, G_UREM, G_LSHR, G_ASHR, G_SHL,
    G_SMIN, G_SMAX, G_UMIN, G_UMAX,
    G_FMUL, G_FADD, G_FSUB, G_FDIV, G_FREM,
    G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
    [{ return Helper.matchFoldBinOpIntoSelect(*${root}, ${select_op_no}); }]),
  (apply [{ Helper.applyFoldBinOpIntoSelect(*${root}, ${select_op_no}); }])
>;

// Transform d = [su]div(x, y) and r = [su]rem(x, y) -> d, r = [su]divrem(x, y)
def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">;
def div_rem_to_divrem : GICombineRule<
  (defs root:$root, div_rem_to_divrem_matchdata:$matchinfo),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchCombineDivRem(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineDivRem(*${root}, ${matchinfo}); }])
>;

// Fold (x op 0) -> 0
def binop_right_to_zero: GICombineRule<
  (defs root:$dst),
  (match (G_MUL $dst, $lhs, 0:$zero)),
  (apply (GIReplaceReg $dst, $zero))
>;

// Erase stores of undef values.
def erase_undef_store : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_STORE):$root,
    [{ return Helper.matchUndefStore(*${root}); }]),
  (apply [{ Helper.eraseInst(*${root}); }])
>;

// Simplify a G_ADD into a G_SUB where profitable
// (see CombinerHelper::matchSimplifyAddToSub).
def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
def simplify_add_to_sub: GICombineRule <
  (defs root:$root, simplify_add_to_sub_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchSimplifyAddToSub(*${root}, ${info}); }]),
  (apply [{ Helper.applySimplifyAddToSub(*${root}, ${info});}])
>;

// Fold fp_op(cst) to the constant result of the floating point operation.
class constant_fold_unary_fp_op_rule<Instruction opcode> : GICombineRule <
  (defs root:$dst),
  (match (opcode $dst, $src0):$root, (G_FCONSTANT $src0, $cst)),
  (apply [{ Helper.applyCombineConstantFoldFpUnary(*${root}, ${cst}.getFPImm()); }])
>;

def constant_fold_fneg : constant_fold_unary_fp_op_rule<G_FNEG>;
def constant_fold_fabs : constant_fold_unary_fp_op_rule<G_FABS>;
def constant_fold_fsqrt : constant_fold_unary_fp_op_rule<G_FSQRT>;
def constant_fold_flog2 : constant_fold_unary_fp_op_rule<G_FLOG2>;
def constant_fold_fptrunc : constant_fold_unary_fp_op_rule<G_FPTRUNC>;

// Fold constant zero int to fp conversions.
class itof_const_zero_fold_rule<Instruction opcode> : GICombineRule <
  (defs root:$dst),
  (match (opcode $dst, 0)),
  // Can't use COPY $dst, 0 here because the 0 operand may be a smaller type
  // than the destination for itofp.
  (apply [{ Helper.replaceInstWithFConstant(*${dst}.getParent(), 0.0); }])
>;
def itof_const_zero_fold_si : itof_const_zero_fold_rule<G_SITOFP>;
def itof_const_zero_fold_ui : itof_const_zero_fold_rule<G_UITOFP>;

def constant_fold_fp_ops : GICombineGroup<[
  constant_fold_fneg,
  constant_fold_fabs,
  constant_fold_fsqrt,
  constant_fold_flog2,
  constant_fold_fptrunc,
  itof_const_zero_fold_si,
  itof_const_zero_fold_ui
]>;

// Fold int2ptr(ptr2int(x)) -> x
def p2i_to_i2p: GICombineRule<
  (defs root:$root, register_matchinfo:$info),
  (match (wip_match_opcode G_INTTOPTR):$root,
    [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineI2PToP2I(*${root}, ${info}); }])
>;

// Fold ptr2int(int2ptr(x)) -> x
def i2p_to_p2i: GICombineRule<
  (defs root:$dst, register_matchinfo:$info),
  (match (G_INTTOPTR $t, $ptr),
         (G_PTRTOINT $dst, $t):$mi,
    [{ ${info} = ${ptr}.getReg(); return true; }]),
  (apply [{ Helper.applyCombineP2IToI2P(*${mi}, ${info}); }])
>;

// Fold add ptrtoint(x), y -> ptrtoint (ptr_add x), y
def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">;
def add_p2i_to_ptradd : GICombineRule<
  (defs root:$root, add_p2i_to_ptradd_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }])
>;

// Fold (ptr_add (int2ptr C1), C2) -> C1 + C2
def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"APInt">;
def const_ptradd_to_i2p: GICombineRule<
  (defs root:$root, const_ptradd_to_i2p_matchinfo:$info),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }])
>;

// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
def hoist_logic_op_with_same_opcode_hands: GICombineRule <
  (defs root:$root, instruction_steps_matchdata:$info),
  (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
    [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildInstructionSteps(*${root}, ${info});}])
>;

// Fold ashr (shl x, C), C -> sext_inreg (C)
def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">;
def shl_ashr_to_sext_inreg : GICombineRule<
  (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info),
  (match (wip_match_opcode G_ASHR): $root,
    [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
  (apply [{ Helper.applyAshShlToSextInreg(*${root}, ${info});}])
>;

// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
def overlapping_and: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchOverlappingAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y.
def redundant_and: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y.
def redundant_or: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// If the input is already sign extended, just drop the extension.
// sext_inreg x, K ->
//   if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1)
def redundant_sext_inreg: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_SEXT_INREG):$root,
         [{ return Helper.matchRedundantSExtInReg(*${root}); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (anyext (trunc x)) -> x if the source type is same as
// the destination type.
def anyext_trunc_fold: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT):$root,
         [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (zext (trunc x)) -> x if the source type is same as the destination type
// and truncated bits are known to be zero.
def zext_trunc_fold_matchinfo : GIDefMatchData<"Register">;
def zext_trunc_fold: GICombineRule <
  (defs root:$root, zext_trunc_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ZEXT):$root,
         [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">;
def ext_ext_fold: GICombineRule <
  (defs root:$root, ext_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT, G_SEXT, G_ZEXT):$root,
         [{ return Helper.matchCombineExtOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }])
>;

// Fold a G_XOR that acts as a logical 'not' of a comparison
// (see CombinerHelper::matchNotCmp).
def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">;
def not_cmp_fold : GICombineRule<
  (defs root:$d, not_cmp_fold_matchinfo:$info),
  (match (wip_match_opcode G_XOR): $d,
  [{ return Helper.matchNotCmp(*${d}, ${info}); }]),
  (apply [{ Helper.applyNotCmp(*${d}, ${info}); }])
>;

// Fold (fneg (fneg x)) -> x.
def fneg_fneg_fold: GICombineRule <
  (defs root:$dst),
  (match (G_FNEG $t, $src),
         (G_FNEG $dst, $t)),
  (apply (GIReplaceReg $dst, $src))
>;

// Fold (unmerge(merge x, y, z)) -> x, y, z.
def unmerge_merge_matchinfo : GIDefMatchData<"SmallVector<Register, 8>">;
def unmerge_merge : GICombineRule<
  (defs root:$d, unmerge_merge_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }])
>;

// Fold merge(unmerge).
def merge_unmerge : GICombineRule<
  (defs root:$d, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_MERGE_VALUES):$d,
  [{ return Helper.matchCombineMergeUnmerge(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${d}, ${matchinfo}); }])
>;

767// Fold (fabs (fneg x)) -> (fabs x).
768def fabs_fneg_fold: GICombineRule <
769  (defs root:$dst),
770  (match  (G_FNEG $tmp, $x),
771          (G_FABS $dst, $tmp)),
772  (apply (G_FABS $dst, $x))>;
773
// Fold (unmerge cst) -> cst1, cst2, ...
// Matchinfo holds the per-lane constant values to materialize.
def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">;
def unmerge_cst : GICombineRule<
  (defs root:$d, unmerge_cst_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }])
>;

// Fold (unmerge undef) -> undef, undef, ...
def unmerge_undef : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $root,
         [{ return Helper.matchCombineUnmergeUndef(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Transform x,y<dead> = unmerge z -> x = trunc z.
def unmerge_dead_to_trunc : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }])
>;

// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0.
def unmerge_zext_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }])
>;
806
// Fold trunc ([asz]ext x) -> x or ([asz]ext x) or (trunc x).
// Matchinfo pairs the source register with an opcode -- presumably the
// replacement instruction to build; see Helper.matchCombineTruncOfExt.
def trunc_ext_fold_matchinfo : GIDefMatchData<"std::pair<Register, unsigned>">;
def trunc_ext_fold: GICombineRule <
  (defs root:$root, trunc_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }])
>;

// Under certain conditions, transform:
//  trunc (shl x, K)     -> shl (trunc x), K
//  trunc ([al]shr x, K) -> (trunc ([al]shr (trunc x), K))
def trunc_shift_matchinfo : GIDefMatchData<"std::pair<MachineInstr*, LLT>">;
def trunc_shift: GICombineRule <
  (defs root:$root, trunc_shift_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfShift(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfShift(*${root}, ${matchinfo}); }])
>;
826
// Transform (mul x, -1) -> (sub 0, x)
def mul_by_neg_one: GICombineRule <
  (defs root:$dst),
  (match (G_MUL $dst, $x, -1)),
  (apply (G_SUB $dst, 0, $x))
>;

// Fold (xor (and x, y), y) -> (and (not x), y)
// Matchinfo carries the two registers the apply step rewrites with.
def xor_of_and_with_same_reg_matchinfo :
    GIDefMatchData<"std::pair<Register, Register>">;
def xor_of_and_with_same_reg: GICombineRule <
  (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo),
  (match (wip_match_opcode G_XOR):$root,
         [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }])
>;

// Transform (ptr_add 0, x) -> (int_to_ptr x)
def ptr_add_with_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_PTR_ADD):$root,
         [{ return Helper.matchPtrAddZero(*${root}); }]),
  (apply [{ Helper.applyPtrAddZero(*${root}); }])>;
850
// Combine chains of G_INSERT_VECTOR_ELT into a build_vector when possible;
// matchinfo collects the element registers.
def regs_small_vec : GIDefMatchData<"SmallVector<Register, 4>">;
def combine_insert_vec_elts_build_vector : GICombineRule<
  (defs root:$root, regs_small_vec:$info),
  (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
    [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>;

// Combine an OR-tree of loaded bytes into a wider load where profitable;
// exact conditions live in Helper.matchLoadOrCombine.
def load_or_combine : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Push extends through G_PHI nodes -- see Helper.matchExtendThroughPhis.
def extend_through_phis_matchdata: GIDefMatchData<"MachineInstr*">;
def extend_through_phis : GICombineRule<
  (defs root:$root, extend_through_phis_matchdata:$matchinfo),
  (match (wip_match_opcode G_PHI):$root,
    [{ return Helper.matchExtendThroughPhis(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtendThroughPhis(*${root}, ${matchinfo}); }])>;

// Currently contains only combine_insert_vec_elts_build_vector.
def insert_vec_elt_combines : GICombineGroup<
                            [combine_insert_vec_elts_build_vector]>;
874
// Fold an extract_vector_elt of a build_vector to the inserted scalar;
// matchinfo is the replacement register.
def extract_vec_elt_build_vec : GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
    [{ return Helper.matchExtractVecEltBuildVec(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractVecEltBuildVec(*${root}, ${matchinfo}); }])>;

// Fold away full elt extracts from a build_vector.
def extract_all_elts_from_build_vector_matchinfo :
  GIDefMatchData<"SmallVector<std::pair<Register, MachineInstr*>>">;
def extract_all_elts_from_build_vector : GICombineRule<
  (defs root:$root, extract_all_elts_from_build_vector_matchinfo:$matchinfo),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
    [{ return Helper.matchExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }])>;

def extract_vec_elt_combines : GICombineGroup<[
  extract_vec_elt_build_vec,
  extract_all_elts_from_build_vector]>;
893
// Form a funnel shift from a matching G_OR of shifts -- see
// Helper.matchOrShiftToFunnelShift for the exact patterns.
def funnel_shift_from_or_shift : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchOrShiftToFunnelShift(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Convert an eligible G_FSHL/G_FSHR into a rotate; conditions are checked by
// Helper.matchFunnelShiftToRotate.
def funnel_shift_to_rotate : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FSHL, G_FSHR):$root,
    [{ return Helper.matchFunnelShiftToRotate(*${root}); }]),
  (apply [{ Helper.applyFunnelShiftToRotate(*${root}); }])
>;

// Fold fshr x, y, 0 -> y
def funnel_shift_right_zero: GICombineRule<
  (defs root:$root),
  (match (G_FSHR $x, $y, $z, 0):$root),
  (apply (COPY $x, $z))
>;

// Fold fshl x, y, 0 -> x
def funnel_shift_left_zero: GICombineRule<
  (defs root:$root),
  (match (G_FSHL $x, $y, $z, 0):$root),
  (apply (COPY $x, $y))
>;

// Fold fsh(l/r) x, y, C -> fsh(l/r) x, y, C % bw
// Operand index 3 is the shift amount (cf. the G_FSHR pattern above).
def funnel_shift_overshift: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FSHL, G_FSHR):$root,
    [{ return Helper.matchConstantLargerBitWidth(*${root}, 3); }]),
  (apply [{ Helper.applyFunnelShiftConstantModulo(*${root}); }])
>;
929
// Normalize rotate amounts that are out of range -- presumably reduced
// modulo the bit width; see Helper.matchRotateOutOfRange.
def rotate_out_of_range : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ROTR, G_ROTL):$root,
    [{ return Helper.matchRotateOutOfRange(*${root}); }]),
  (apply [{ Helper.applyRotateOutOfRange(*${root}); }])
>;

// Replace a G_ICMP whose result is decidable from known bits with the
// constant result held in matchinfo.
def icmp_to_true_false_known_bits : GICombineRule<
  (defs root:$d, int64_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ICMP):$d,
         [{ return Helper.matchICmpToTrueFalseKnownBits(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

// Simplify a G_ICMP using known bits of its LHS; the rewrite is built by the
// lambda stored in matchinfo.
def icmp_to_lhs_known_bits : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return Helper.matchICmpToLHSKnownBits(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Strip a redundant binary op feeding an equality compare -- see
// Helper.matchRedundantBinOpInEquality for the accepted patterns.
def redundant_binop_in_equality : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return Helper.matchRedundantBinOpInEquality(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
954
// Simplify G_AND when the operands have disjoint masks -- see
// Helper.matchAndOrDisjointMask. Note the NoErase apply variant.
def and_or_disjoint_mask : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchAndOrDisjointMask(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${info}); }])>;

// Form a bitfield extract (UBFX-style) from a matching G_AND pattern.
def bitfield_extract_from_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
    [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def funnel_shift_combines : GICombineGroup<[funnel_shift_from_or_shift,
                                            funnel_shift_to_rotate,
                                            funnel_shift_right_zero,
                                            funnel_shift_left_zero,
                                            funnel_shift_overshift]>;
972
// Form a signed bitfield extract from G_SEXT_INREG.
def bitfield_extract_from_sext_inreg : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_SEXT_INREG):$root,
    [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Form a bitfield extract from a shift-right by constant.
def bitfield_extract_from_shr : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
    [{ return Helper.matchBitfieldExtractFromShr(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Form a bitfield extract from a shift-right of a masked (G_AND) value.
def bitfield_extract_from_shr_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
    [{ return Helper.matchBitfieldExtractFromShrAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg,
                                            bitfield_extract_from_and,
                                            bitfield_extract_from_shr,
                                            bitfield_extract_from_shr_and]>;
995
// Replace division by a constant with a cheaper sequence (presumably
// multiply-by-magic-constant) -- see Helper.matchUDivByConst.
def udiv_by_const : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UDIV):$root,
   [{ return Helper.matchUDivByConst(*${root}); }]),
  (apply [{ Helper.applyUDivByConst(*${root}); }])>;

// Signed-division counterpart of udiv_by_const.
def sdiv_by_const : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SDIV):$root,
   [{ return Helper.matchSDivByConst(*${root}); }]),
  (apply [{ Helper.applySDivByConst(*${root}); }])>;

def intdiv_combines : GICombineGroup<[udiv_by_const, sdiv_by_const]>;
1009
// Reassociate chained G_PTR_ADDs -- see Helper.matchReassocPtrAdd.
def reassoc_ptradd : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchReassocPtrAdd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Reassociate commutative binary ops (currently rooted at G_ADD).
def reassoc_comm_binops : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (G_ADD $root, $src1, $src2):$root,
    [{ return Helper.matchReassocCommBinOp(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

def reassocs : GICombineGroup<[reassoc_ptradd, reassoc_comm_binops]>;
1023
// Constant fold operations.

// Fold integer binops with constant operands to a constant (matchinfo).
def constant_fold_binop : GICombineRule<
  (defs root:$d, apint_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR, G_SHL, G_LSHR, G_ASHR):$d,
   [{ return Helper.matchConstantFoldBinOp(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

// Fold FP binops with constant operands to an FP constant.
def constant_fold_fp_binop : GICombineRule<
  (defs root:$d, constantfp_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV):$d,
   [{ return Helper.matchConstantFoldFPBinOp(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithFConstant(*${d}, ${matchinfo}); }])>;


// Fold G_FMA/G_FMAD with constant operands to an FP constant.
def constant_fold_fma : GICombineRule<
  (defs root:$d, constantfp_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FMAD, G_FMA):$d,
   [{ return Helper.matchConstantFoldFMA(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithFConstant(*${d}, ${matchinfo}); }])>;

// Fold extends of constants to the extended constant.
def constant_fold_cast_op : GICombineRule<
  (defs root:$d, apint_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ZEXT, G_SEXT, G_ANYEXT):$d,
   [{ return Helper.matchConstantFoldCastOp(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;
1049
// Simplify overflowing multiply by 2 -- see Helper.matchMulOBy2.
def mulo_by_2: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UMULO, G_SMULO):$root,
         [{ return Helper.matchMulOBy2(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Simplify overflowing multiply by 0 (result 0, no overflow).
def mulo_by_0: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UMULO, G_SMULO):$root,
         [{ return Helper.matchMulOBy0(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

// Simplify overflowing add with a 0 operand.
def addo_by_0: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UADDO, G_SADDO):$root,
         [{ return Helper.matchAddOBy0(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

// Transform (uadde x, y, 0) -> (uaddo x, y)
//           (sadde x, y, 0) -> (saddo x, y)
//           (usube x, y, 0) -> (usubo x, y)
//           (ssube x, y, 0) -> (ssubo x, y)
def adde_to_addo: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UADDE, G_SADDE, G_USUBE, G_SSUBE):$root,
         [{ return Helper.matchAddEToAddO(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
1077
// Lower an eligible G_UMULH to a right shift -- see Helper.matchUMulHToLShr.
def mulh_to_lshr : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UMULH):$root,
         [{ return Helper.matchUMulHToLShr(*${root}); }]),
  (apply [{ Helper.applyUMulHToLShr(*${root}); }])>;

def mulh_combines : GICombineGroup<[mulh_to_lshr]>;

// Cancel paired negations on operands of FP arithmetic -- see
// Helper.matchRedundantNegOperands.
def redundant_neg_operands: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMAD, G_FMA):$root,
    [{ return Helper.matchRedundantNegOperands(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Transform (fsub +-0.0, X) -> (fneg X)
def fsub_to_fneg: GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FSUB):$root,
    [{ return Helper.matchFsubToFneg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyFsubToFneg(*${root}, ${matchinfo}); }])>;
1098
// FMA/FMAD formation rules. Whether fma or fmad is emitted (and any fast-math
// / legality preconditions) is decided inside the CombinerHelper match
// functions, not here.

// Transform (fadd x, (fmul y, z)) -> (fma y, z, x)
//           (fadd x, (fmul y, z)) -> (fmad y, z, x)
// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
//           (fadd (fmul x, y), z) -> (fmad x, y, z)
def combine_fadd_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
//                                         -> (fmad (fpext x), (fpext y), z)
// Transform (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
//                                         -> (fmad (fpext y), (fpext z), x)
def combine_fadd_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMA(*${root},
                                                               ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fmul z, u)), v)  -> (fma x, y, (fma z, u, v))
//           (fadd (fmad x, y, (fmul z, u)), v) -> (fmad x, y, (fmad z, u, v))
// Transform (fadd v, (fma x, y, (fmul z, u)))  -> (fma x, y, (fma z, u, v))
//           (fadd v, (fmad x, y, (fmul z, u))) -> (fmad x, y, (fmad z, u, v))
def combine_fadd_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFMAFMulToFMadOrFMA(*${root},
                                                             ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fpext (fmul u, v))), z) ->
//           (fma x, y, (fma (fpext u), (fpext v), z))
def combine_fadd_fpext_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
                                                  *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
//                                 -> (fmad x, y, -z)
def combine_fsub_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
//           (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x)
def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root},
                                                              ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fpext (fmul x, y)), z) ->
//           (fma (fpext x), (fpext y), (fneg z))
def combine_fsub_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFpExtFMulToFMadOrFMA(*${root},
                                                               ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fpext (fmul x, y))), z) ->
//           (fneg (fma (fpext x), (fpext y), z))
def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA(
                                            *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
1176
// Simplify an FP min/max when one operand is NaN; matchinfo is the operand
// index that replaces the result -- see Helper.matchCombineFMinMaxNaN.
def combine_minmax_nan: GICombineRule<
  (defs root:$root, unsigned_matchinfo:$info),
  (match (wip_match_opcode G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
         [{ return Helper.matchCombineFMinMaxNaN(*${root}, ${info}); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${info}); }])>;

// Transform (add x, (sub y, x)) -> y
// Transform (add (sub y, x), x) -> y
def add_sub_reg_frags : GICombinePatFrag<
  (outs root:$dst), (ins $src),
  [
    (pattern (G_ADD $dst, $x, $tmp), (G_SUB $tmp, $src, $x)),
    (pattern (G_ADD $dst, $tmp, $x), (G_SUB $tmp, $src, $x))
  ]>;
def add_sub_reg: GICombineRule <
  (defs root:$dst),
  (match (add_sub_reg_frags $dst, $src)),
  (apply (GIReplaceReg $dst, $src))>;
1195
// Fold a build_vector that reproduces an existing value; matchinfo is the
// replacement register.
def buildvector_identity_fold : GICombineRule<
  (defs root:$build_vector, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_BUILD_VECTOR_TRUNC, G_BUILD_VECTOR):$build_vector,
         [{ return Helper.matchBuildVectorIdentityFold(*${build_vector}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${build_vector}, ${matchinfo}); }])>;

// Fold a trunc of a build_vector -- see Helper.matchTruncBuildVectorFold.
def trunc_buildvector_fold : GICombineRule<
  (defs root:$op, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$op,
      [{ return Helper.matchTruncBuildVectorFold(*${op}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>;

// Fold a trunc of an lshr of a build_vector -- see
// Helper.matchTruncLshrBuildVectorFold.
def trunc_lshr_buildvector_fold : GICombineRule<
  (defs root:$op, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$op,
      [{ return Helper.matchTruncLshrBuildVectorFold(*${op}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>;

// Transform:
//   (x + y) - y -> x
//   (x + y) - x -> y
//   x - (y + x) -> 0 - y
//   x - (x + z) -> 0 - z
def sub_add_reg: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_SUB):$root,
         [{ return Helper.matchSubAddSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
1224
// Fold (bitcast (bitcast x)) -> x when the outer destination type matches
// the innermost source type.
def bitcast_bitcast_fold : GICombineRule<
  (defs root:$dst),
  (match (G_BITCAST $dst, $src1):$op, (G_BITCAST $src1, $src0),
      [{ return MRI.getType(${src0}.getReg()) == MRI.getType(${dst}.getReg()); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${src0}.getReg()); }])>;


// Fold (fptrunc (fpext x)) -> x when the types round-trip exactly.
def fptrunc_fpext_fold : GICombineRule<
  (defs root:$dst),
  (match (G_FPTRUNC $dst, $src1):$op, (G_FPEXT $src1, $src0),
      [{ return MRI.getType(${src0}.getReg()) == MRI.getType(${dst}.getReg()); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${src0}.getReg()); }])>;


// Turn an eligible G_SELECT into a min/max operation -- see
// Helper.matchSimplifySelectToMinMax.
def select_to_minmax: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_SELECT):$root,
         [{ return Helper.matchSimplifySelectToMinMax(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
1244
// Combine groups. List order within a group encodes priority (see the
// GICombineGroup comment at the top of this file); do not reorder casually.

// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
                                     undef_to_negative_one,
                                     binop_left_undef_to_zero,
                                     binop_right_undef_to_undef,
                                     unary_undef_to_zero,
                                     propagate_undef_any_op,
                                     propagate_undef_all_ops,
                                     propagate_undef_shuffle_mask,
                                     erase_undef_store,
                                     unmerge_undef,
                                     insert_extract_vec_elt_out_of_bounds]>;

def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
                                        binop_same_val, binop_left_to_zero,
                                        binop_right_to_zero, p2i_to_i2p,
                                        i2p_to_p2i, anyext_trunc_fold,
                                        fneg_fneg_fold, right_identity_one,
                                        add_sub_reg, buildvector_identity_fold,
                                        trunc_buildvector_fold,
                                        trunc_lshr_buildvector_fold,
                                        bitcast_bitcast_fold, fptrunc_fpext_fold,
                                        right_identity_neg_zero_fp]>;

def const_combines : GICombineGroup<[constant_fold_fp_ops, const_ptradd_to_i2p,
                                     overlapping_and, mulo_by_2, mulo_by_0,
                                     addo_by_0, adde_to_addo,
                                     combine_minmax_nan]>;

def known_bits_simplifications : GICombineGroup<[
  redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
  zext_trunc_fold, icmp_to_true_false_known_bits, icmp_to_lhs_known_bits,
  sext_inreg_to_zext_inreg]>;

def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend,
                                               narrow_binop_feeding_and]>;

def phi_combines : GICombineGroup<[extend_through_phis]>;

def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp,
                                      select_to_logical]>;

def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd,
                                       mul_by_neg_one, idempotent_prop]>;

def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
  combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma,
  combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>;

def constant_fold_binops : GICombineGroup<[constant_fold_binop,
                                           constant_fold_fp_binop]>;
1298
// The full set of combines; targets typically include this group in their
// combiner definitions and opt out of individual rules as needed.
def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
    extract_vec_elt_combines, combines_for_extload, combine_extracted_vector_load,
    undef_combines, identity_combines, phi_combines,
    simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands, shifts_too_big,
    reassocs, ptr_add_immed_chain,
    shl_ashr_to_sext_inreg, sext_inreg_of_load,
    width_reduction_combines, select_combines,
    known_bits_simplifications, ext_ext_fold,
    not_cmp_fold, opt_brcond_by_inverting_cond,
    unmerge_merge, unmerge_cst, unmerge_dead_to_trunc,
    unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shift,
    const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
    shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
    div_rem_to_divrem, funnel_shift_combines, commute_shift,
    form_bitfield_extract, constant_fold_binops, constant_fold_fma,
    constant_fold_cast_op, fabs_fneg_fold,
    intdiv_combines, mulh_combines, redundant_neg_operands,
    and_or_disjoint_mask, fma_combines, fold_binop_into_select,
    sub_add_reg, select_to_minmax, redundant_binop_in_equality,
    fsub_to_fneg, commute_constant_to_rhs]>;

// A combine group used for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
// compile time performance.
def optnone_combines : GICombineGroup<[trivial_combines,
    ptr_add_immed_chain, combines_for_extload,
    not_cmp_fold, opt_brcond_by_inverting_cond]>;
1326