//===- Combine.td - Combine rule definitions ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declare GlobalISel combine rules and provide mechanisms to opt-out.
//
//===----------------------------------------------------------------------===//

// Common base class for GICombineRule and GICombineGroup.
class GICombine {
  // See GICombineGroup. We only declare it here to make the tablegen pass
  // simpler.
  list<GICombine> Rules = ?;
}

// A group of combine rules that can be added to a GICombiner or another group.
class GICombineGroup<list<GICombine> rules> : GICombine {
  // The rules contained in this group. The rules in a group are flattened into
  // a single list and sorted into whatever order is most efficient. However,
  // they will never be re-ordered such that behaviour differs from the
  // specified order. It is therefore possible to use the order of rules in this
  // list to describe priorities.
  let Rules = rules;
}

class GICombinerHelperArg<string type, string name> {
  string Type = type;
  string Name = name;
}

// Declares a combiner helper class
class GICombinerHelper<string classname, list<GICombine> rules>
    : GICombineGroup<rules> {
  // The class name to use in the generated output.
  string Classname = classname;
  // The name of a run-time compiler option that will be generated to disable
  // specific rules within this combiner.
  string DisableRuleOption = ?;
  // The state class to inherit from (if any). The generated helper will inherit
  // from this class and will forward arguments to its constructors.
  string StateClass = "";
  // Any additional arguments that should be appended to the tryCombine*().
  list<GICombinerHelperArg> AdditionalArguments =
      [GICombinerHelperArg<"CombinerHelper &", "Helper">];
}
class GICombineRule<dag defs, dag match, dag apply> : GICombine {
  /// Defines the external interface of the match rule. This includes:
  /// * The names of the root nodes (requires at least one)
  /// See GIDefKind for details.
  dag Defs = defs;

  /// Defines the things which must be true for the pattern to match
  /// See GIMatchKind for details.
  dag Match = match;

  /// Defines the things which happen after the decision is made to apply a
  /// combine rule.
  /// See GIApplyKind for details.
  dag Apply = apply;
}
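
// For illustration only, and kept in a comment so that it is not compiled: a
// rule names its roots and any matchdata under `defs`, states what must hold
// under `match`, and performs the rewrite under `apply`, with the C++
// fragments referring back to named instructions and matchdata via ${name}.
// The operators and kinds used here (defs, root, match, apply,
// wip_match_opcode, GIDefMatchData) are declared below; G_FOO, my_matchdata,
// Helper.matchFoo and Helper.applyFoo are hypothetical placeholders, not real
// opcodes or helpers.
//
//   def my_matchdata : GIDefMatchData<"MyInfo">;
//   def my_example_rule : GICombineRule<
//     (defs root:$root, my_matchdata:$info),
//     (match (wip_match_opcode G_FOO):$root,
//       [{ return Helper.matchFoo(*${root}, ${info}); }]),
//     (apply [{ Helper.applyFoo(*${root}, ${info}); }])>;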

/// The operator at the root of a GICombineRule.Defs dag.
def defs;

/// All arguments of the defs operator must be subclasses of GIDefKind or
/// sub-dags whose operator is GIDefKindWithArgs.
class GIDefKind;
class GIDefKindWithArgs;
/// Declare a root node. There must be at least one of these in every combine
/// rule.
/// TODO: The plan is to elide `root` definitions and determine it from the DAG
///       itself with an override for situations where the usual determination
///       is incorrect.
def root : GIDefKind;

/// Declares data that is passed from the match stage to the apply stage.
class GIDefMatchData<string type> : GIDefKind {
  /// A C++ type name indicating the storage type.
  string Type = type;
}

def extending_load_matchdata : GIDefMatchData<"PreferredTuple">;
def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">;
def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">;

/// The operator at the root of a GICombineRule.Match dag.
def match;
/// All arguments of the match operator must be either:
/// * A subclass of GIMatchKind
/// * A subclass of GIMatchKindWithArgs
/// * A subclass of Instruction
/// * A MIR code block (deprecated)
/// The GIMatchKind and GIMatchKindWithArgs cases are described in more detail
/// in their definitions below.
/// For the Instruction case, these are collected into a DAG where operand names
/// that occur multiple times introduce edges.
class GIMatchKind;
class GIMatchKindWithArgs;

/// In lieu of proper macro support, trivial one-off opcode checks can be
/// performed with this.
def wip_match_opcode : GIMatchKindWithArgs;

/// The operator at the root of a GICombineRule.Apply dag.
def apply;
/// All arguments of the apply operator must be subclasses of GIApplyKind, or
/// sub-dags whose operator is GIApplyKindWithArgs, or an MIR block
/// (deprecated).
class GIApplyKind;
class GIApplyKindWithArgs;

def register_matchinfo: GIDefMatchData<"Register">;
def int64_matchinfo: GIDefMatchData<"int64_t">;
def apint_matchinfo : GIDefMatchData<"APInt">;
def build_fn_matchinfo :
    GIDefMatchData<"std::function<void(MachineIRBuilder &)>">;
def unsigned_matchinfo: GIDefMatchData<"unsigned">;

def copy_prop : GICombineRule<
  (defs root:$d),
  (match (COPY $d, $s):$mi,
    [{ return Helper.matchCombineCopy(*${mi}); }]),
  (apply [{ Helper.applyCombineCopy(*${mi}); }])>;

def extending_loads : GICombineRule<
  (defs root:$root, extending_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
    [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;

def load_and_mask : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
    [{ return Helper.matchCombineLoadWithAndMask(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
def combines_for_extload: GICombineGroup<[extending_loads, load_and_mask]>;

def sext_trunc_sextload : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_SEXT_INREG):$d,
    [{ return Helper.matchSextTruncSextLoad(*${d}); }]),
  (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>;

def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">;
def sext_inreg_of_load : GICombineRule<
  (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_SEXT_INREG):$root,
    [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>;

def combine_indexed_load_store : GICombineRule<
  (defs root:$root, indexed_load_store_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root,
    [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>;

def opt_brcond_by_inverting_cond_matchdata : GIDefMatchData<"MachineInstr *">;
def opt_brcond_by_inverting_cond : GICombineRule<
  (defs root:$root, opt_brcond_by_inverting_cond_matchdata:$matchinfo),
  (match (wip_match_opcode G_BR):$root,
    [{ return Helper.matchOptBrCondByInvertingCond(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}, ${matchinfo}); }])>;

def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">;
def ptr_add_immed_chain : GICombineRule<
  (defs root:$d, ptr_add_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$d,
    [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>;

// Fold shift (shift base, x), y -> shift base, (x+y), if shifts are same
def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">;
def shift_immed_chain : GICombineRule<
  (defs root:$d, shift_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d,
    [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>;

// Transform shift (logic (shift X, C0), Y), C1
//        -> logic (shift X, (C0+C1)), (shift Y, C1), if shifts are same
def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">;
def shift_of_shifted_logic_chain : GICombineRule<
  (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d,
    [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>;

def mul_to_shl_matchdata : GIDefMatchData<"unsigned">;
def mul_to_shl : GICombineRule<
  (defs root:$d, mul_to_shl_matchdata:$matchinfo),
  (match (G_MUL $d, $op1, $op2):$mi,
    [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>;

// shl ([asz]ext x), y => zext (shl x, y), if shift does not overflow int
def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">;
def reduce_shl_of_extend : GICombineRule<
  (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo),
  (match (G_SHL $dst, $src0, $src1):$mi,
    [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>;

def narrow_binop_feeding_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
    [{ return Helper.matchNarrowBinopFeedingAnd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// [us]itofp(undef) = 0, because the result value is bounded.
def undef_to_fp_zero : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UITOFP, G_SITOFP):$root,
    [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithFConstant(*${root}, 0.0); }])>;

def undef_to_int_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_AND, G_MUL):$root,
    [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

def undef_to_negative_one: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>;

def binop_left_undef_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL):$root,
    [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

def binop_right_undef_to_undef: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root,
    [{ return Helper.matchOperandIsUndef(*${root}, 2); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Instructions where if any source operand is undef, the instruction can be
// replaced with undef.
def propagate_undef_any_op: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC):$root,
    [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Instructions where if all source operands are undef, the instruction can be
// replaced with undef.
def propagate_undef_all_ops: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
    [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
def propagate_undef_shuffle_mask: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
    [{ return Helper.matchUndefShuffleVectorMask(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Fold (cond ? x : x) -> x
def select_same_val: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchSelectSameVal(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Fold (undef ? x : y) -> y
def select_undef_cmp: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchUndefSelectCmp(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Fold (true ? x : y) -> x
// Fold (false ? x : y) -> y
def select_constant_cmp_matchdata : GIDefMatchData<"unsigned">;
def select_constant_cmp: GICombineRule<
  (defs root:$root, select_constant_cmp_matchdata:$matchinfo),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }])
>;

def select_to_logical : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchSelectToLogical(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])
>;

// Fold x op 0 -> x
def right_identity_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SUB, G_ADD, G_OR, G_XOR, G_SHL, G_ASHR, G_LSHR,
                           G_PTR_ADD, G_ROTL, G_ROTR):$root,
    [{ return Helper.matchConstantOp(${root}->getOperand(2), 0); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold x op 1 -> x
def right_identity_one: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
    [{ return Helper.matchConstantOp(${root}->getOperand(2), 1); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (x op x) -> x
def binop_same_val: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_AND, G_OR):$root,
    [{ return Helper.matchBinOpSameVal(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (0 op x) -> 0
def binop_left_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchOperandIsZero(*${root}, 1); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

def urem_pow2_to_mask : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UREM):$root,
    [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]),
  (apply [{ Helper.applySimplifyURemByPow2(*${root}); }])
>;

// Push a binary operator through a select on constants.
//
// binop (select cond, K0, K1), K2 ->
//   select cond, (binop K0, K2), (binop K1, K2)

// Every binary operator that has constant folding. We currently do
// not have constant folding for G_FPOW, G_FMAXNUM_IEEE or
// G_FMINNUM_IEEE.
def fold_binop_into_select : GICombineRule<
  (defs root:$root, unsigned_matchinfo:$select_op_no),
  (match (wip_match_opcode
            G_ADD, G_SUB, G_PTR_ADD, G_AND, G_OR, G_XOR,
            G_SDIV, G_SREM, G_UDIV, G_UREM, G_LSHR, G_ASHR, G_SHL,
            G_SMIN, G_SMAX, G_UMIN, G_UMAX,
            G_FMUL, G_FADD, G_FSUB, G_FDIV, G_FREM,
            G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
    [{ return Helper.matchFoldBinOpIntoSelect(*${root}, ${select_op_no}); }]),
  (apply [{ return Helper.applyFoldBinOpIntoSelect(*${root}, ${select_op_no}); }])
>;
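
// Purely as an illustrative instance of the fold described above (an example,
// not an additional rule):
//   (G_ADD (G_SELECT $cond, 1, 2), 10) -> (G_SELECT $cond, 11, 12)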

// Transform d = [su]div(x, y) and r = [su]rem(x, y) -> d, r = [su]divrem(x, y)
def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">;
def div_rem_to_divrem : GICombineRule<
  (defs root:$root, div_rem_to_divrem_matchdata:$matchinfo),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchCombineDivRem(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineDivRem(*${root}, ${matchinfo}); }])
>;

// Fold (x op 0) -> 0
def binop_right_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
    [{ return Helper.matchOperandIsZero(*${root}, 2); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Erase stores of undef values.
def erase_undef_store : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_STORE):$root,
    [{ return Helper.matchUndefStore(*${root}); }]),
  (apply [{ return Helper.eraseInst(*${root}); }])
>;

def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
def simplify_add_to_sub: GICombineRule <
  (defs root:$root, simplify_add_to_sub_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchSimplifyAddToSub(*${root}, ${info}); }]),
  (apply [{ Helper.applySimplifyAddToSub(*${root}, ${info});}])
>;

// Fold fp_op(cst) to the constant result of the floating point operation.
def constant_fp_op_matchinfo: GIDefMatchData<"Optional<APFloat>">;
def constant_fp_op: GICombineRule <
  (defs root:$root, constant_fp_op_matchinfo:$info),
  (match (wip_match_opcode G_FNEG, G_FABS, G_FPTRUNC, G_FSQRT, G_FLOG2):$root,
    [{ return Helper.matchCombineConstantFoldFpUnary(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineConstantFoldFpUnary(*${root}, ${info}); }])
>;

// Fold int2ptr(ptr2int(x)) -> x
def p2i_to_i2p: GICombineRule<
  (defs root:$root, register_matchinfo:$info),
  (match (wip_match_opcode G_INTTOPTR):$root,
    [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineI2PToP2I(*${root}, ${info}); }])
>;

// Fold ptr2int(int2ptr(x)) -> x
def i2p_to_p2i: GICombineRule<
  (defs root:$root, register_matchinfo:$info),
  (match (wip_match_opcode G_PTRTOINT):$root,
    [{ return Helper.matchCombineP2IToI2P(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineP2IToI2P(*${root}, ${info}); }])
>;

// Fold add ptrtoint(x), y -> ptrtoint (ptr_add x, y)
def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">;
def add_p2i_to_ptradd : GICombineRule<
  (defs root:$root, add_p2i_to_ptradd_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }])
>;

// Fold (ptr_add (int2ptr C1), C2) -> C1 + C2
def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"APInt">;
def const_ptradd_to_i2p: GICombineRule<
  (defs root:$root, const_ptradd_to_i2p_matchinfo:$info),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }])
>;

// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
def hoist_logic_op_with_same_opcode_hands: GICombineRule <
  (defs root:$root, instruction_steps_matchdata:$info),
  (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
    [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildInstructionSteps(*${root}, ${info});}])
>;

// Fold ashr (shl x, C), C -> sext_inreg x, (bitwidth - C)
def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">;
def shl_ashr_to_sext_inreg : GICombineRule<
  (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info),
  (match (wip_match_opcode G_ASHR): $root,
    [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
  (apply [{ Helper.applyAshShlToSextInreg(*${root}, ${info});}])
>;

// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
def overlapping_and: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
    [{ return Helper.matchOverlappingAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y.
def redundant_and: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
    [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;
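
// Illustrative example only: (G_AND x, 0xff) -> x when the bits of x above
// bit 7 are known to be zero, since the mask then changes nothing.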

// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y.
def redundant_or: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// If the input is already sign extended, just drop the extension.
// sext_inreg x, K -> x,
//   if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1)
def redundant_sext_inreg: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_SEXT_INREG):$root,
    [{ return Helper.matchRedundantSExtInReg(*${root}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (anyext (trunc x)) -> x if the source type is the same as
// the destination type.
def anyext_trunc_fold: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT):$root,
    [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (zext (trunc x)) -> x if the source type is the same as the destination
// type and the truncated bits are known to be zero.
def zext_trunc_fold_matchinfo : GIDefMatchData<"Register">;
def zext_trunc_fold: GICombineRule <
  (defs root:$root, zext_trunc_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ZEXT):$root,
    [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">;
def ext_ext_fold: GICombineRule <
  (defs root:$root, ext_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT, G_SEXT, G_ZEXT):$root,
    [{ return Helper.matchCombineExtOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }])
>;

def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">;
def not_cmp_fold : GICombineRule<
  (defs root:$d, not_cmp_fold_matchinfo:$info),
  (match (wip_match_opcode G_XOR): $d,
    [{ return Helper.matchNotCmp(*${d}, ${info}); }]),
  (apply [{ Helper.applyNotCmp(*${d}, ${info}); }])
>;

// Fold (fneg (fneg x)) -> x.
def fneg_fneg_fold: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FNEG):$root,
    [{ return Helper.matchCombineFNegOfFNeg(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (unmerge(merge x, y, z)) -> x, y, z.
def unmerge_merge_matchinfo : GIDefMatchData<"SmallVector<Register, 8>">;
def unmerge_merge : GICombineRule<
  (defs root:$d, unmerge_merge_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
    [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }])
>;

// Fold merge(unmerge).
def merge_unmerge : GICombineRule<
  (defs root:$d, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_MERGE_VALUES):$d,
    [{ return Helper.matchCombineMergeUnmerge(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${d}, ${matchinfo}); }])
>;

// Fold (fabs (fabs x)) -> (fabs x).
def fabs_fabs_fold: GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FABS):$root,
    [{ return Helper.matchCombineFAbsOfFAbs(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (fabs (fneg x)) -> (fabs x).
def fabs_fneg_fold: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FABS):$root,
    [{ return Helper.matchCombineFAbsOfFNeg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Fold (unmerge cst) -> cst1, cst2, ...
def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">;
def unmerge_cst : GICombineRule<
  (defs root:$d, unmerge_cst_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
    [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }])
>;

// Fold (unmerge undef) -> undef, undef, ...
def unmerge_undef : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $root,
    [{ return Helper.matchCombineUnmergeUndef(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Transform x,y<dead> = unmerge z -> x = trunc z.
def unmerge_dead_to_trunc : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
    [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }])
>;

// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0.
def unmerge_zext_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
    [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }])
>;

// Fold trunc ([asz]ext x) -> x or ([asz]ext x) or (trunc x).
def trunc_ext_fold_matchinfo : GIDefMatchData<"std::pair<Register, unsigned>">;
def trunc_ext_fold: GICombineRule <
  (defs root:$root, trunc_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
    [{ return Helper.matchCombineTruncOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }])
>;

// Fold trunc (shl x, K) -> shl (trunc x), K, if K < VT.getScalarSizeInBits().
def trunc_shl_matchinfo : GIDefMatchData<"std::pair<Register, Register>">;
def trunc_shl: GICombineRule <
  (defs root:$root, trunc_shl_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
    [{ return Helper.matchCombineTruncOfShl(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfShl(*${root}, ${matchinfo}); }])
>;

// Transform (mul x, -1) -> (sub 0, x)
def mul_by_neg_one: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
    [{ return Helper.matchConstantOp(${root}->getOperand(2), -1); }]),
  (apply [{ Helper.applyCombineMulByNegativeOne(*${root}); }])
>;

// Fold (xor (and x, y), y) -> (and (not x), y)
def xor_of_and_with_same_reg_matchinfo :
    GIDefMatchData<"std::pair<Register, Register>">;
def xor_of_and_with_same_reg: GICombineRule <
  (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo),
  (match (wip_match_opcode G_XOR):$root,
    [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }])
>;

// Transform (ptr_add 0, x) -> (int_to_ptr x)
def ptr_add_with_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchPtrAddZero(*${root}); }]),
  (apply [{ Helper.applyPtrAddZero(*${root}); }])>;

def regs_small_vec : GIDefMatchData<"SmallVector<Register, 4>">;
def combine_insert_vec_elts_build_vector : GICombineRule<
  (defs root:$root, regs_small_vec:$info),
  (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
    [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>;

def load_or_combine : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def truncstore_merge_matcdata : GIDefMatchData<"MergeTruncStoresInfo">;
def truncstore_merge : GICombineRule<
  (defs root:$root, truncstore_merge_matcdata:$info),
  (match (wip_match_opcode G_STORE):$root,
    [{ return Helper.matchTruncStoreMerge(*${root}, ${info}); }]),
  (apply [{ Helper.applyTruncStoreMerge(*${root}, ${info}); }])>;

def extend_through_phis_matchdata: GIDefMatchData<"MachineInstr*">;
def extend_through_phis : GICombineRule<
  (defs root:$root, extend_through_phis_matchdata:$matchinfo),
  (match (wip_match_opcode G_PHI):$root,
    [{ return Helper.matchExtendThroughPhis(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtendThroughPhis(*${root}, ${matchinfo}); }])>;

// Currently only the one insert_vec_elts combine defined above.
def insert_vec_elt_combines : GICombineGroup<
  [combine_insert_vec_elts_build_vector]>;

def extract_vec_elt_build_vec : GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
    [{ return Helper.matchExtractVecEltBuildVec(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractVecEltBuildVec(*${root}, ${matchinfo}); }])>;

// Fold away full elt extracts from a build_vector.
def extract_all_elts_from_build_vector_matchinfo :
    GIDefMatchData<"SmallVector<std::pair<Register, MachineInstr*>>">;
def extract_all_elts_from_build_vector : GICombineRule<
  (defs root:$root, extract_all_elts_from_build_vector_matchinfo:$matchinfo),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
    [{ return Helper.matchExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }])>;

def extract_vec_elt_combines : GICombineGroup<[
  extract_vec_elt_build_vec,
  extract_all_elts_from_build_vector]>;

def funnel_shift_from_or_shift : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchOrShiftToFunnelShift(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

def funnel_shift_to_rotate : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FSHL, G_FSHR):$root,
    [{ return Helper.matchFunnelShiftToRotate(*${root}); }]),
  (apply [{ Helper.applyFunnelShiftToRotate(*${root}); }])
>;

def rotate_out_of_range : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ROTR, G_ROTL):$root,
    [{ return Helper.matchRotateOutOfRange(*${root}); }]),
  (apply [{ Helper.applyRotateOutOfRange(*${root}); }])
>;

def icmp_to_true_false_known_bits : GICombineRule<
  (defs root:$d, int64_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ICMP):$d,
    [{ return Helper.matchICmpToTrueFalseKnownBits(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

def icmp_to_lhs_known_bits : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ICMP):$root,
    [{ return Helper.matchICmpToLHSKnownBits(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def and_or_disjoint_mask : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
    [{ return Helper.matchAndOrDisjointMask(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${info}); }])>;

def bitfield_extract_from_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
    [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def funnel_shift_combines : GICombineGroup<[funnel_shift_from_or_shift,
                                            funnel_shift_to_rotate]>;

def bitfield_extract_from_sext_inreg : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_SEXT_INREG):$root,
    [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def bitfield_extract_from_shr : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
    [{ return Helper.matchBitfieldExtractFromShr(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def bitfield_extract_from_shr_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
    [{ return Helper.matchBitfieldExtractFromShrAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
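
// Illustrative examples (not additional rules) of the bitfield extracts the
// combines grouped below produce:
//   (G_AND (G_LSHR x, 8), 0xff)     -> (G_UBFX x, 8, 8)
//   (G_SEXT_INREG (G_LSHR x, 8), 8) -> (G_SBFX x, 8, 8)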

def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg,
                                            bitfield_extract_from_and,
                                            bitfield_extract_from_shr,
                                            bitfield_extract_from_shr_and]>;

def udiv_by_const : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UDIV):$root,
    [{ return Helper.matchUDivByConst(*${root}); }]),
  (apply [{ Helper.applyUDivByConst(*${root}); }])>;

def intdiv_combines : GICombineGroup<[udiv_by_const]>;

def reassoc_ptradd : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchReassocPtrAdd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def reassocs : GICombineGroup<[reassoc_ptradd]>;

// Constant fold operations.
def constant_fold : GICombineRule<
  (defs root:$d, apint_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR):$d,
    [{ return Helper.matchConstantFold(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

def mulo_by_2: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UMULO, G_SMULO):$root,
    [{ return Helper.matchMulOBy2(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def mulo_by_0: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UMULO, G_SMULO):$root,
    [{ return Helper.matchMulOBy0(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

def addo_by_0: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UADDO, G_SADDO):$root,
    [{ return Helper.matchAddOBy0(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

def mulh_to_lshr : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UMULH):$root,
    [{ return Helper.matchUMulHToLShr(*${root}); }]),
  (apply [{ Helper.applyUMulHToLShr(*${root}); }])>;

def mulh_combines : GICombineGroup<[mulh_to_lshr]>;

def redundant_neg_operands: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMAD, G_FMA):$root,
    [{ return Helper.matchRedundantNegOperands(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Transform (fadd x, (fmul y, z)) -> (fma y, z, x)
//           (fadd x, (fmul y, z)) -> (fmad y, z, x)
// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
//           (fadd (fmul x, y), z) -> (fmad x, y, z)
def combine_fadd_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
    [{ return Helper.matchCombineFAddFMulToFMadOrFMA(*${root},
                                                     ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
//                                         -> (fmad (fpext x), (fpext y), z)
// Transform (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
//                                         -> (fmad (fpext y), (fpext z), x)
def combine_fadd_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
    [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fmul z, u)), v)  -> (fma x, y, (fma z, u, v))
//           (fadd (fmad x, y, (fmul z, u)), v) -> (fmad x, y, (fmad z, u, v))
// Transform (fadd v, (fma x, y, (fmul z, u)))  -> (fma x, y, (fma z, u, v))
//           (fadd v, (fmad x, y, (fmul z, u))) -> (fmad x, y, (fmad z, u, v))
def combine_fadd_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
    [{ return Helper.matchCombineFAddFMAFMulToFMadOrFMA(*${root},
                                                        ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fpext (fmul u, v))), z) ->
//           (fma x, y, (fma (fpext u), (fpext v), z))
def combine_fadd_fpext_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
    [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
         *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
//                                 -> (fmad x, y, -z)
def combine_fsub_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
    [{ return Helper.matchCombineFSubFMulToFMadOrFMA(*${root},
                                                     ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
//           (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x)
def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
    [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root},
                                                         ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fpext (fmul x, y)), z) ->
//           (fma (fpext x), (fpext y), (fneg z))
def combine_fsub_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
    [{ return Helper.matchCombineFSubFpExtFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fpext (fmul x, y))), z) ->
//           (fneg (fma (fpext x), (fpext y), z))
def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
    [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA(
         *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def combine_minmax_nan: GICombineRule<
  (defs root:$root, unsigned_matchinfo:$info),
  (match (wip_match_opcode G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
    [{ return Helper.matchCombineFMinMaxNaN(*${root}, ${info}); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${info}); }])>;

// Transform (add x, (sub y, x)) -> y
// Transform (add (sub y, x), x) -> y
def add_sub_reg: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchAddSubSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root},
                                                      ${matchinfo}); }])>;

// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
                                     undef_to_negative_one,
                                     binop_left_undef_to_zero,
                                     binop_right_undef_to_undef,
                                     propagate_undef_any_op,
                                     propagate_undef_all_ops,
                                     propagate_undef_shuffle_mask,
                                     erase_undef_store,
                                     unmerge_undef]>;

def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
                                        binop_same_val, binop_left_to_zero,
                                        binop_right_to_zero, p2i_to_i2p,
                                        i2p_to_p2i, anyext_trunc_fold,
                                        fneg_fneg_fold, right_identity_one,
                                        add_sub_reg]>;

def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p,
                                     overlapping_and, mulo_by_2, mulo_by_0,
                                     addo_by_0, combine_minmax_nan]>;

def known_bits_simplifications : GICombineGroup<[
  redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
  zext_trunc_fold, icmp_to_true_false_known_bits, icmp_to_lhs_known_bits]>;

def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend,
                                               narrow_binop_feeding_and]>;

def phi_combines : GICombineGroup<[extend_through_phis]>;

def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp,
                                      select_to_logical]>;

def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd,
                                       mul_by_neg_one]>;

def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
  combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma,
  combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>;

def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
  extract_vec_elt_combines, combines_for_extload,
  combine_indexed_load_store, undef_combines, identity_combines, phi_combines,
  simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands,
  reassocs, ptr_add_immed_chain,
  shl_ashr_to_sext_inreg, sext_inreg_of_load,
  width_reduction_combines, select_combines,
  known_bits_simplifications, ext_ext_fold,
  not_cmp_fold, opt_brcond_by_inverting_cond,
  unmerge_merge, fabs_fabs_fold, unmerge_cst, unmerge_dead_to_trunc,
  unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shl,
  const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
  shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
  truncstore_merge, div_rem_to_divrem, funnel_shift_combines,
  form_bitfield_extract, constant_fold, fabs_fneg_fold,
  intdiv_combines, mulh_combines, redundant_neg_operands,
  and_or_disjoint_mask, fma_combines, fold_binop_into_select]>;

// A combine group used for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
// compile-time performance.
def optnone_combines : GICombineGroup<[trivial_combines,
    ptr_add_immed_chain, combines_for_extload,
    not_cmp_fold, opt_brcond_by_inverting_cond]>;
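
// For illustration only (hypothetical target names, kept in a comment so it
// is not compiled): a target-specific combiner typically pulls one of the
// groups above into its own GICombinerHelper and may name a command-line
// option for disabling individual rules, e.g.:
//
//   def MyTargetPreLegalizerCombinerHelper : GICombinerHelper<
//       "MyTargetGenPreLegalizerCombinerHelper", [all_combines]> {
//     let DisableRuleOption = "mytargetprelegalizercombiner-disable-rule";
//   }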