xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUCombine.td (revision 7fdf597e96a02165cfe22ff357b857d5fa15ed8a)
1//=- AMDGPUCombine.td - Define AMDGPU Combine Rules ----------*- tablegen -*-=//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9include "llvm/Target/GlobalISel/Combine.td"
10
11// TODO: This really belongs after legalization after scalarization.
12
def fmin_fmax_legacy_matchdata : GIDefMatchData<"FMinFMaxLegacyInfo">;

// Fold a G_FCMP feeding an f32 G_SELECT into one of the "legacy"
// fmin/fmax operations, on subtargets that provide them
// (guarded by HasFminFmaxLegacy). The exact min/max selection is
// decided by matchFMinFMaxLegacy and carried in $matchinfo.
let Predicates = [HasFminFmaxLegacy] in
def fcmp_select_to_fmin_fmax_legacy : GICombineRule<
  (defs root:$select, fmin_fmax_legacy_matchdata:$matchinfo),
  (match (G_FCMP $cond, $pred, $lhs, $rhs):$fcmp,
         (G_SELECT f32:$dst, $cond, $true, $false):$select,
         [{ return matchFMinFMaxLegacy(*${select}, *${fcmp}, ${matchinfo}); }]),
  (apply [{ applySelectFCmpToFMinFMaxLegacy(*${select}, ${matchinfo}); }])>;
22
23
// Combine a G_UITOFP/G_SITOFP whose source value qualifies as an
// unsigned byte (the precise condition is checked by
// matchUCharToFloat) into the dedicated byte-to-float conversion.
def uchar_to_float : GICombineRule<
  (defs root:$itofp),
  (match (wip_match_opcode G_UITOFP, G_SITOFP):$itofp,
         [{ return matchUCharToFloat(*${itofp}); }]),
  (apply [{ applyUCharToFloat(*${itofp}); }])>;
29
30
// Fold a reciprocal of a square root (rooted at either a G_INTRINSIC,
// e.g. the rcp intrinsic, or a G_FSQRT) into a single rsq operation.
// matchRcpSqrtToRsq fills $matchinfo with the replacement builder.
def rcp_sqrt_to_rsq : GICombineRule<
  (defs root:$rcp, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_INTRINSIC, G_FSQRT):$rcp,
         [{ return matchRcpSqrtToRsq(*${rcp}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${rcp}, ${matchinfo}); }])>;
36
// Fold (fdiv contract y, (fsqrt contract x)) on f16 into an rsq of x.
// Both the divide and the sqrt must carry the contract fast-math flag.
def fdiv_by_sqrt_to_rsq_f16 : GICombineRule<
  (defs root:$root),
  (match (G_FSQRT f16:$sqrt, $x, (MIFlags FmContract)),
         (G_FDIV f16:$dst, $y, $sqrt, (MIFlags FmContract)):$root,
         [{ return matchFDivSqrtToRsqF16(*${root}); }]),
  (apply [{ applyFDivSqrtToRsqF16(*${root}, ${x}.getReg()); }])>;
43
def cvt_f32_ubyteN_matchdata : GIDefMatchData<"CvtF32UByteMatchInfo">;

// Simplify the source operand of a G_AMDGPU_CVT_F32_UBYTE[0-3]
// instruction; matchCvtF32UByteN records the new source register and
// byte index in $matchinfo.
def cvt_f32_ubyteN : GICombineRule<
  (defs root:$cvt_f32_ubyteN, cvt_f32_ubyteN_matchdata:$matchinfo),
  (match (wip_match_opcode G_AMDGPU_CVT_F32_UBYTE0,
                           G_AMDGPU_CVT_F32_UBYTE1,
                           G_AMDGPU_CVT_F32_UBYTE2,
                           G_AMDGPU_CVT_F32_UBYTE3):$cvt_f32_ubyteN,
         [{ return matchCvtF32UByteN(*${cvt_f32_ubyteN}, ${matchinfo}); }]),
  (apply [{ applyCvtF32UByteN(*${cvt_f32_ubyteN}, ${matchinfo}); }])>;
54
def clamp_i64_to_i16_matchdata : GIDefMatchData<"ClampI64ToI16MatchInfo">;

// Recognize a 64-bit min/max clamp sequence feeding a G_TRUNC
// (matchClampI64ToI16 checks the exact shape) and replace it with a
// 16-bit clamp.
def clamp_i64_to_i16 : GICombineRule<
  (defs root:$clamp_i64_to_i16, clamp_i64_to_i16_matchdata:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$clamp_i64_to_i16,
      [{ return matchClampI64ToI16(*${clamp_i64_to_i16}, MRI, MF, ${matchinfo}); }]),
  (apply [{ applyClampI64ToI16(*${clamp_i64_to_i16}, ${matchinfo}); }])>;
62
// Shared matchdata for the integer and FP min/max -> med3 combines.
def med3_matchdata : GIDefMatchData<"Med3MatchInfo">;

// Fold a chain of integer min/max operations (signed or unsigned)
// into a single med3 instruction when matchIntMinMaxToMed3 proves the
// pattern equivalent.
def int_minmax_to_med3 : GICombineRule<
  (defs root:$min_or_max, med3_matchdata:$matchinfo),
  (match (wip_match_opcode G_SMAX,
                           G_SMIN,
                           G_UMAX,
                           G_UMIN):$min_or_max,
         [{ return matchIntMinMaxToMed3(*${min_or_max}, ${matchinfo}); }]),
  (apply [{ applyMed3(*${min_or_max}, ${matchinfo}); }])>;
73
// FP analogue of int_minmax_to_med3: fold a chain of FP min/max
// operations (both the plain and IEEE variants) into a single med3.
def fp_minmax_to_med3 : GICombineRule<
  (defs root:$min_or_max, med3_matchdata:$matchinfo),
  (match (wip_match_opcode G_FMAXNUM,
                           G_FMINNUM,
                           G_FMAXNUM_IEEE,
                           G_FMINNUM_IEEE):$min_or_max,
         [{ return matchFPMinMaxToMed3(*${min_or_max}, ${matchinfo}); }]),
  (apply [{ applyMed3(*${min_or_max}, ${matchinfo}); }])>;
82
// Fold an FP min/max chain into a clamp when matchFPMinMaxToClamp
// proves the bounds make it equivalent; $matchinfo holds the value
// register to clamp.
def fp_minmax_to_clamp : GICombineRule<
  (defs root:$min_or_max, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FMAXNUM,
                           G_FMINNUM,
                           G_FMAXNUM_IEEE,
                           G_FMINNUM_IEEE):$min_or_max,
         [{ return matchFPMinMaxToClamp(*${min_or_max}, ${matchinfo}); }]),
  (apply [{ applyClamp(*${min_or_max}, ${matchinfo}); }])>;
91
// Replace a G_AMDGPU_FMED3 with a clamp of one operand when
// matchFPMed3ToClamp proves the med3 reduces to a clamp; $matchinfo
// holds the register to clamp.
def fmed3_intrinsic_to_clamp : GICombineRule<
  (defs root:$fmed3, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AMDGPU_FMED3):$fmed3,
         [{ return matchFPMed3ToClamp(*${fmed3}, ${matchinfo}); }]),
  (apply [{ applyClamp(*${fmed3}, ${matchinfo}); }])>;
97
// Drop a G_FCANONICALIZE that matchRemoveFcanonicalize determines is
// redundant, forwarding its source register ($matchinfo) to all users.
def remove_fcanonicalize : GICombineRule<
  (defs root:$fcanonicalize, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FCANONICALIZE):$fcanonicalize,
         [{ return matchRemoveFcanonicalize(*${fcanonicalize}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${fcanonicalize}, ${matchinfo}); }])>;
103
def foldable_fneg_matchdata : GIDefMatchData<"MachineInstr *">;

// Fold a G_FNEG into its source instruction when the combiner helper
// determines it is foldable; $matchinfo records the instruction the
// negation is folded into.
def foldable_fneg : GICombineRule<
  (defs root:$ffn, foldable_fneg_matchdata:$matchinfo),
  (match (wip_match_opcode G_FNEG):$ffn,
         [{ return Helper.matchFoldableFneg(*${ffn}, ${matchinfo}); }]),
  (apply [{ Helper.applyFoldableFneg(*${ffn}, ${matchinfo}); }])>;
111
// Detects s_mul_u64 instructions whose higher bits are zero/sign extended.
// On success $matchinfo holds the replacement opcode, which the apply
// step swaps in via replaceOpcodeWith.
def smulu64 : GICombineRule<
  (defs root:$smul, unsigned_matchinfo:$matchinfo),
  (match (wip_match_opcode G_MUL):$smul,
         [{ return matchCombine_s_mul_u64(*${smul}, ${matchinfo}); }]),
  (apply [{ Helper.replaceOpcodeWith(*${smul}, ${matchinfo}); }])>;
118
// Matchdata for sign_extension_in_reg: the instruction to rewrite and
// an accompanying unsigned value.
// NOTE: renamed from the misspelled "sign_exension_in_reg_matchdata";
// this def's only use is in the rule directly below.
def sign_extension_in_reg_matchdata : GIDefMatchData<"std::pair<MachineInstr *, unsigned>">;

// Combine a G_SEXT_INREG with its source when
// matchCombineSignExtendInReg proves the in-register sign extension
// can be simplified.
def sign_extension_in_reg : GICombineRule<
  (defs root:$sign_inreg, sign_extension_in_reg_matchdata:$matchinfo),
  (match (wip_match_opcode G_SEXT_INREG):$sign_inreg,
         [{ return matchCombineSignExtendInReg(*${sign_inreg}, ${matchinfo}); }]),
  (apply [{ applyCombineSignExtendInReg(*${sign_inreg}, ${matchinfo}); }])>;
126
127
let Predicates = [Has16BitInsts, NotHasMed3_16] in {
// For gfx8, expand f16-fmed3-as-f32 into a min/max f16 sequence. This
// saves one instruction compared to the promotion.
//
// FIXME: Should have ComplexPattern like in/out matchers
//
// FIXME: We should be able to match either G_AMDGPU_FMED3 or
// G_INTRINSIC @llvm.amdgcn.fmed3. Currently the legalizer will
// replace the intrinsic with G_AMDGPU_FMED3 since we can't write a
// pattern to match it.
def expand_promoted_fmed3 : GICombineRule<
  (defs root:$fptrunc_dst),
  (match (G_FPTRUNC $fptrunc_dst, $fmed3_dst):$fptrunc,
         (G_AMDGPU_FMED3 $fmed3_dst, $src0, $src1, $src2),
    [{ return Helper.matchExpandPromotedF16FMed3(*${fptrunc}, ${src0}.getReg(), ${src1}.getReg(), ${src2}.getReg()); }]),
  (apply [{ Helper.applyExpandPromotedF16FMed3(*${fptrunc}, ${src0}.getReg(), ${src1}.getReg(), ${src2}.getReg()); }])
>;

} // End Predicates = [Has16BitInsts, NotHasMed3_16]
147
// Combines which should only apply on SI/CI (gfx6/gfx7)
def gfx6gfx7_combines : GICombineGroup<[fcmp_select_to_fmin_fmax_legacy]>;

// Combines which should only apply on VI (gfx8)
def gfx8_combines : GICombineGroup<[expand_promoted_fmed3]>;
153
// Combiner run before legalization: the generic all_combines plus the
// AMDGPU-specific pre-legalize rules.
def AMDGPUPreLegalizerCombiner: GICombiner<
  "AMDGPUPreLegalizerCombinerImpl",
  [all_combines, clamp_i64_to_i16, foldable_fneg]> {
  let CombineAllMethodName = "tryCombineAllImpl";
}
159
// Combiner run after legalization: generic combines, the
// subtarget-gated groups, and the AMDGPU-specific post-legalize rules.
def AMDGPUPostLegalizerCombiner: GICombiner<
  "AMDGPUPostLegalizerCombinerImpl",
  [all_combines, gfx6gfx7_combines, gfx8_combines,
   uchar_to_float, cvt_f32_ubyteN, remove_fcanonicalize, foldable_fneg,
   rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64]> {
  let CombineAllMethodName = "tryCombineAllImpl";
}
167
// Combiner run after register-bank selection: a curated list of
// generic unmerge/zext/ptr-add folds plus the min/max -> med3/clamp
// combines.
def AMDGPURegBankCombiner : GICombiner<
  "AMDGPURegBankCombinerImpl",
  [unmerge_merge, unmerge_cst, unmerge_undef,
   zext_trunc_fold, int_minmax_to_med3, ptr_add_immed_chain,
   fp_minmax_to_clamp, fp_minmax_to_med3, fmed3_intrinsic_to_clamp]> {
}
174