//=- AMDGPUCombine.td - Define AMDGPU Combine Rules ----------*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

include "llvm/Target/GlobalISel/Combine.td"

// TODO: This really belongs after legalization, after scalarization.

def fmin_fmax_legacy_matchdata : GIDefMatchData<"FMinFMaxLegacyInfo">;

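// Fold a floating-point compare feeding a select of the same operands into
// the AMDGPU legacy min/max operations. A rough sketch of the intent (the
// exact predicate and operand checks live in matchFMinFMaxLegacy):
//   %c = G_FCMP floatpred(olt), %x, %y
//   %r = G_SELECT %c, %x, %y
// becomes roughly %r = G_AMDGPU_FMIN_LEGACY %x, %y (or the FMAX_LEGACY form,
// depending on the predicate and operand order).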
let Predicates = [HasFminFmaxLegacy] in
def fcmp_select_to_fmin_fmax_legacy : GICombineRule<
  (defs root:$select, fmin_fmax_legacy_matchdata:$matchinfo),
  (match (wip_match_opcode G_SELECT):$select,
         [{ return matchFMinFMaxLegacy(*${select}, ${matchinfo}); }]),
  (apply [{ applySelectFCmpToFMinToFMaxLegacy(*${select}, ${matchinfo}); }])>;


def uchar_to_float : GICombineRule<
  (defs root:$itofp),
  (match (wip_match_opcode G_UITOFP, G_SITOFP):$itofp,
         [{ return matchUCharToFloat(*${itofp}); }]),
  (apply [{ applyUCharToFloat(*${itofp}); }])>;


def rcp_sqrt_to_rsq : GICombineRule<
  (defs root:$rcp, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_INTRINSIC, G_FSQRT):$rcp,
         [{ return matchRcpSqrtToRsq(*${rcp}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${rcp}, ${matchinfo}); }])>;


def cvt_f32_ubyteN_matchdata : GIDefMatchData<"CvtF32UByteMatchInfo">;

def cvt_f32_ubyteN : GICombineRule<
  (defs root:$cvt_f32_ubyteN, cvt_f32_ubyteN_matchdata:$matchinfo),
  (match (wip_match_opcode G_AMDGPU_CVT_F32_UBYTE0,
                           G_AMDGPU_CVT_F32_UBYTE1,
                           G_AMDGPU_CVT_F32_UBYTE2,
                           G_AMDGPU_CVT_F32_UBYTE3):$cvt_f32_ubyteN,
         [{ return matchCvtF32UByteN(*${cvt_f32_ubyteN}, ${matchinfo}); }]),
  (apply [{ applyCvtF32UByteN(*${cvt_f32_ubyteN}, ${matchinfo}); }])>;

def clamp_i64_to_i16_matchdata : GIDefMatchData<"ClampI64ToI16MatchInfo">;

def clamp_i64_to_i16 : GICombineRule<
  (defs root:$clamp_i64_to_i16, clamp_i64_to_i16_matchdata:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$clamp_i64_to_i16,
      [{ return matchClampI64ToI16(*${clamp_i64_to_i16}, MRI, MF, ${matchinfo}); }]),
  (apply [{ applyClampI64ToI16(*${clamp_i64_to_i16}, ${matchinfo}); }])>;

def med3_matchdata : GIDefMatchData<"Med3MatchInfo">;

def int_minmax_to_med3 : GICombineRule<
  (defs root:$min_or_max, med3_matchdata:$matchinfo),
  (match (wip_match_opcode G_SMAX,
                           G_SMIN,
                           G_UMAX,
                           G_UMIN):$min_or_max,
         [{ return matchIntMinMaxToMed3(*${min_or_max}, ${matchinfo}); }]),
  (apply [{ applyMed3(*${min_or_max}, ${matchinfo}); }])>;

def fp_minmax_to_med3 : GICombineRule<
  (defs root:$min_or_max, med3_matchdata:$matchinfo),
  (match (wip_match_opcode G_FMAXNUM,
                           G_FMINNUM,
                           G_FMAXNUM_IEEE,
                           G_FMINNUM_IEEE):$min_or_max,
         [{ return matchFPMinMaxToMed3(*${min_or_max}, ${matchinfo}); }]),
  (apply [{ applyMed3(*${min_or_max}, ${matchinfo}); }])>;

def fp_minmax_to_clamp : GICombineRule<
  (defs root:$min_or_max, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FMAXNUM,
                           G_FMINNUM,
                           G_FMAXNUM_IEEE,
                           G_FMINNUM_IEEE):$min_or_max,
         [{ return matchFPMinMaxToClamp(*${min_or_max}, ${matchinfo}); }]),
  (apply [{ applyClamp(*${min_or_max}, ${matchinfo}); }])>;

def fmed3_intrinsic_to_clamp : GICombineRule<
  (defs root:$fmed3, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AMDGPU_FMED3):$fmed3,
         [{ return matchFPMed3ToClamp(*${fmed3}, ${matchinfo}); }]),
  (apply [{ applyClamp(*${fmed3}, ${matchinfo}); }])>;

def remove_fcanonicalize_matchinfo : GIDefMatchData<"Register">;

def remove_fcanonicalize : GICombineRule<
  (defs root:$fcanonicalize, remove_fcanonicalize_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FCANONICALIZE):$fcanonicalize,
         [{ return matchRemoveFcanonicalize(*${fcanonicalize}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${fcanonicalize}, ${matchinfo}); }])>;

def foldable_fneg_matchdata : GIDefMatchData<"MachineInstr *">;

def foldable_fneg : GICombineRule<
  (defs root:$ffn, foldable_fneg_matchdata:$matchinfo),
  (match (wip_match_opcode G_FNEG):$ffn,
         [{ return Helper.matchFoldableFneg(*${ffn}, ${matchinfo}); }]),
  (apply [{ Helper.applyFoldableFneg(*${ffn}, ${matchinfo}); }])>;

// Detects 64-bit multiplies (G_MUL) whose operands' higher bits are
// zero/sign extended.
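// A rough sketch of the intent (the exact known-bits checks live in
// matchCombine_s_mul_u64): a 64-bit G_MUL whose operands are known to be
// zero-extended 32-bit values is rewritten to G_AMDGPU_S_MUL_U64_U32, and one
// whose operands are sign-extended 32-bit values to G_AMDGPU_S_MUL_I64_I32.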
def smulu64 : GICombineRule<
  (defs root:$smul, unsigned_matchinfo:$matchinfo),
  (match (wip_match_opcode G_MUL):$smul,
         [{ return matchCombine_s_mul_u64(*${smul}, ${matchinfo}); }]),
  (apply [{ applyCombine_s_mul_u64(*${smul}, ${matchinfo}); }])>;

def sign_extension_in_reg_matchdata : GIDefMatchData<"MachineInstr *">;

def sign_extension_in_reg : GICombineRule<
  (defs root:$sign_inreg, sign_extension_in_reg_matchdata:$matchinfo),
  (match (wip_match_opcode G_SEXT_INREG):$sign_inreg,
         [{ return matchCombineSignExtendInReg(*${sign_inreg}, ${matchinfo}); }]),
  (apply [{ applyCombineSignExtendInReg(*${sign_inreg}, ${matchinfo}); }])>;


let Predicates = [Has16BitInsts, NotHasMed3_16] in {
// For gfx8, expand f16-fmed3-as-f32 into a min/max f16 sequence. This
// saves one instruction compared to the promotion.
//
// FIXME: Should have ComplexPattern like in/out matchers
//
// FIXME: We should be able to match either G_AMDGPU_FMED3 or
// G_INTRINSIC @llvm.amdgcn.fmed3. Currently the legalizer will
// replace the intrinsic with G_AMDGPU_FMED3 since we can't write a
// pattern to match it.
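//
// A rough sketch of the transform (the exact legality checks live in
// matchExpandPromotedF16FMed3): for f16 values a, b and c promoted to f32,
//   %med:_(s32) = G_AMDGPU_FMED3 %a32, %b32, %c32
//   %r:_(s16)   = G_FPTRUNC %med
// is rebuilt as an f16 min/max sequence along the lines of
//   fminnum_ieee(fmaxnum_ieee(a, b), fmaxnum_ieee(fminnum_ieee(a, b), c)).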
def expand_promoted_fmed3 : GICombineRule<
  (defs root:$fptrunc_dst),
  (match (G_FPTRUNC $fptrunc_dst, $fmed3_dst):$fptrunc,
         (G_AMDGPU_FMED3 $fmed3_dst, $src0, $src1, $src2),
    [{ return Helper.matchExpandPromotedF16FMed3(*${fptrunc}, ${src0}.getReg(), ${src1}.getReg(), ${src2}.getReg()); }]),
  (apply [{ Helper.applyExpandPromotedF16FMed3(*${fptrunc}, ${src0}.getReg(), ${src1}.getReg(), ${src2}.getReg()); }])
>;

} // End Predicates = [Has16BitInsts, NotHasMed3_16]

// Combines which should only apply on SI/CI
def gfx6gfx7_combines : GICombineGroup<[fcmp_select_to_fmin_fmax_legacy]>;

// Combines which should only apply on VI
def gfx8_combines : GICombineGroup<[expand_promoted_fmed3]>;

def AMDGPUPreLegalizerCombiner: GICombiner<
  "AMDGPUPreLegalizerCombinerImpl",
  [all_combines, clamp_i64_to_i16, foldable_fneg]> {
  let CombineAllMethodName = "tryCombineAllImpl";
}

def AMDGPUPostLegalizerCombiner: GICombiner<
  "AMDGPUPostLegalizerCombinerImpl",
  [all_combines, gfx6gfx7_combines, gfx8_combines,
   uchar_to_float, cvt_f32_ubyteN, remove_fcanonicalize, foldable_fneg,
   rcp_sqrt_to_rsq, sign_extension_in_reg, smulu64]> {
  let CombineAllMethodName = "tryCombineAllImpl";
}

def AMDGPURegBankCombiner : GICombiner<
  "AMDGPURegBankCombinerImpl",
  [unmerge_merge, unmerge_cst, unmerge_undef,
   zext_trunc_fold, int_minmax_to_med3, ptr_add_immed_chain,
   fp_minmax_to_clamp, fp_minmax_to_med3, fmed3_intrinsic_to_clamp]> {
}