xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (revision 46c59ea9b61755455ff6bf9f3e7b834e1af634ea)
1 //===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This is the parent TargetLowering class for hardware code gen
11 /// targets.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "AMDGPUISelLowering.h"
16 #include "AMDGPU.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPUMachineFunction.h"
19 #include "SIMachineFunctionInfo.h"
20 #include "llvm/CodeGen/Analysis.h"
21 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/IR/DiagnosticInfo.h"
24 #include "llvm/IR/IntrinsicsAMDGPU.h"
25 #include "llvm/IR/PatternMatch.h"
26 #include "llvm/Support/CommandLine.h"
27 #include "llvm/Support/KnownBits.h"
28 #include "llvm/Target/TargetMachine.h"
29 
30 using namespace llvm;
31 
32 #include "AMDGPUGenCallingConv.inc"
33 
34 static cl::opt<bool> AMDGPUBypassSlowDiv(
35   "amdgpu-bypass-slow-div",
36   cl::desc("Skip 64-bit divide for dynamic 32-bit values"),
37   cl::init(true));
38 
39 // Find a larger type to do a load / store of a vector with.
40 EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
41   unsigned StoreSize = VT.getStoreSizeInBits();
42   if (StoreSize <= 32)
43     return EVT::getIntegerVT(Ctx, StoreSize);
44 
45   assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
46   return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
47 }
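// For illustration, following the rules above: f32 maps to i32 and i16 stays
// i16 (single sub-32-bit integer), while v4f32 and v2f64 (128-bit store size)
// both map to v4i32.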
48 
49 unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
50   return DAG.computeKnownBits(Op).countMaxActiveBits();
51 }
52 
53 unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
54   // In order for this to be a signed 24-bit value, bit 23 must
55   // be a sign bit.
56   return DAG.ComputeMaxSignificantBits(Op);
57 }
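// These helpers are used, e.g. when deciding whether a multiply can be done
// with the 24-bit mul instructions: numBitsUnsigned() gives the number of bits
// that may be nonzero, and numBitsSigned() the number of bits needed to
// represent the value as a signed quantity.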
58 
59 AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
60                                            const AMDGPUSubtarget &STI)
61     : TargetLowering(TM), Subtarget(&STI) {
62   // Lower floating point store/load to integer store/load to reduce the number
63   // of patterns in tablegen.
64   setOperationAction(ISD::LOAD, MVT::f32, Promote);
65   AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
66 
67   setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
68   AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
69 
70   setOperationAction(ISD::LOAD, MVT::v3f32, Promote);
71   AddPromotedToType(ISD::LOAD, MVT::v3f32, MVT::v3i32);
72 
73   setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
74   AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
75 
76   setOperationAction(ISD::LOAD, MVT::v5f32, Promote);
77   AddPromotedToType(ISD::LOAD, MVT::v5f32, MVT::v5i32);
78 
79   setOperationAction(ISD::LOAD, MVT::v6f32, Promote);
80   AddPromotedToType(ISD::LOAD, MVT::v6f32, MVT::v6i32);
81 
82   setOperationAction(ISD::LOAD, MVT::v7f32, Promote);
83   AddPromotedToType(ISD::LOAD, MVT::v7f32, MVT::v7i32);
84 
85   setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
86   AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
87 
88   setOperationAction(ISD::LOAD, MVT::v9f32, Promote);
89   AddPromotedToType(ISD::LOAD, MVT::v9f32, MVT::v9i32);
90 
91   setOperationAction(ISD::LOAD, MVT::v10f32, Promote);
92   AddPromotedToType(ISD::LOAD, MVT::v10f32, MVT::v10i32);
93 
94   setOperationAction(ISD::LOAD, MVT::v11f32, Promote);
95   AddPromotedToType(ISD::LOAD, MVT::v11f32, MVT::v11i32);
96 
97   setOperationAction(ISD::LOAD, MVT::v12f32, Promote);
98   AddPromotedToType(ISD::LOAD, MVT::v12f32, MVT::v12i32);
99 
100   setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
101   AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
102 
103   setOperationAction(ISD::LOAD, MVT::v32f32, Promote);
104   AddPromotedToType(ISD::LOAD, MVT::v32f32, MVT::v32i32);
105 
106   setOperationAction(ISD::LOAD, MVT::i64, Promote);
107   AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
108 
109   setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
110   AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);
111 
112   setOperationAction(ISD::LOAD, MVT::f64, Promote);
113   AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);
114 
115   setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
116   AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);
117 
118   setOperationAction(ISD::LOAD, MVT::v3i64, Promote);
119   AddPromotedToType(ISD::LOAD, MVT::v3i64, MVT::v6i32);
120 
121   setOperationAction(ISD::LOAD, MVT::v4i64, Promote);
122   AddPromotedToType(ISD::LOAD, MVT::v4i64, MVT::v8i32);
123 
124   setOperationAction(ISD::LOAD, MVT::v3f64, Promote);
125   AddPromotedToType(ISD::LOAD, MVT::v3f64, MVT::v6i32);
126 
127   setOperationAction(ISD::LOAD, MVT::v4f64, Promote);
128   AddPromotedToType(ISD::LOAD, MVT::v4f64, MVT::v8i32);
129 
130   setOperationAction(ISD::LOAD, MVT::v8i64, Promote);
131   AddPromotedToType(ISD::LOAD, MVT::v8i64, MVT::v16i32);
132 
133   setOperationAction(ISD::LOAD, MVT::v8f64, Promote);
134   AddPromotedToType(ISD::LOAD, MVT::v8f64, MVT::v16i32);
135 
136   setOperationAction(ISD::LOAD, MVT::v16i64, Promote);
137   AddPromotedToType(ISD::LOAD, MVT::v16i64, MVT::v32i32);
138 
139   setOperationAction(ISD::LOAD, MVT::v16f64, Promote);
140   AddPromotedToType(ISD::LOAD, MVT::v16f64, MVT::v32i32);
141 
142   setOperationAction(ISD::LOAD, MVT::i128, Promote);
143   AddPromotedToType(ISD::LOAD, MVT::i128, MVT::v4i32);
144 
145   // There are no 64-bit extloads. These should be done as a 32-bit extload and
146   // an extension to 64-bit.
147   for (MVT VT : MVT::integer_valuetypes())
148     setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i64, VT,
149                      Expand);
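  // With the Expand action above, e.g. a zextload from i32 to i64 is legalized
  // as an ordinary 32-bit load followed by a zero_extend to i64 rather than a
  // single extending load.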
150 
151   for (MVT VT : MVT::integer_valuetypes()) {
152     if (VT == MVT::i64)
153       continue;
154 
155     for (auto Op : {ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}) {
156       setLoadExtAction(Op, VT, MVT::i1, Promote);
157       setLoadExtAction(Op, VT, MVT::i8, Legal);
158       setLoadExtAction(Op, VT, MVT::i16, Legal);
159       setLoadExtAction(Op, VT, MVT::i32, Expand);
160     }
161   }
162 
163   for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
164     for (auto MemVT :
165          {MVT::v2i8, MVT::v4i8, MVT::v2i16, MVT::v3i16, MVT::v4i16})
166       setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}, VT, MemVT,
167                        Expand);
168 
169   setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
170   setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::bf16, Expand);
171   setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
172   setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2bf16, Expand);
173   setLoadExtAction(ISD::EXTLOAD, MVT::v3f32, MVT::v3f16, Expand);
174   setLoadExtAction(ISD::EXTLOAD, MVT::v3f32, MVT::v3bf16, Expand);
175   setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
176   setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4bf16, Expand);
177   setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
178   setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8bf16, Expand);
179   setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Expand);
180   setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16bf16, Expand);
181   setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32f16, Expand);
182   setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32bf16, Expand);
183 
184   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
185   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
186   setLoadExtAction(ISD::EXTLOAD, MVT::v3f64, MVT::v3f32, Expand);
187   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
188   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);
189   setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f32, Expand);
190 
191   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
192   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::bf16, Expand);
193   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
194   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2bf16, Expand);
195   setLoadExtAction(ISD::EXTLOAD, MVT::v3f64, MVT::v3f16, Expand);
196   setLoadExtAction(ISD::EXTLOAD, MVT::v3f64, MVT::v3bf16, Expand);
197   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
198   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4bf16, Expand);
199   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
200   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8bf16, Expand);
201   setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f16, Expand);
202   setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16bf16, Expand);
203 
204   setOperationAction(ISD::STORE, MVT::f32, Promote);
205   AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);
206 
207   setOperationAction(ISD::STORE, MVT::v2f32, Promote);
208   AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
209 
210   setOperationAction(ISD::STORE, MVT::v3f32, Promote);
211   AddPromotedToType(ISD::STORE, MVT::v3f32, MVT::v3i32);
212 
213   setOperationAction(ISD::STORE, MVT::v4f32, Promote);
214   AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
215 
216   setOperationAction(ISD::STORE, MVT::v5f32, Promote);
217   AddPromotedToType(ISD::STORE, MVT::v5f32, MVT::v5i32);
218 
219   setOperationAction(ISD::STORE, MVT::v6f32, Promote);
220   AddPromotedToType(ISD::STORE, MVT::v6f32, MVT::v6i32);
221 
222   setOperationAction(ISD::STORE, MVT::v7f32, Promote);
223   AddPromotedToType(ISD::STORE, MVT::v7f32, MVT::v7i32);
224 
225   setOperationAction(ISD::STORE, MVT::v8f32, Promote);
226   AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);
227 
228   setOperationAction(ISD::STORE, MVT::v9f32, Promote);
229   AddPromotedToType(ISD::STORE, MVT::v9f32, MVT::v9i32);
230 
231   setOperationAction(ISD::STORE, MVT::v10f32, Promote);
232   AddPromotedToType(ISD::STORE, MVT::v10f32, MVT::v10i32);
233 
234   setOperationAction(ISD::STORE, MVT::v11f32, Promote);
235   AddPromotedToType(ISD::STORE, MVT::v11f32, MVT::v11i32);
236 
237   setOperationAction(ISD::STORE, MVT::v12f32, Promote);
238   AddPromotedToType(ISD::STORE, MVT::v12f32, MVT::v12i32);
239 
240   setOperationAction(ISD::STORE, MVT::v16f32, Promote);
241   AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
242 
243   setOperationAction(ISD::STORE, MVT::v32f32, Promote);
244   AddPromotedToType(ISD::STORE, MVT::v32f32, MVT::v32i32);
245 
246   setOperationAction(ISD::STORE, MVT::i64, Promote);
247   AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
248 
249   setOperationAction(ISD::STORE, MVT::v2i64, Promote);
250   AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);
251 
252   setOperationAction(ISD::STORE, MVT::f64, Promote);
253   AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);
254 
255   setOperationAction(ISD::STORE, MVT::v2f64, Promote);
256   AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);
257 
258   setOperationAction(ISD::STORE, MVT::v3i64, Promote);
259   AddPromotedToType(ISD::STORE, MVT::v3i64, MVT::v6i32);
260 
261   setOperationAction(ISD::STORE, MVT::v3f64, Promote);
262   AddPromotedToType(ISD::STORE, MVT::v3f64, MVT::v6i32);
263 
264   setOperationAction(ISD::STORE, MVT::v4i64, Promote);
265   AddPromotedToType(ISD::STORE, MVT::v4i64, MVT::v8i32);
266 
267   setOperationAction(ISD::STORE, MVT::v4f64, Promote);
268   AddPromotedToType(ISD::STORE, MVT::v4f64, MVT::v8i32);
269 
270   setOperationAction(ISD::STORE, MVT::v8i64, Promote);
271   AddPromotedToType(ISD::STORE, MVT::v8i64, MVT::v16i32);
272 
273   setOperationAction(ISD::STORE, MVT::v8f64, Promote);
274   AddPromotedToType(ISD::STORE, MVT::v8f64, MVT::v16i32);
275 
276   setOperationAction(ISD::STORE, MVT::v16i64, Promote);
277   AddPromotedToType(ISD::STORE, MVT::v16i64, MVT::v32i32);
278 
279   setOperationAction(ISD::STORE, MVT::v16f64, Promote);
280   AddPromotedToType(ISD::STORE, MVT::v16f64, MVT::v32i32);
281 
282   setOperationAction(ISD::STORE, MVT::i128, Promote);
283   AddPromotedToType(ISD::STORE, MVT::i128, MVT::v4i32);
284 
285   setTruncStoreAction(MVT::i64, MVT::i1, Expand);
286   setTruncStoreAction(MVT::i64, MVT::i8, Expand);
287   setTruncStoreAction(MVT::i64, MVT::i16, Expand);
288   setTruncStoreAction(MVT::i64, MVT::i32, Expand);
289 
290   setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
291   setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
292   setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
293   setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);
294 
295   setTruncStoreAction(MVT::f32, MVT::bf16, Expand);
296   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
297   setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
298   setTruncStoreAction(MVT::v3f32, MVT::v3f16, Expand);
299   setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
300   setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
301   setTruncStoreAction(MVT::v16f32, MVT::v16f16, Expand);
302   setTruncStoreAction(MVT::v32f32, MVT::v32f16, Expand);
303 
304   setTruncStoreAction(MVT::f64, MVT::bf16, Expand);
305   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
306   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
307 
308   setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
309   setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);
310 
311   setTruncStoreAction(MVT::v3i64, MVT::v3i32, Expand);
312   setTruncStoreAction(MVT::v3i64, MVT::v3i16, Expand);
313   setTruncStoreAction(MVT::v3f64, MVT::v3f32, Expand);
314   setTruncStoreAction(MVT::v3f64, MVT::v3f16, Expand);
315 
316   setTruncStoreAction(MVT::v4i64, MVT::v4i32, Expand);
317   setTruncStoreAction(MVT::v4i64, MVT::v4i16, Expand);
318   setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
319   setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);
320 
321   setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
322   setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);
323 
324   setTruncStoreAction(MVT::v16f64, MVT::v16f32, Expand);
325   setTruncStoreAction(MVT::v16f64, MVT::v16f16, Expand);
326   setTruncStoreAction(MVT::v16i64, MVT::v16i16, Expand);
328   setTruncStoreAction(MVT::v16i64, MVT::v16i8, Expand);
330   setTruncStoreAction(MVT::v16i64, MVT::v16i1, Expand);
331 
332   setOperationAction(ISD::Constant, {MVT::i32, MVT::i64}, Legal);
333   setOperationAction(ISD::ConstantFP, {MVT::f32, MVT::f64}, Legal);
334 
335   setOperationAction({ISD::BR_JT, ISD::BRIND}, MVT::Other, Expand);
336 
337   // For R600, this is totally unsupported; just custom lower it to produce an
338   // error.
339   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
340 
341   // Library functions.  These default to Expand, but we have instructions
342   // for them.
343   setOperationAction({ISD::FCEIL, ISD::FPOW, ISD::FABS, ISD::FFLOOR,
344                       ISD::FROUNDEVEN, ISD::FTRUNC, ISD::FMINNUM, ISD::FMAXNUM},
345                      MVT::f32, Legal);
346 
347   setOperationAction(ISD::FLOG2, MVT::f32, Custom);
348   setOperationAction(ISD::FROUND, {MVT::f32, MVT::f64}, Custom);
349 
350   setOperationAction(
351       {ISD::FLOG, ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FEXP10}, MVT::f32,
352       Custom);
353 
354   setOperationAction(ISD::FNEARBYINT, {MVT::f16, MVT::f32, MVT::f64}, Custom);
355 
356   setOperationAction(ISD::FRINT, {MVT::f16, MVT::f32, MVT::f64}, Custom);
357 
358   setOperationAction(ISD::FREM, {MVT::f16, MVT::f32, MVT::f64}, Custom);
359 
360   if (Subtarget->has16BitInsts())
361     setOperationAction(ISD::IS_FPCLASS, {MVT::f16, MVT::f32, MVT::f64}, Legal);
362   else {
363     setOperationAction(ISD::IS_FPCLASS, {MVT::f32, MVT::f64}, Legal);
364     setOperationAction({ISD::FLOG2, ISD::FEXP2}, MVT::f16, Custom);
365   }
366 
367   setOperationAction({ISD::FLOG10, ISD::FLOG, ISD::FEXP, ISD::FEXP10}, MVT::f16,
368                      Custom);
369 
370   // FIXME: These IS_FPCLASS vector fp types are marked custom so they reach the
371   // scalarization code. This can be removed when the IS_FPCLASS expansion is no
372   // longer called by default unless marked custom/legal.
373   setOperationAction(
374       ISD::IS_FPCLASS,
375       {MVT::v2f16, MVT::v3f16, MVT::v4f16, MVT::v16f16, MVT::v2f32, MVT::v3f32,
376        MVT::v4f32, MVT::v5f32, MVT::v6f32, MVT::v7f32, MVT::v8f32, MVT::v16f32,
377        MVT::v2f64, MVT::v3f64, MVT::v4f64, MVT::v8f64, MVT::v16f64},
378       Custom);
379 
380   // Expand to fneg + fadd.
381   setOperationAction(ISD::FSUB, MVT::f64, Expand);
382 
383   setOperationAction(ISD::CONCAT_VECTORS,
384                      {MVT::v3i32,  MVT::v3f32,  MVT::v4i32,  MVT::v4f32,
385                       MVT::v5i32,  MVT::v5f32,  MVT::v6i32,  MVT::v6f32,
386                       MVT::v7i32,  MVT::v7f32,  MVT::v8i32,  MVT::v8f32,
387                       MVT::v9i32,  MVT::v9f32,  MVT::v10i32, MVT::v10f32,
388                       MVT::v11i32, MVT::v11f32, MVT::v12i32, MVT::v12f32},
389                      Custom);
390 
391   // FIXME: Why is v8f16/v8bf16 missing?
392   setOperationAction(
393       ISD::EXTRACT_SUBVECTOR,
394       {MVT::v2f16,  MVT::v2bf16, MVT::v2i16,  MVT::v4f16,  MVT::v4bf16,
395        MVT::v4i16,  MVT::v2f32,  MVT::v2i32,  MVT::v3f32,  MVT::v3i32,
396        MVT::v4f32,  MVT::v4i32,  MVT::v5f32,  MVT::v5i32,  MVT::v6f32,
397        MVT::v6i32,  MVT::v7f32,  MVT::v7i32,  MVT::v8f32,  MVT::v8i32,
398        MVT::v9f32,  MVT::v9i32,  MVT::v10i32, MVT::v10f32, MVT::v11i32,
399        MVT::v11f32, MVT::v12i32, MVT::v12f32, MVT::v16f16, MVT::v16bf16,
400        MVT::v16i16, MVT::v16f32, MVT::v16i32, MVT::v32f32, MVT::v32i32,
401        MVT::v2f64,  MVT::v2i64,  MVT::v3f64,  MVT::v3i64,  MVT::v4f64,
402        MVT::v4i64,  MVT::v8f64,  MVT::v8i64,  MVT::v16f64, MVT::v16i64,
403        MVT::v32i16, MVT::v32f16, MVT::v32bf16},
404       Custom);
405 
406   setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
407   setOperationAction(ISD::FP_TO_FP16, {MVT::f64, MVT::f32}, Custom);
408 
409   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
410   for (MVT VT : ScalarIntVTs) {
411     // These should use [SU]DIVREM, so set them to expand
412     setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, VT,
413                        Expand);
414 
415     // The GPU does not have a divrem instruction for signed or unsigned operands.
416     setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, VT, Custom);
417 
418     // The GPU does not have [S|U]MUL_LOHI as a single instruction.
419     setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);
420 
421     setOperationAction({ISD::BSWAP, ISD::CTTZ, ISD::CTLZ}, VT, Expand);
422 
423     // AMDGPU uses ADDC/SUBC/ADDE/SUBE
424     setOperationAction({ISD::ADDC, ISD::SUBC, ISD::ADDE, ISD::SUBE}, VT, Legal);
425   }
426 
427   // The hardware supports 32-bit FSHR, but not FSHL.
428   setOperationAction(ISD::FSHR, MVT::i32, Legal);
429 
430   // The hardware supports 32-bit ROTR, but not ROTL.
431   setOperationAction(ISD::ROTL, {MVT::i32, MVT::i64}, Expand);
432   setOperationAction(ISD::ROTR, MVT::i64, Expand);
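  // With i32 ROTR left legal, an i32 ROTL is typically rewritten in terms of
  // ROTR, while the 64-bit rotates are expanded into shift/or sequences (the
  // exact form is up to the legalizer).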
433 
434   setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::i16, Expand);
435 
436   setOperationAction({ISD::MUL, ISD::MULHU, ISD::MULHS}, MVT::i64, Expand);
437   setOperationAction(
438       {ISD::UINT_TO_FP, ISD::SINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
439       MVT::i64, Custom);
440   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
441 
442   setOperationAction({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX}, MVT::i32,
443                      Legal);
444 
445   setOperationAction(
446       {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
447       MVT::i64, Custom);
448 
449   static const MVT::SimpleValueType VectorIntTypes[] = {
450       MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, MVT::v6i32, MVT::v7i32,
451       MVT::v9i32, MVT::v10i32, MVT::v11i32, MVT::v12i32};
452 
453   for (MVT VT : VectorIntTypes) {
454     // Expand the following operations for the current type by default.
455     setOperationAction({ISD::ADD,        ISD::AND,     ISD::FP_TO_SINT,
456                         ISD::FP_TO_UINT, ISD::MUL,     ISD::MULHU,
457                         ISD::MULHS,      ISD::OR,      ISD::SHL,
458                         ISD::SRA,        ISD::SRL,     ISD::ROTL,
459                         ISD::ROTR,       ISD::SUB,     ISD::SINT_TO_FP,
460                         ISD::UINT_TO_FP, ISD::SDIV,    ISD::UDIV,
461                         ISD::SREM,       ISD::UREM,    ISD::SMUL_LOHI,
462                         ISD::UMUL_LOHI,  ISD::SDIVREM, ISD::UDIVREM,
463                         ISD::SELECT,     ISD::VSELECT, ISD::SELECT_CC,
464                         ISD::XOR,        ISD::BSWAP,   ISD::CTPOP,
465                         ISD::CTTZ,       ISD::CTLZ,    ISD::VECTOR_SHUFFLE,
466                         ISD::SETCC},
467                        VT, Expand);
468   }
469 
470   static const MVT::SimpleValueType FloatVectorTypes[] = {
471       MVT::v2f32, MVT::v3f32,  MVT::v4f32, MVT::v5f32, MVT::v6f32, MVT::v7f32,
472       MVT::v9f32, MVT::v10f32, MVT::v11f32, MVT::v12f32};
473 
474   for (MVT VT : FloatVectorTypes) {
475     setOperationAction(
476         {ISD::FABS,          ISD::FMINNUM,        ISD::FMAXNUM,
477          ISD::FADD,          ISD::FCEIL,          ISD::FCOS,
478          ISD::FDIV,          ISD::FEXP2,          ISD::FEXP,
479          ISD::FEXP10,        ISD::FLOG2,          ISD::FREM,
480          ISD::FLOG,          ISD::FLOG10,         ISD::FPOW,
481          ISD::FFLOOR,        ISD::FTRUNC,         ISD::FMUL,
482          ISD::FMA,           ISD::FRINT,          ISD::FNEARBYINT,
483          ISD::FSQRT,         ISD::FSIN,           ISD::FSUB,
484          ISD::FNEG,          ISD::VSELECT,        ISD::SELECT_CC,
485          ISD::FCOPYSIGN,     ISD::VECTOR_SHUFFLE, ISD::SETCC,
486          ISD::FCANONICALIZE, ISD::FROUNDEVEN},
487         VT, Expand);
488   }
489 
490   // This causes an unrolled select operation to be used rather than expansion
491   // with bit operations. This is in general better, but the alternative using
492   // BFI instructions may be better if the select sources are SGPRs.
493   setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
494   AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);
495 
496   setOperationAction(ISD::SELECT, MVT::v3f32, Promote);
497   AddPromotedToType(ISD::SELECT, MVT::v3f32, MVT::v3i32);
498 
499   setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
500   AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);
501 
502   setOperationAction(ISD::SELECT, MVT::v5f32, Promote);
503   AddPromotedToType(ISD::SELECT, MVT::v5f32, MVT::v5i32);
504 
505   setOperationAction(ISD::SELECT, MVT::v6f32, Promote);
506   AddPromotedToType(ISD::SELECT, MVT::v6f32, MVT::v6i32);
507 
508   setOperationAction(ISD::SELECT, MVT::v7f32, Promote);
509   AddPromotedToType(ISD::SELECT, MVT::v7f32, MVT::v7i32);
510 
511   setOperationAction(ISD::SELECT, MVT::v9f32, Promote);
512   AddPromotedToType(ISD::SELECT, MVT::v9f32, MVT::v9i32);
513 
514   setOperationAction(ISD::SELECT, MVT::v10f32, Promote);
515   AddPromotedToType(ISD::SELECT, MVT::v10f32, MVT::v10i32);
516 
517   setOperationAction(ISD::SELECT, MVT::v11f32, Promote);
518   AddPromotedToType(ISD::SELECT, MVT::v11f32, MVT::v11i32);
519 
520   setOperationAction(ISD::SELECT, MVT::v12f32, Promote);
521   AddPromotedToType(ISD::SELECT, MVT::v12f32, MVT::v12i32);
522 
523   // Disable most libcalls.
524   for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I) {
525     if (I < RTLIB::ATOMIC_LOAD || I > RTLIB::ATOMIC_FETCH_NAND_16)
526       setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);
527   }
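  // Only the libcalls in the atomic range (ATOMIC_LOAD .. ATOMIC_FETCH_NAND_16)
  // keep their names; all other libcall names are cleared so lowering cannot
  // fall back to emitting calls to them.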
528 
529   setSchedulingPreference(Sched::RegPressure);
530   setJumpIsExpensive(true);
531 
532   // FIXME: This is only partially true. If we have to do vector compares, any
533   // SGPR pair can be a condition register. If we have a uniform condition, we
534   // are better off doing SALU operations, where there is only one SCC. For now,
535   // we don't have a way of knowing during instruction selection if a condition
536   // will be uniform and we always use vector compares. Assume we are using
537   // vector compares until that is fixed.
538   setHasMultipleConditionRegisters(true);
539 
540   setMinCmpXchgSizeInBits(32);
541   setSupportsUnalignedAtomics(false);
542 
543   PredictableSelectIsExpensive = false;
544 
545   // We want to find all load dependencies for long chains of stores to enable
546   // merging into very wide vectors. The problem is with vectors with > 4
547   // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
548   // vectors are a legal type, even though we usually have to split the loads.
549   // When we can more precisely specify load legality per address
550   // space, we should be able to make FindBetterChain/MergeConsecutiveStores
551   // smarter so that they can figure out what to do in 2 iterations without all
552   // N > 4 stores on the same chain.
553   GatherAllAliasesMaxDepth = 16;
554 
555   // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
556   // about these during lowering.
557   MaxStoresPerMemcpy  = 0xffffffff;
558   MaxStoresPerMemmove = 0xffffffff;
559   MaxStoresPerMemset  = 0xffffffff;
560 
561   // The expansion for 64-bit division is enormous.
562   if (AMDGPUBypassSlowDiv)
563     addBypassSlowDiv(64, 32);
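  // This inserts a runtime check: if both 64-bit operands of a division
  // actually fit in 32 bits, a much cheaper 32-bit divide is executed instead
  // of the full 64-bit expansion.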
564 
565   setTargetDAGCombine({ISD::BITCAST,    ISD::SHL,
566                        ISD::SRA,        ISD::SRL,
567                        ISD::TRUNCATE,   ISD::MUL,
568                        ISD::SMUL_LOHI,  ISD::UMUL_LOHI,
569                        ISD::MULHU,      ISD::MULHS,
570                        ISD::SELECT,     ISD::SELECT_CC,
571                        ISD::STORE,      ISD::FADD,
572                        ISD::FSUB,       ISD::FNEG,
573                        ISD::FABS,       ISD::AssertZext,
574                        ISD::AssertSext, ISD::INTRINSIC_WO_CHAIN});
575 
576   setMaxAtomicSizeInBitsSupported(64);
577 }
578 
579 bool AMDGPUTargetLowering::mayIgnoreSignedZero(SDValue Op) const {
580   if (getTargetMachine().Options.NoSignedZerosFPMath)
581     return true;
582 
583   const auto Flags = Op.getNode()->getFlags();
584   if (Flags.hasNoSignedZeros())
585     return true;
586 
587   return false;
588 }
589 
590 //===----------------------------------------------------------------------===//
591 // Target Information
592 //===----------------------------------------------------------------------===//
593 
594 LLVM_READNONE
595 static bool fnegFoldsIntoOpcode(unsigned Opc) {
596   switch (Opc) {
597   case ISD::FADD:
598   case ISD::FSUB:
599   case ISD::FMUL:
600   case ISD::FMA:
601   case ISD::FMAD:
602   case ISD::FMINNUM:
603   case ISD::FMAXNUM:
604   case ISD::FMINNUM_IEEE:
605   case ISD::FMAXNUM_IEEE:
606   case ISD::FMINIMUM:
607   case ISD::FMAXIMUM:
608   case ISD::SELECT:
609   case ISD::FSIN:
610   case ISD::FTRUNC:
611   case ISD::FRINT:
612   case ISD::FNEARBYINT:
613   case ISD::FROUNDEVEN:
614   case ISD::FCANONICALIZE:
615   case AMDGPUISD::RCP:
616   case AMDGPUISD::RCP_LEGACY:
617   case AMDGPUISD::RCP_IFLAG:
618   case AMDGPUISD::SIN_HW:
619   case AMDGPUISD::FMUL_LEGACY:
620   case AMDGPUISD::FMIN_LEGACY:
621   case AMDGPUISD::FMAX_LEGACY:
622   case AMDGPUISD::FMED3:
623     // TODO: handle llvm.amdgcn.fma.legacy
624     return true;
625   case ISD::BITCAST:
626     llvm_unreachable("bitcast is special cased");
627   default:
628     return false;
629   }
630 }
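// In practice, "folds into" means the fneg can be absorbed by the operation,
// typically as a source modifier on its operands or, for min/max style nodes,
// by switching to the opposite operation; see performFNegCombine for the
// actual folds.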
631 
632 static bool fnegFoldsIntoOp(const SDNode *N) {
633   unsigned Opc = N->getOpcode();
634   if (Opc == ISD::BITCAST) {
635     // TODO: Is there a benefit to checking the conditions performFNegCombine
636     // does? We don't for the other cases.
637     SDValue BCSrc = N->getOperand(0);
638     if (BCSrc.getOpcode() == ISD::BUILD_VECTOR) {
639       return BCSrc.getNumOperands() == 2 &&
640              BCSrc.getOperand(1).getValueSizeInBits() == 32;
641     }
642 
643     return BCSrc.getOpcode() == ISD::SELECT && BCSrc.getValueType() == MVT::f32;
644   }
645 
646   return fnegFoldsIntoOpcode(Opc);
647 }
648 
649 /// \returns true if the operation will definitely need to use a 64-bit
650 /// encoding, and thus will use a VOP3 encoding regardless of the source
651 /// modifiers.
652 LLVM_READONLY
653 static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
654   return (N->getNumOperands() > 2 && N->getOpcode() != ISD::SELECT) ||
655          VT == MVT::f64;
656 }
657 
658 /// Return true if v_cndmask_b32 will support fabs/fneg source modifiers for
659 /// the given type when used for ISD::SELECT.
660 LLVM_READONLY
661 static bool selectSupportsSourceMods(const SDNode *N) {
662   // TODO: Only applies if select will be vector
663   return N->getValueType(0) == MVT::f32;
664 }
665 
666 // Most FP instructions support source modifiers, but this could be refined
667 // slightly.
668 LLVM_READONLY
669 static bool hasSourceMods(const SDNode *N) {
670   if (isa<MemSDNode>(N))
671     return false;
672 
673   switch (N->getOpcode()) {
674   case ISD::CopyToReg:
675   case ISD::FDIV:
676   case ISD::FREM:
677   case ISD::INLINEASM:
678   case ISD::INLINEASM_BR:
679   case AMDGPUISD::DIV_SCALE:
680   case ISD::INTRINSIC_W_CHAIN:
681 
682   // TODO: Should really be looking at the users of the bitcast. These are
683   // problematic because bitcasts are used to legalize all stores to integer
684   // types.
685   case ISD::BITCAST:
686     return false;
687   case ISD::INTRINSIC_WO_CHAIN: {
688     switch (N->getConstantOperandVal(0)) {
689     case Intrinsic::amdgcn_interp_p1:
690     case Intrinsic::amdgcn_interp_p2:
691     case Intrinsic::amdgcn_interp_mov:
692     case Intrinsic::amdgcn_interp_p1_f16:
693     case Intrinsic::amdgcn_interp_p2_f16:
694       return false;
695     default:
696       return true;
697     }
698   }
699   case ISD::SELECT:
700     return selectSupportsSourceMods(N);
701   default:
702     return true;
703   }
704 }
705 
706 bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
707                                                  unsigned CostThreshold) {
708   // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
709   // it is truly free to use a source modifier in all cases. If there are
710   // multiple users, but using a source modifier for each of them would require
711   // a VOP3 encoding, there will be a code size increase. Try to avoid increasing
712   // code size unless we know it will save on the instruction count.
713   unsigned NumMayIncreaseSize = 0;
714   MVT VT = N->getValueType(0).getScalarType().getSimpleVT();
715 
716   assert(!N->use_empty());
717 
718   // XXX - Should this limit number of uses to check?
719   for (const SDNode *U : N->uses()) {
720     if (!hasSourceMods(U))
721       return false;
722 
723     if (!opMustUseVOP3Encoding(U, VT)) {
724       if (++NumMayIncreaseSize > CostThreshold)
725         return false;
726     }
727   }
728 
729   return true;
730 }
731 
732 EVT AMDGPUTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
733                                               ISD::NodeType ExtendKind) const {
734   assert(!VT.isVector() && "only scalar expected");
735 
736   // Round to the next multiple of 32-bits.
737   unsigned Size = VT.getSizeInBits();
738   if (Size <= 32)
739     return MVT::i32;
740   return EVT::getIntegerVT(Context, 32 * ((Size + 31) / 32));
741 }
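// For example: i1/i8/i16 returns extend to i32, i48 rounds up to i64, and
// types already a multiple of 32 bits (i64, i96, ...) keep their width.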
742 
743 MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
744   return MVT::i32;
745 }
746 
747 bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
748   return true;
749 }
750 
751 // The backend supports 32-bit, 64-bit, and (with 16-bit insts) 16-bit FP immediates.
752 // FIXME: Why are we reporting vectors of FP immediates as legal?
753 bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
754                                         bool ForCodeSize) const {
755   EVT ScalarVT = VT.getScalarType();
756   return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
757          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
758 }
759 
760 // We don't want to shrink f64 / f32 constants.
761 bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
762   EVT ScalarVT = VT.getScalarType();
763   return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
764 }
765 
766 bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
767                                                  ISD::LoadExtType ExtTy,
768                                                  EVT NewVT) const {
769   // TODO: This may be worth removing. Check regression tests for diffs.
770   if (!TargetLoweringBase::shouldReduceLoadWidth(N, ExtTy, NewVT))
771     return false;
772 
773   unsigned NewSize = NewVT.getStoreSizeInBits();
774 
775   // If we are reducing to a 32-bit load or a smaller multi-dword load,
776   // this is always better.
777   if (NewSize >= 32)
778     return true;
779 
780   EVT OldVT = N->getValueType(0);
781   unsigned OldSize = OldVT.getStoreSizeInBits();
782 
783   MemSDNode *MN = cast<MemSDNode>(N);
784   unsigned AS = MN->getAddressSpace();
785   // Do not shrink an aligned scalar load to sub-dword.
786   // Scalar engine cannot do sub-dword loads.
787   if (OldSize >= 32 && NewSize < 32 && MN->getAlign() >= Align(4) &&
788       (AS == AMDGPUAS::CONSTANT_ADDRESS ||
789        AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
790        (isa<LoadSDNode>(N) && AS == AMDGPUAS::GLOBAL_ADDRESS &&
791         MN->isInvariant())) &&
792       AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
793     return false;
794 
795   // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
796   // extloads, so doing one requires using a buffer_load. In cases where we
797   // still couldn't use a scalar load, using the wider load shouldn't really
798   // hurt anything.
799 
800   // If the old size already had to be an extload, there's no harm in continuing
801   // to reduce the width.
802   return (OldSize < 32);
803 }
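// For instance, narrowing a dword-aligned, uniform 64-bit constant-address (or
// invariant global) load down to 16 bits is rejected above because the scalar
// unit cannot do sub-dword loads, while narrowing to 32 bits or more is always
// accepted.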
804 
805 bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
806                                                    const SelectionDAG &DAG,
807                                                    const MachineMemOperand &MMO) const {
808 
809   assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());
810 
811   if (LoadTy.getScalarType() == MVT::i32)
812     return false;
813 
814   unsigned LScalarSize = LoadTy.getScalarSizeInBits();
815   unsigned CastScalarSize = CastTy.getScalarSizeInBits();
816 
817   if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
818     return false;
819 
820   unsigned Fast = 0;
821   return allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
822                                         CastTy, MMO, &Fast) &&
823          Fast;
824 }
825 
826 // SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
827 // profitable with the expansion for 64-bit since it's generally good to
828 // speculate things.
829 bool AMDGPUTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
830   return true;
831 }
832 
833 bool AMDGPUTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
834   return true;
835 }
836 
837 bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
838   switch (N->getOpcode()) {
839   case ISD::EntryToken:
840   case ISD::TokenFactor:
841     return true;
842   case ISD::INTRINSIC_WO_CHAIN: {
843     unsigned IntrID = N->getConstantOperandVal(0);
844     switch (IntrID) {
845     case Intrinsic::amdgcn_readfirstlane:
846     case Intrinsic::amdgcn_readlane:
847       return true;
848     }
849     return false;
850   }
851   case ISD::LOAD:
852     if (cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() ==
853         AMDGPUAS::CONSTANT_ADDRESS_32BIT)
854       return true;
855     return false;
856   case AMDGPUISD::SETCC: // ballot-style instruction
857     return true;
858   }
859   return false;
860 }
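// Rationale: readfirstlane/readlane broadcast a single lane's value, so their
// result is identical in every lane; 32-bit constant-address loads are
// typically serviced by the scalar unit; and entry tokens / token factors
// carry no divergent data.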
861 
862 SDValue AMDGPUTargetLowering::getNegatedExpression(
863     SDValue Op, SelectionDAG &DAG, bool LegalOperations, bool ForCodeSize,
864     NegatibleCost &Cost, unsigned Depth) const {
865 
866   switch (Op.getOpcode()) {
867   case ISD::FMA:
868   case ISD::FMAD: {
869     // Negating a fma is not free if it has users without source mods.
870     if (!allUsesHaveSourceMods(Op.getNode()))
871       return SDValue();
872     break;
873   }
874   case AMDGPUISD::RCP: {
875     SDValue Src = Op.getOperand(0);
876     EVT VT = Op.getValueType();
877     SDLoc SL(Op);
878 
879     SDValue NegSrc = getNegatedExpression(Src, DAG, LegalOperations,
880                                           ForCodeSize, Cost, Depth + 1);
881     if (NegSrc)
882       return DAG.getNode(AMDGPUISD::RCP, SL, VT, NegSrc, Op->getFlags());
883     return SDValue();
884   }
885   default:
886     break;
887   }
888 
889   return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
890                                               ForCodeSize, Cost, Depth);
891 }
892 
893 //===---------------------------------------------------------------------===//
894 // Target Properties
895 //===---------------------------------------------------------------------===//
896 
897 bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
898   assert(VT.isFloatingPoint());
899 
900   // Packed operations do not have a fabs modifier.
901   return VT == MVT::f32 || VT == MVT::f64 ||
902          (Subtarget->has16BitInsts() && VT == MVT::f16);
903 }
904 
905 bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
906   assert(VT.isFloatingPoint());
907   // Report this based on the end legalized type.
908   VT = VT.getScalarType();
909   return VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f16;
910 }
911 
912 bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
913                                                          unsigned NumElem,
914                                                          unsigned AS) const {
915   return true;
916 }
917 
918 bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
919   // There are few operations which truly have vector input operands. Any vector
920   // operation is going to involve operations on each component, and a
921   // build_vector will be a copy per element, so it always makes sense to use a
922   // build_vector input in place of the extracted element to avoid a copy into a
923   // super register.
924   //
925   // We should probably only do this if all users are extracts only, but this
926   // should be the common case.
927   return true;
928 }
929 
930 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
931   // Truncate is just accessing a subregister.
932 
933   unsigned SrcSize = Source.getSizeInBits();
934   unsigned DestSize = Dest.getSizeInBits();
935 
936   return DestSize < SrcSize && DestSize % 32 == 0;
937 }
938 
939 bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
940   // Truncate is just accessing a subregister.
941 
942   unsigned SrcSize = Source->getScalarSizeInBits();
943   unsigned DestSize = Dest->getScalarSizeInBits();
944 
945   if (DestSize == 16 && Subtarget->has16BitInsts())
946     return SrcSize >= 32;
947 
948   return DestSize < SrcSize && DestSize % 32 == 0;
949 }
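// Examples under these rules: i64 -> i32 is free (it just takes the low
// subregister); i32 -> i16 is free only on subtargets with 16-bit
// instructions; i64 -> i8 is never free.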
950 
951 bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
952   unsigned SrcSize = Src->getScalarSizeInBits();
953   unsigned DestSize = Dest->getScalarSizeInBits();
954 
955   if (SrcSize == 16 && Subtarget->has16BitInsts())
956     return DestSize >= 32;
957 
958   return SrcSize == 32 && DestSize == 64;
959 }
960 
961 bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
962   // Any register load of a 64-bit value really requires 2 32-bit moves. For all
963   // practical purposes, the extra mov 0 to load a 64-bit value is free. As used,
964   // this will enable reducing 64-bit operations to 32-bit, which is always
965   // good.
966 
967   if (Src == MVT::i16)
968     return Dest == MVT::i32 || Dest == MVT::i64;
969 
970   return Src == MVT::i32 && Dest == MVT::i64;
971 }
972 
973 bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
974   // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
975   // limited number of native 64-bit operations. Shrinking an operation to fit
976   // in a single 32-bit register should always be helpful. As currently used,
977   // this is much less general than the name suggests, and is only used in
978   // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
979   // not profitable, and may actually be harmful.
980   return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
981 }
982 
983 bool AMDGPUTargetLowering::isDesirableToCommuteWithShift(
984     const SDNode* N, CombineLevel Level) const {
985   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
986           N->getOpcode() == ISD::SRL) &&
987          "Expected shift op");
988   // Always commute pre-type legalization and right shifts.
989   // We're looking for shl(or(x,y),z) patterns.
990   if (Level < CombineLevel::AfterLegalizeTypes ||
991       N->getOpcode() != ISD::SHL || N->getOperand(0).getOpcode() != ISD::OR)
992     return true;
993 
994   // If the only user is an i32 right-shift, don't destroy a BFE pattern.
995   if (N->getValueType(0) == MVT::i32 && N->use_size() == 1 &&
996       (N->use_begin()->getOpcode() == ISD::SRA ||
997        N->use_begin()->getOpcode() == ISD::SRL))
998     return false;
999 
1000   // Don't destroy or(shl(load_zext(),c), load_zext()) patterns.
1001   auto IsShiftAndLoad = [](SDValue LHS, SDValue RHS) {
1002     if (LHS.getOpcode() != ISD::SHL)
1003       return false;
1004     auto *RHSLd = dyn_cast<LoadSDNode>(RHS);
1005     auto *LHS0 = dyn_cast<LoadSDNode>(LHS.getOperand(0));
1006     auto *LHS1 = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
1007     return LHS0 && LHS1 && RHSLd && LHS0->getExtensionType() == ISD::ZEXTLOAD &&
1008            LHS1->getAPIntValue() == LHS0->getMemoryVT().getScalarSizeInBits() &&
1009            RHSLd->getExtensionType() == ISD::ZEXTLOAD;
1010   };
1011   SDValue LHS = N->getOperand(0).getOperand(0);
1012   SDValue RHS = N->getOperand(0).getOperand(1);
1013   return !(IsShiftAndLoad(LHS, RHS) || IsShiftAndLoad(RHS, LHS));
1014 }
1015 
1016 //===---------------------------------------------------------------------===//
1017 // TargetLowering Callbacks
1018 //===---------------------------------------------------------------------===//
1019 
1020 CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
1021                                                   bool IsVarArg) {
1022   switch (CC) {
1023   case CallingConv::AMDGPU_VS:
1024   case CallingConv::AMDGPU_GS:
1025   case CallingConv::AMDGPU_PS:
1026   case CallingConv::AMDGPU_CS:
1027   case CallingConv::AMDGPU_HS:
1028   case CallingConv::AMDGPU_ES:
1029   case CallingConv::AMDGPU_LS:
1030     return CC_AMDGPU;
1031   case CallingConv::AMDGPU_CS_Chain:
1032   case CallingConv::AMDGPU_CS_ChainPreserve:
1033     return CC_AMDGPU_CS_CHAIN;
1034   case CallingConv::C:
1035   case CallingConv::Fast:
1036   case CallingConv::Cold:
1037     return CC_AMDGPU_Func;
1038   case CallingConv::AMDGPU_Gfx:
1039     return CC_SI_Gfx;
1040   case CallingConv::AMDGPU_KERNEL:
1041   case CallingConv::SPIR_KERNEL:
1042   default:
1043     report_fatal_error("Unsupported calling convention for call");
1044   }
1045 }
1046 
1047 CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
1048                                                     bool IsVarArg) {
1049   switch (CC) {
1050   case CallingConv::AMDGPU_KERNEL:
1051   case CallingConv::SPIR_KERNEL:
1052     llvm_unreachable("kernels should not be handled here");
1053   case CallingConv::AMDGPU_VS:
1054   case CallingConv::AMDGPU_GS:
1055   case CallingConv::AMDGPU_PS:
1056   case CallingConv::AMDGPU_CS:
1057   case CallingConv::AMDGPU_CS_Chain:
1058   case CallingConv::AMDGPU_CS_ChainPreserve:
1059   case CallingConv::AMDGPU_HS:
1060   case CallingConv::AMDGPU_ES:
1061   case CallingConv::AMDGPU_LS:
1062     return RetCC_SI_Shader;
1063   case CallingConv::AMDGPU_Gfx:
1064     return RetCC_SI_Gfx;
1065   case CallingConv::C:
1066   case CallingConv::Fast:
1067   case CallingConv::Cold:
1068     return RetCC_AMDGPU_Func;
1069   default:
1070     report_fatal_error("Unsupported calling convention.");
1071   }
1072 }
1073 
1074 /// The SelectionDAGBuilder will automatically promote function arguments
1075 /// with illegal types.  However, this does not work for the AMDGPU targets
1076 /// since the function arguments are stored in memory as these illegal types.
1077 /// In order to handle this properly we need to get the original type sizes
1078 /// from the LLVM IR Function and fix up the ISD::InputArg values before
1079 /// passing them to AnalyzeFormalArguments().
1080 
1081 /// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
1082 /// input values across multiple registers.  Each item in the Ins array
1083 /// represents a single value that will be stored in registers.  Ins[x].VT is
1084 /// the value type of the value that will be stored in the register, so
1085 /// whatever SDNode we lower the argument to needs to be this type.
1086 ///
1087 /// In order to correctly lower the arguments we need to know the size of each
1088 /// argument.  Since Ins[x].VT gives us the size of the register that will
1089 /// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
1090 /// for the original function argument so that we can deduce the correct memory
1091 /// type to use for Ins[x].  In most cases the correct memory type will be
1092 /// Ins[x].ArgVT.  However, this will not always be the case.  If, for example,
1093 /// we have a kernel argument of type v8i8, this argument will be split into
1094 /// 8 parts and each part will be represented by its own item in the Ins array.
1095 /// For each part, Ins[x].ArgVT will be v8i8, which is the full type of
1096 /// the argument before it was split.  From this, we deduce that the memory type
1097 /// for each individual part is i8.  We pass the memory type as LocVT to the
1098 /// calling convention analysis function and the register type (Ins[x].VT) as
1099 /// the ValVT.
1100 void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
1101   CCState &State,
1102   const SmallVectorImpl<ISD::InputArg> &Ins) const {
1103   const MachineFunction &MF = State.getMachineFunction();
1104   const Function &Fn = MF.getFunction();
1105   LLVMContext &Ctx = Fn.getParent()->getContext();
1106   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
1107   const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset();
1108   CallingConv::ID CC = Fn.getCallingConv();
1109 
1110   Align MaxAlign = Align(1);
1111   uint64_t ExplicitArgOffset = 0;
1112   const DataLayout &DL = Fn.getParent()->getDataLayout();
1113 
1114   unsigned InIndex = 0;
1115 
1116   for (const Argument &Arg : Fn.args()) {
1117     const bool IsByRef = Arg.hasByRefAttr();
1118     Type *BaseArgTy = Arg.getType();
1119     Type *MemArgTy = IsByRef ? Arg.getParamByRefType() : BaseArgTy;
1120     Align Alignment = DL.getValueOrABITypeAlignment(
1121         IsByRef ? Arg.getParamAlign() : std::nullopt, MemArgTy);
1122     MaxAlign = std::max(Alignment, MaxAlign);
1123     uint64_t AllocSize = DL.getTypeAllocSize(MemArgTy);
1124 
1125     uint64_t ArgOffset = alignTo(ExplicitArgOffset, Alignment) + ExplicitOffset;
1126     ExplicitArgOffset = alignTo(ExplicitArgOffset, Alignment) + AllocSize;
1127 
1128     // We're basically throwing away everything passed into us and starting over
1129     // to get accurate in-memory offsets. The "PartOffset" is completely useless
1130     // to us as computed in Ins.
1131     //
1132     // We also need to figure out what type legalization is trying to do to get
1133     // the correct memory offsets.
1134 
1135     SmallVector<EVT, 16> ValueVTs;
1136     SmallVector<uint64_t, 16> Offsets;
1137     ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);
1138 
1139     for (unsigned Value = 0, NumValues = ValueVTs.size();
1140          Value != NumValues; ++Value) {
1141       uint64_t BasePartOffset = Offsets[Value];
1142 
1143       EVT ArgVT = ValueVTs[Value];
1144       EVT MemVT = ArgVT;
1145       MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
1146       unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);
1147 
1148       if (NumRegs == 1) {
1149         // This argument is not split, so the IR type is the memory type.
1150         if (ArgVT.isExtended()) {
1151           // We have an extended type, like i24, so we should just use the
1152           // register type.
1153           MemVT = RegisterVT;
1154         } else {
1155           MemVT = ArgVT;
1156         }
1157       } else if (ArgVT.isVector() && RegisterVT.isVector() &&
1158                  ArgVT.getScalarType() == RegisterVT.getScalarType()) {
1159         assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
1160         // We have a vector value which has been split into a vector with
1161         // the same scalar type, but fewer elements.  This should handle
1162         // all the floating-point vector types.
1163         MemVT = RegisterVT;
1164       } else if (ArgVT.isVector() &&
1165                  ArgVT.getVectorNumElements() == NumRegs) {
1166         // This arg has been split so that each element is stored in a separate
1167         // register.
1168         MemVT = ArgVT.getScalarType();
1169       } else if (ArgVT.isExtended()) {
1170         // We have an extended type, like i65.
1171         MemVT = RegisterVT;
1172       } else {
1173         unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
1174         assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
1175         if (RegisterVT.isInteger()) {
1176           MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
1177         } else if (RegisterVT.isVector()) {
1178           assert(!RegisterVT.getScalarType().isFloatingPoint());
1179           unsigned NumElements = RegisterVT.getVectorNumElements();
1180           assert(MemoryBits % NumElements == 0);
1181           // This vector type has been split into another vector type with
1182           // a different element size.
1183           EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
1184                                            MemoryBits / NumElements);
1185           MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
1186         } else {
1187           llvm_unreachable("cannot deduce memory type.");
1188         }
1189       }
1190 
1191       // Convert one element vectors to scalar.
1192       if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
1193         MemVT = MemVT.getScalarType();
1194 
1195       // Round up vec3/vec5 argument.
1196       if (MemVT.isVector() && !MemVT.isPow2VectorType()) {
1197         assert(MemVT.getVectorNumElements() == 3 ||
1198                MemVT.getVectorNumElements() == 5 ||
1199                (MemVT.getVectorNumElements() >= 9 &&
1200                 MemVT.getVectorNumElements() <= 12));
1201         MemVT = MemVT.getPow2VectorType(State.getContext());
1202       } else if (!MemVT.isSimple() && !MemVT.isVector()) {
1203         MemVT = MemVT.getRoundIntegerType(State.getContext());
1204       }
1205 
1206       unsigned PartOffset = 0;
1207       for (unsigned i = 0; i != NumRegs; ++i) {
1208         State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
1209                                                BasePartOffset + PartOffset,
1210                                                MemVT.getSimpleVT(),
1211                                                CCValAssign::Full));
1212         PartOffset += MemVT.getStoreSize();
1213       }
1214     }
1215   }
1216 }
1217 
1218 SDValue AMDGPUTargetLowering::LowerReturn(
1219   SDValue Chain, CallingConv::ID CallConv,
1220   bool isVarArg,
1221   const SmallVectorImpl<ISD::OutputArg> &Outs,
1222   const SmallVectorImpl<SDValue> &OutVals,
1223   const SDLoc &DL, SelectionDAG &DAG) const {
1224   // FIXME: Fails for r600 tests
1225   //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
1226   // "wave terminate should not have return values");
1227   return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
1228 }
1229 
1230 //===---------------------------------------------------------------------===//
1231 // Target specific lowering
1232 //===---------------------------------------------------------------------===//
1233 
1234 /// Selects the correct CCAssignFn for a given CallingConvention value.
1235 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1236                                                     bool IsVarArg) {
1237   return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
1238 }
1239 
1240 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
1241                                                       bool IsVarArg) {
1242   return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
1243 }
1244 
1245 SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
1246                                                   SelectionDAG &DAG,
1247                                                   MachineFrameInfo &MFI,
1248                                                   int ClobberedFI) const {
1249   SmallVector<SDValue, 8> ArgChains;
1250   int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
1251   int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
1252 
1253   // Include the original chain at the beginning of the list. When this is
1254   // used by target LowerCall hooks, this helps legalize find the
1255   // CALLSEQ_BEGIN node.
1256   ArgChains.push_back(Chain);
1257 
1258   // Add a chain value for each stack argument load that overlaps the clobbered slot.
1259   for (SDNode *U : DAG.getEntryNode().getNode()->uses()) {
1260     if (LoadSDNode *L = dyn_cast<LoadSDNode>(U)) {
1261       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
1262         if (FI->getIndex() < 0) {
1263           int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
1264           int64_t InLastByte = InFirstByte;
1265           InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
1266 
1267           if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1268               (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1269             ArgChains.push_back(SDValue(L, 1));
1270         }
1271       }
1272     }
1273   }
1274 
1275   // Build a tokenfactor for all the chains.
1276   return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
1277 }
1278 
1279 SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
1280                                                  SmallVectorImpl<SDValue> &InVals,
1281                                                  StringRef Reason) const {
1282   SDValue Callee = CLI.Callee;
1283   SelectionDAG &DAG = CLI.DAG;
1284 
1285   const Function &Fn = DAG.getMachineFunction().getFunction();
1286 
1287   StringRef FuncName("<unknown>");
1288 
1289   if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
1290     FuncName = G->getSymbol();
1291   else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1292     FuncName = G->getGlobal()->getName();
1293 
1294   DiagnosticInfoUnsupported NoCalls(
1295     Fn, Reason + FuncName, CLI.DL.getDebugLoc());
1296   DAG.getContext()->diagnose(NoCalls);
1297 
1298   if (!CLI.IsTailCall) {
1299     for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
1300       InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
1301   }
1302 
1303   return DAG.getEntryNode();
1304 }
1305 
1306 SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
1307                                         SmallVectorImpl<SDValue> &InVals) const {
1308   return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
1309 }
1310 
1311 SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
1312                                                       SelectionDAG &DAG) const {
1313   const Function &Fn = DAG.getMachineFunction().getFunction();
1314 
1315   DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
1316                                             SDLoc(Op).getDebugLoc());
1317   DAG.getContext()->diagnose(NoDynamicAlloca);
1318   auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
1319   return DAG.getMergeValues(Ops, SDLoc());
1320 }
1321 
1322 SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
1323                                              SelectionDAG &DAG) const {
1324   switch (Op.getOpcode()) {
1325   default:
1326     Op->print(errs(), &DAG);
1327     llvm_unreachable("Custom lowering code for this "
1328                      "instruction is not implemented yet!");
1329     break;
1330   case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
1331   case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
1332   case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
1333   case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
1334   case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
1335   case ISD::FREM: return LowerFREM(Op, DAG);
1336   case ISD::FCEIL: return LowerFCEIL(Op, DAG);
1337   case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
1338   case ISD::FRINT: return LowerFRINT(Op, DAG);
1339   case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
1340   case ISD::FROUNDEVEN:
1341     return LowerFROUNDEVEN(Op, DAG);
1342   case ISD::FROUND: return LowerFROUND(Op, DAG);
1343   case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
1344   case ISD::FLOG2:
1345     return LowerFLOG2(Op, DAG);
1346   case ISD::FLOG:
1347   case ISD::FLOG10:
1348     return LowerFLOGCommon(Op, DAG);
1349   case ISD::FEXP:
1350   case ISD::FEXP10:
1351     return lowerFEXP(Op, DAG);
1352   case ISD::FEXP2:
1353     return lowerFEXP2(Op, DAG);
1354   case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
1355   case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
1356   case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
1357   case ISD::FP_TO_SINT:
1358   case ISD::FP_TO_UINT:
1359     return LowerFP_TO_INT(Op, DAG);
1360   case ISD::CTTZ:
1361   case ISD::CTTZ_ZERO_UNDEF:
1362   case ISD::CTLZ:
1363   case ISD::CTLZ_ZERO_UNDEF:
1364     return LowerCTLZ_CTTZ(Op, DAG);
1365   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
1366   }
1367   return Op;
1368 }
1369 
1370 void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
1371                                               SmallVectorImpl<SDValue> &Results,
1372                                               SelectionDAG &DAG) const {
1373   switch (N->getOpcode()) {
1374   case ISD::SIGN_EXTEND_INREG:
1375     // Different parts of legalization seem to interpret which type of
1376     // sign_extend_inreg is the one to check for custom lowering. The extended
1377     // from type is what really matters, but some places check for custom
1378     // lowering of the result type. This results in trying to use
1379     // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
1380     // nothing here and let the illegal result integer be handled normally.
1381     return;
1382   case ISD::FLOG2:
1383     if (SDValue Lowered = LowerFLOG2(SDValue(N, 0), DAG))
1384       Results.push_back(Lowered);
1385     return;
1386   case ISD::FLOG:
1387   case ISD::FLOG10:
1388     if (SDValue Lowered = LowerFLOGCommon(SDValue(N, 0), DAG))
1389       Results.push_back(Lowered);
1390     return;
1391   case ISD::FEXP2:
1392     if (SDValue Lowered = lowerFEXP2(SDValue(N, 0), DAG))
1393       Results.push_back(Lowered);
1394     return;
1395   case ISD::FEXP:
1396   case ISD::FEXP10:
1397     if (SDValue Lowered = lowerFEXP(SDValue(N, 0), DAG))
1398       Results.push_back(Lowered);
1399     return;
1400   default:
1401     return;
1402   }
1403 }
1404 
1405 SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
1406                                                  SDValue Op,
1407                                                  SelectionDAG &DAG) const {
1408 
1409   const DataLayout &DL = DAG.getDataLayout();
1410   GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
1411   const GlobalValue *GV = G->getGlobal();
1412 
1413   if (!MFI->isModuleEntryFunction()) {
1414     if (std::optional<uint32_t> Address =
1415             AMDGPUMachineFunction::getLDSAbsoluteAddress(*GV)) {
1416       return DAG.getConstant(*Address, SDLoc(Op), Op.getValueType());
1417     }
1418   }
1419 
1420   if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
1421       G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) {
1422     if (!MFI->isModuleEntryFunction() &&
1423         !GV->getName().equals("llvm.amdgcn.module.lds")) {
1424       SDLoc DL(Op);
1425       const Function &Fn = DAG.getMachineFunction().getFunction();
1426       DiagnosticInfoUnsupported BadLDSDecl(
1427         Fn, "local memory global used by non-kernel function",
1428         DL.getDebugLoc(), DS_Warning);
1429       DAG.getContext()->diagnose(BadLDSDecl);
1430 
1431       // We currently don't have a way to correctly allocate LDS objects that
1432       // aren't directly associated with a kernel. We do force inlining of
1433       // functions that use local objects. However, if these dead functions are
1434       // not eliminated, we don't want a compile time error. Just emit a warning
1435       // and a trap, since there should be no callable path here.
1436       SDValue Trap = DAG.getNode(ISD::TRAP, DL, MVT::Other, DAG.getEntryNode());
1437       SDValue OutputChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
1438                                         Trap, DAG.getRoot());
1439       DAG.setRoot(OutputChain);
1440       return DAG.getUNDEF(Op.getValueType());
1441     }
1442 
1443     // XXX: What does the value of G->getOffset() mean?
1444     assert(G->getOffset() == 0 &&
1445          "Do not know what to do with a non-zero offset");
1446 
1447     // TODO: We could emit code to handle the initialization somewhere.
1448     // We ignore the initializer for now and legalize it to allow selection.
1449     // The initializer will be diagnosed during assembly emission anyway.
1450     unsigned Offset = MFI->allocateLDSGlobal(DL, *cast<GlobalVariable>(GV));
1451     return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
1452   }
1453   return SDValue();
1454 }
1455 
1456 SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
1457                                                   SelectionDAG &DAG) const {
1458   SmallVector<SDValue, 8> Args;
1459   SDLoc SL(Op);
1460 
1461   EVT VT = Op.getValueType();
1462   if (VT.getVectorElementType().getSizeInBits() < 32) {
1463     unsigned OpBitSize = Op.getOperand(0).getValueType().getSizeInBits();
1464     if (OpBitSize >= 32 && OpBitSize % 32 == 0) {
1465       unsigned NewNumElt = OpBitSize / 32;
1466       EVT NewEltVT = (NewNumElt == 1) ? MVT::i32
1467                                       : EVT::getVectorVT(*DAG.getContext(),
1468                                                          MVT::i32, NewNumElt);
1469       for (const SDUse &U : Op->ops()) {
1470         SDValue In = U.get();
1471         SDValue NewIn = DAG.getNode(ISD::BITCAST, SL, NewEltVT, In);
1472         if (NewNumElt > 1)
1473           DAG.ExtractVectorElements(NewIn, Args);
1474         else
1475           Args.push_back(NewIn);
1476       }
1477 
1478       EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
1479                                    NewNumElt * Op.getNumOperands());
1480       SDValue BV = DAG.getBuildVector(NewVT, SL, Args);
1481       return DAG.getNode(ISD::BITCAST, SL, VT, BV);
1482     }
1483   }
1484 
1485   for (const SDUse &U : Op->ops())
1486     DAG.ExtractVectorElements(U.get(), Args);
1487 
1488   return DAG.getBuildVector(Op.getValueType(), SL, Args);
1489 }
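
// For example (a sketch of the sub-32-bit path above, in DAG notation):
//   (v4i16 concat_vectors (v2i16 a), (v2i16 b))
//     -> (v4i16 bitcast (v2i32 build_vector (i32 bitcast a), (i32 bitcast b)))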
1490 
1491 SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
1492                                                      SelectionDAG &DAG) const {
1493   SDLoc SL(Op);
1494   SmallVector<SDValue, 8> Args;
1495   unsigned Start = Op.getConstantOperandVal(1);
1496   EVT VT = Op.getValueType();
1497   EVT SrcVT = Op.getOperand(0).getValueType();
1498 
1499   if (VT.getScalarSizeInBits() == 16 && Start % 2 == 0) {
1500     unsigned NumElt = VT.getVectorNumElements();
1501     unsigned NumSrcElt = SrcVT.getVectorNumElements();
1502     assert(NumElt % 2 == 0 && NumSrcElt % 2 == 0 && "expect legal types");
1503 
1504     // Extract 32-bit registers at a time.
1505     EVT NewSrcVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumSrcElt / 2);
1506     EVT NewVT = NumElt == 2
1507                     ? MVT::i32
1508                     : EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElt / 2);
1509     SDValue Tmp = DAG.getNode(ISD::BITCAST, SL, NewSrcVT, Op.getOperand(0));
1510 
1511     DAG.ExtractVectorElements(Tmp, Args, Start / 2, NumElt / 2);
1512     if (NumElt == 2)
1513       Tmp = Args[0];
1514     else
1515       Tmp = DAG.getBuildVector(NewVT, SL, Args);
1516 
1517     return DAG.getNode(ISD::BITCAST, SL, VT, Tmp);
1518   }
1519 
1520   DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
1521                             VT.getVectorNumElements());
1522 
1523   return DAG.getBuildVector(Op.getValueType(), SL, Args);
1524 }
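
// For example (a sketch of the 16-bit path above, in DAG notation):
//   (v2i16 extract_subvector (v8i16 x), 2)
//     -> (v2i16 bitcast (i32 extract_vector_elt (v4i32 bitcast x), 1))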
1525 
1526 // TODO: Handle fabs too
1527 static SDValue peekFNeg(SDValue Val) {
1528   if (Val.getOpcode() == ISD::FNEG)
1529     return Val.getOperand(0);
1530 
1531   return Val;
1532 }
1533 
1534 static SDValue peekFPSignOps(SDValue Val) {
1535   if (Val.getOpcode() == ISD::FNEG)
1536     Val = Val.getOperand(0);
1537   if (Val.getOpcode() == ISD::FABS)
1538     Val = Val.getOperand(0);
1539   if (Val.getOpcode() == ISD::FCOPYSIGN)
1540     Val = Val.getOperand(0);
1541   return Val;
1542 }
1543 
1544 SDValue AMDGPUTargetLowering::combineFMinMaxLegacyImpl(
1545     const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True,
1546     SDValue False, SDValue CC, DAGCombinerInfo &DCI) const {
1547   SelectionDAG &DAG = DCI.DAG;
1548   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
1549   switch (CCOpcode) {
1550   case ISD::SETOEQ:
1551   case ISD::SETONE:
1552   case ISD::SETUNE:
1553   case ISD::SETNE:
1554   case ISD::SETUEQ:
1555   case ISD::SETEQ:
1556   case ISD::SETFALSE:
1557   case ISD::SETFALSE2:
1558   case ISD::SETTRUE:
1559   case ISD::SETTRUE2:
1560   case ISD::SETUO:
1561   case ISD::SETO:
1562     break;
1563   case ISD::SETULE:
1564   case ISD::SETULT: {
1565     if (LHS == True)
1566       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1567     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1568   }
1569   case ISD::SETOLE:
1570   case ISD::SETOLT:
1571   case ISD::SETLE:
1572   case ISD::SETLT: {
1573     // Ordered. Assume ordered for undefined.
1574 
1575     // Only do this after legalization to avoid interfering with other combines
1576     // which might occur.
1577     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1578         !DCI.isCalledByLegalizer())
1579       return SDValue();
1580 
1581     // We need to permute the operands to get the correct NaN behavior. The
1582     // selected operand is the second one based on the failing compare with NaN,
1583     // so permute it based on the compare type the hardware uses.
1584     if (LHS == True)
1585       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1586     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1587   }
1588   case ISD::SETUGE:
1589   case ISD::SETUGT: {
1590     if (LHS == True)
1591       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1592     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1593   }
1594   case ISD::SETGT:
1595   case ISD::SETGE:
1596   case ISD::SETOGE:
1597   case ISD::SETOGT: {
1598     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1599         !DCI.isCalledByLegalizer())
1600       return SDValue();
1601 
1602     if (LHS == True)
1603       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1604     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1605   }
1606   case ISD::SETCC_INVALID:
1607     llvm_unreachable("Invalid setcc condcode!");
1608   }
1609   return SDValue();
1610 }
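
// FMIN_LEGACY / FMAX_LEGACY pick their second operand when a NaN makes the
// compare fail (see the comment in the ordered cases above), which is what the
// operand permutations preserve. For example:
//   select (setolt x, y), x, y  ->  fmin_legacy x, y   // NaN input yields y
//   select (setult x, y), x, y  ->  fmin_legacy y, x   // NaN input yields x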
1611 
1612 /// Generate Min/Max node
1613 SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
1614                                                    SDValue LHS, SDValue RHS,
1615                                                    SDValue True, SDValue False,
1616                                                    SDValue CC,
1617                                                    DAGCombinerInfo &DCI) const {
1618   if ((LHS == True && RHS == False) || (LHS == False && RHS == True))
1619     return combineFMinMaxLegacyImpl(DL, VT, LHS, RHS, True, False, CC, DCI);
1620 
1621   SelectionDAG &DAG = DCI.DAG;
1622 
1623   // If we can't directly match this, try to see if we can fold an fneg to
1624   // match.
1625 
1626   ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
1627   ConstantFPSDNode *CFalse = dyn_cast<ConstantFPSDNode>(False);
1628   SDValue NegTrue = peekFNeg(True);
1629 
1630   // Undo the combine foldFreeOpFromSelect does if it helps us match the
1631   // fmin/fmax.
1632   //
1633   // select (fcmp olt (lhs, K)), (fneg lhs), -K
1634   // -> fneg (fmin_legacy lhs, K)
1635   //
1636   // TODO: Use getNegatedExpression
1637   if (LHS == NegTrue && CFalse && CRHS) {
1638     APFloat NegRHS = neg(CRHS->getValueAPF());
1639     if (NegRHS == CFalse->getValueAPF()) {
1640       SDValue Combined =
1641           combineFMinMaxLegacyImpl(DL, VT, LHS, RHS, NegTrue, False, CC, DCI);
1642       if (Combined)
1643         return DAG.getNode(ISD::FNEG, DL, VT, Combined);
1644       return SDValue();
1645     }
1646   }
1647 
1648   return SDValue();
1649 }
1650 
1651 std::pair<SDValue, SDValue>
1652 AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
1653   SDLoc SL(Op);
1654 
1655   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1656 
1657   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1658   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1659 
1660   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1661   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1662 
1663   return std::pair(Lo, Hi);
1664 }
1665 
1666 SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
1667   SDLoc SL(Op);
1668 
1669   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1670   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1671   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1672 }
1673 
1674 SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
1675   SDLoc SL(Op);
1676 
1677   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1678   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1679   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1680 }
1681 
1682 // Split a vector type into two parts. The first part has a power-of-two number
1683 // of elements. The second part is whatever is left over, and is a scalar if it
1684 // would otherwise be a 1-vector.
1685 std::pair<EVT, EVT>
1686 AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const {
1687   EVT LoVT, HiVT;
1688   EVT EltVT = VT.getVectorElementType();
1689   unsigned NumElts = VT.getVectorNumElements();
1690   unsigned LoNumElts = PowerOf2Ceil((NumElts + 1) / 2);
1691   LoVT = EVT::getVectorVT(*DAG.getContext(), EltVT, LoNumElts);
1692   HiVT = NumElts - LoNumElts == 1
1693              ? EltVT
1694              : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
1695   return std::pair(LoVT, HiVT);
1696 }
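
// For example:
//   v8i32 -> (v4i32, v4i32)
//   v7i32 -> (v4i32, v3i32)
//   v3i16 -> (v2i16, i16)    // leftover 1-vector becomes a scalar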
1697 
1698 // Split a vector value into two parts of types LoVT and HiVT. HiVT could be
1699 // scalar.
1700 std::pair<SDValue, SDValue>
1701 AMDGPUTargetLowering::splitVector(const SDValue &N, const SDLoc &DL,
1702                                   const EVT &LoVT, const EVT &HiVT,
1703                                   SelectionDAG &DAG) const {
1704   assert(LoVT.getVectorNumElements() +
1705                  (HiVT.isVector() ? HiVT.getVectorNumElements() : 1) <=
1706              N.getValueType().getVectorNumElements() &&
1707          "More vector elements requested than available!");
1708   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
1709                            DAG.getVectorIdxConstant(0, DL));
1710   SDValue Hi = DAG.getNode(
1711       HiVT.isVector() ? ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL,
1712       HiVT, N, DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), DL));
1713   return std::pair(Lo, Hi);
1714 }
1715 
1716 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
1717                                               SelectionDAG &DAG) const {
1718   LoadSDNode *Load = cast<LoadSDNode>(Op);
1719   EVT VT = Op.getValueType();
1720   SDLoc SL(Op);
1721 
1722 
1723   // If this is a 2 element vector, we really want to scalarize and not create
1724   // weird 1 element vectors.
1725   if (VT.getVectorNumElements() == 2) {
1726     SDValue Ops[2];
1727     std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
1728     return DAG.getMergeValues(Ops, SL);
1729   }
1730 
1731   SDValue BasePtr = Load->getBasePtr();
1732   EVT MemVT = Load->getMemoryVT();
1733 
1734   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1735 
1736   EVT LoVT, HiVT;
1737   EVT LoMemVT, HiMemVT;
1738   SDValue Lo, Hi;
1739 
1740   std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1741   std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1742   std::tie(Lo, Hi) = splitVector(Op, SL, LoVT, HiVT, DAG);
1743 
1744   unsigned Size = LoMemVT.getStoreSize();
1745   Align BaseAlign = Load->getAlign();
1746   Align HiAlign = commonAlignment(BaseAlign, Size);
1747 
1748   SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
1749                                   Load->getChain(), BasePtr, SrcValue, LoMemVT,
1750                                   BaseAlign, Load->getMemOperand()->getFlags());
1751   SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::getFixed(Size));
1752   SDValue HiLoad =
1753       DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
1754                      HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
1755                      HiMemVT, HiAlign, Load->getMemOperand()->getFlags());
1756 
1757   SDValue Join;
1758   if (LoVT == HiVT) {
1759     // The vector length is a power of two, so it was split evenly.
1760     Join = DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad);
1761   } else {
1762     Join = DAG.getNode(ISD::INSERT_SUBVECTOR, SL, VT, DAG.getUNDEF(VT), LoLoad,
1763                        DAG.getVectorIdxConstant(0, SL));
1764     Join = DAG.getNode(
1765         HiVT.isVector() ? ISD::INSERT_SUBVECTOR : ISD::INSERT_VECTOR_ELT, SL,
1766         VT, Join, HiLoad,
1767         DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), SL));
1768   }
1769 
1770   SDValue Ops[] = {Join, DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
1771                                      LoLoad.getValue(1), HiLoad.getValue(1))};
1772 
1773   return DAG.getMergeValues(Ops, SL);
1774 }
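
// For example, a <3 x i32> load is split above into a <2 x i32> load at the
// base pointer and an i32 load at base + 8; the halves are recombined with
// insert_subvector / insert_vector_elt and the two chains are token-factored.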
1775 
1776 SDValue AMDGPUTargetLowering::WidenOrSplitVectorLoad(SDValue Op,
1777                                                      SelectionDAG &DAG) const {
1778   LoadSDNode *Load = cast<LoadSDNode>(Op);
1779   EVT VT = Op.getValueType();
1780   SDValue BasePtr = Load->getBasePtr();
1781   EVT MemVT = Load->getMemoryVT();
1782   SDLoc SL(Op);
1783   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1784   Align BaseAlign = Load->getAlign();
1785   unsigned NumElements = MemVT.getVectorNumElements();
1786 
1787   // Widen from vec3 to vec4 when the load is at least 8-byte aligned
1788   // or 16-byte fully dereferenceable. Otherwise, split the vector load.
1789   if (NumElements != 3 ||
1790       (BaseAlign < Align(8) &&
1791        !SrcValue.isDereferenceable(16, *DAG.getContext(), DAG.getDataLayout())))
1792     return SplitVectorLoad(Op, DAG);
1793 
1794   assert(NumElements == 3);
1795 
1796   EVT WideVT =
1797       EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
1798   EVT WideMemVT =
1799       EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 4);
1800   SDValue WideLoad = DAG.getExtLoad(
1801       Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
1802       WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
1803   return DAG.getMergeValues(
1804       {DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, VT, WideLoad,
1805                    DAG.getVectorIdxConstant(0, SL)),
1806        WideLoad.getValue(1)},
1807       SL);
1808 }
1809 
1810 SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
1811                                                SelectionDAG &DAG) const {
1812   StoreSDNode *Store = cast<StoreSDNode>(Op);
1813   SDValue Val = Store->getValue();
1814   EVT VT = Val.getValueType();
1815 
1816   // If this is a 2 element vector, we really want to scalarize and not create
1817   // weird 1 element vectors.
1818   if (VT.getVectorNumElements() == 2)
1819     return scalarizeVectorStore(Store, DAG);
1820 
1821   EVT MemVT = Store->getMemoryVT();
1822   SDValue Chain = Store->getChain();
1823   SDValue BasePtr = Store->getBasePtr();
1824   SDLoc SL(Op);
1825 
1826   EVT LoVT, HiVT;
1827   EVT LoMemVT, HiMemVT;
1828   SDValue Lo, Hi;
1829 
1830   std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1831   std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1832   std::tie(Lo, Hi) = splitVector(Val, SL, LoVT, HiVT, DAG);
1833 
1834   SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());
1835 
1836   const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
1837   Align BaseAlign = Store->getAlign();
1838   unsigned Size = LoMemVT.getStoreSize();
1839   Align HiAlign = commonAlignment(BaseAlign, Size);
1840 
1841   SDValue LoStore =
1842       DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
1843                         Store->getMemOperand()->getFlags());
1844   SDValue HiStore =
1845       DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
1846                         HiMemVT, HiAlign, Store->getMemOperand()->getFlags());
1847 
1848   return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
1849 }
1850 
1851 // This is a shortcut for integer division because we have fast i32<->f32
1852 // conversions, and fast f32 reciprocal instructions. The fractional part of a
1853 // float is enough to accurately represent up to a 24-bit signed integer.
1854 SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
1855                                             bool Sign) const {
1856   SDLoc DL(Op);
1857   EVT VT = Op.getValueType();
1858   SDValue LHS = Op.getOperand(0);
1859   SDValue RHS = Op.getOperand(1);
1860   MVT IntVT = MVT::i32;
1861   MVT FltVT = MVT::f32;
1862 
1863   unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
1864   if (LHSSignBits < 9)
1865     return SDValue();
1866 
1867   unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
1868   if (RHSSignBits < 9)
1869     return SDValue();
1870 
1871   unsigned BitSize = VT.getSizeInBits();
1872   unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
1873   unsigned DivBits = BitSize - SignBits;
1874   if (Sign)
1875     ++DivBits;
1876 
1877   ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
1878   ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
1879 
1880   SDValue jq = DAG.getConstant(1, DL, IntVT);
1881 
1882   if (Sign) {
1883     // char|short jq = ia ^ ib;
1884     jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
1885 
1886     // jq = jq >> (bitsize - 2)
1887     jq = DAG.getNode(ISD::SRA, DL, VT, jq,
1888                      DAG.getConstant(BitSize - 2, DL, VT));
1889 
1890     // jq = jq | 0x1
1891     jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
1892   }
1893 
1894   // int ia = (int)LHS;
1895   SDValue ia = LHS;
1896 
1897   // int ib = (int)RHS;
1898   SDValue ib = RHS;
1899 
1900   // float fa = (float)ia;
1901   SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
1902 
1903   // float fb = (float)ib;
1904   SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
1905 
1906   SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
1907                            fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
1908 
1909   // fq = trunc(fq);
1910   fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
1911 
1912   // float fqneg = -fq;
1913   SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
1914 
1915   MachineFunction &MF = DAG.getMachineFunction();
1916 
1917   bool UseFmadFtz = false;
1918   if (Subtarget->isGCN()) {
1919     const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1920     UseFmadFtz =
1921         MFI->getMode().FP32Denormals != DenormalMode::getPreserveSign();
1922   }
1923 
1924   // float fr = mad(fqneg, fb, fa);
1925   unsigned OpCode = !Subtarget->hasMadMacF32Insts() ? (unsigned)ISD::FMA
1926                     : UseFmadFtz ? (unsigned)AMDGPUISD::FMAD_FTZ
1927                                  : (unsigned)ISD::FMAD;
1928   SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);
1929 
1930   // int iq = (int)fq;
1931   SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
1932 
1933   // fr = fabs(fr);
1934   fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
1935 
1936   // fb = fabs(fb);
1937   fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1938 
1939   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1940 
1941   // int cv = fr >= fb;
1942   SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1943 
1944   // jq = (cv ? jq : 0);
1945   jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
1946 
1947   // dst = iq + jq;
1948   SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1949 
1950   // Rem needs compensation; it's easier to recompute it.
1951   SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1952   Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1953 
1954   // Truncate to number of bits this divide really is.
1955   if (Sign) {
1956     SDValue InRegSize
1957       = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
1958     Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
1959     Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
1960   } else {
1961     SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
1962     Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
1963     Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
1964   }
1965 
1966   return DAG.getMergeValues({ Div, Rem }, DL);
1967 }
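
// Collected into one place, the sequence built above corresponds roughly to
// this scalar sketch (signed case, operands known to fit in 24 bits; rcp and
// fma are shorthand for the hardware reciprocal and fused multiply-add nodes):
//
//   int jq = ((ia ^ ib) >> 30) | 1;          // +/-1, the sign of the quotient
//   float fa = (float)ia;
//   float fb = (float)ib;
//   float fq = trunc(fa * rcp(fb));
//   float fr = fabs(fma(-fq, fb, fa));       // or mad, depending on denormals
//   int div = (int)fq + ((fr >= fabs(fb)) ? jq : 0);
//   int rem = ia - div * ib;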
1968 
1969 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
1970                                       SelectionDAG &DAG,
1971                                       SmallVectorImpl<SDValue> &Results) const {
1972   SDLoc DL(Op);
1973   EVT VT = Op.getValueType();
1974 
1975   assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");
1976 
1977   EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1978 
1979   SDValue One = DAG.getConstant(1, DL, HalfVT);
1980   SDValue Zero = DAG.getConstant(0, DL, HalfVT);
1981 
1982   //HiLo split
1983   SDValue LHS_Lo, LHS_Hi;
1984   SDValue LHS = Op.getOperand(0);
1985   std::tie(LHS_Lo, LHS_Hi) = DAG.SplitScalar(LHS, DL, HalfVT, HalfVT);
1986 
1987   SDValue RHS_Lo, RHS_Hi;
1988   SDValue RHS = Op.getOperand(1);
1989   std::tie(RHS_Lo, RHS_Hi) = DAG.SplitScalar(RHS, DL, HalfVT, HalfVT);
1990 
1991   if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
1992       DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
1993 
1994     SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1995                               LHS_Lo, RHS_Lo);
1996 
1997     SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero});
1998     SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero});
1999 
2000     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
2001     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
2002     return;
2003   }
2004 
2005   if (isTypeLegal(MVT::i64)) {
2006     // The algorithm here is based on ideas from "Software Integer Division",
2007     // Tom Rodeheffer, August 2008.
2008 
2009     MachineFunction &MF = DAG.getMachineFunction();
2010     const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
2011 
2012     // Compute denominator reciprocal.
2013     unsigned FMAD =
2014         !Subtarget->hasMadMacF32Insts() ? (unsigned)ISD::FMA
2015         : MFI->getMode().FP32Denormals == DenormalMode::getPreserveSign()
2016             ? (unsigned)ISD::FMAD
2017             : (unsigned)AMDGPUISD::FMAD_FTZ;
2018 
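
    // The f32 magic constants below decode as follows (assuming the standard
    // IEEE-754 single-precision encodings):
    //   0x4f800000 =  0x1.0p+32   combine halves: rhs ~= hi * 2^32 + lo
    //   0x5f7ffffc ~= 0x1.0p+64   scale rcp(rhs) up into 64-bit fixed point
    //   0x2f800000 =  0x1.0p-32   split the scaled estimate into two words
    //   0xcf800000 = -0x1.0p+32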
2019     SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo);
2020     SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi);
2021     SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi,
2022       DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32),
2023       Cvt_Lo);
2024     SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1);
2025     SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp,
2026       DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32));
2027     SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1,
2028       DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32));
2029     SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2);
2030     SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc,
2031       DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32),
2032       Mul1);
2033     SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2);
2034     SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc);
2035     SDValue Rcp64 = DAG.getBitcast(VT,
2036                         DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi}));
2037 
2038     SDValue Zero64 = DAG.getConstant(0, DL, VT);
2039     SDValue One64  = DAG.getConstant(1, DL, VT);
2040     SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1);
2041     SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1);
2042 
2043     // First round of UNR (Unsigned integer Newton-Raphson).
2044     SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS);
2045     SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64);
2046     SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1);
2047     SDValue Mulhi1_Lo, Mulhi1_Hi;
2048     std::tie(Mulhi1_Lo, Mulhi1_Hi) =
2049         DAG.SplitScalar(Mulhi1, DL, HalfVT, HalfVT);
2050     SDValue Add1_Lo = DAG.getNode(ISD::UADDO_CARRY, DL, HalfCarryVT, Rcp_Lo,
2051                                   Mulhi1_Lo, Zero1);
2052     SDValue Add1_Hi = DAG.getNode(ISD::UADDO_CARRY, DL, HalfCarryVT, Rcp_Hi,
2053                                   Mulhi1_Hi, Add1_Lo.getValue(1));
2054     SDValue Add1 = DAG.getBitcast(VT,
2055                         DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi}));
2056 
2057     // Second round of UNR.
2058     SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1);
2059     SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2);
2060     SDValue Mulhi2_Lo, Mulhi2_Hi;
2061     std::tie(Mulhi2_Lo, Mulhi2_Hi) =
2062         DAG.SplitScalar(Mulhi2, DL, HalfVT, HalfVT);
2063     SDValue Add2_Lo = DAG.getNode(ISD::UADDO_CARRY, DL, HalfCarryVT, Add1_Lo,
2064                                   Mulhi2_Lo, Zero1);
2065     SDValue Add2_Hi = DAG.getNode(ISD::UADDO_CARRY, DL, HalfCarryVT, Add1_Hi,
2066                                   Mulhi2_Hi, Add2_Lo.getValue(1));
2067     SDValue Add2 = DAG.getBitcast(VT,
2068                         DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi}));
2069 
2070     SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2);
2071 
2072     SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3);
2073 
2074     SDValue Mul3_Lo, Mul3_Hi;
2075     std::tie(Mul3_Lo, Mul3_Hi) = DAG.SplitScalar(Mul3, DL, HalfVT, HalfVT);
2076     SDValue Sub1_Lo = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, LHS_Lo,
2077                                   Mul3_Lo, Zero1);
2078     SDValue Sub1_Hi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, LHS_Hi,
2079                                   Mul3_Hi, Sub1_Lo.getValue(1));
2080     SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi);
2081     SDValue Sub1 = DAG.getBitcast(VT,
2082                         DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));
2083 
2084     SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
2085     SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
2086                                  ISD::SETUGE);
2087     SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
2088                                  ISD::SETUGE);
2089     SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);
2090 
2091     // TODO: The portions of code here and below could be enclosed in if/endif
2092     // blocks. Currently control flow is unconditional, and we use 4 selects
2093     // after the potential endif to substitute for PHIs.
2094 
2095     // if C3 != 0 ...
2096     SDValue Sub2_Lo = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub1_Lo,
2097                                   RHS_Lo, Zero1);
2098     SDValue Sub2_Mi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub1_Mi,
2099                                   RHS_Hi, Sub1_Lo.getValue(1));
2100     SDValue Sub2_Hi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub2_Mi,
2101                                   Zero, Sub2_Lo.getValue(1));
2102     SDValue Sub2 = DAG.getBitcast(VT,
2103                         DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));
2104 
2105     SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);
2106 
2107     SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
2108                                  ISD::SETUGE);
2109     SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
2110                                  ISD::SETUGE);
2111     SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);
2112 
2113     // if (C6 != 0)
2114     SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);
2115 
2116     SDValue Sub3_Lo = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub2_Lo,
2117                                   RHS_Lo, Zero1);
2118     SDValue Sub3_Mi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub2_Mi,
2119                                   RHS_Hi, Sub2_Lo.getValue(1));
2120     SDValue Sub3_Hi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub3_Mi,
2121                                   Zero, Sub3_Lo.getValue(1));
2122     SDValue Sub3 = DAG.getBitcast(VT,
2123                         DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));
2124 
2125     // endif C6
2126     // endif C3
2127 
2128     SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
2129     SDValue Div  = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);
2130 
2131     SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
2132     SDValue Rem  = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);
2133 
2134     Results.push_back(Div);
2135     Results.push_back(Rem);
2136 
2137     return;
2138   }
2139 
2140   // r600 expansion.
2141   // Get Speculative values
2142   SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
2143   SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
2144 
2145   SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
2146   SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
2147   REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
2148 
2149   SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
2150   SDValue DIV_Lo = Zero;
2151 
2152   const unsigned halfBitWidth = HalfVT.getSizeInBits();
2153 
2154   for (unsigned i = 0; i < halfBitWidth; ++i) {
2155     const unsigned bitPos = halfBitWidth - i - 1;
2156     SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
2157     // Get value of high bit
2158     SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
2159     HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
2160     HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
2161 
2162     // Shift
2163     REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
2164     // Add LHS high bit
2165     REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
2166 
2167     SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
2168     SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);
2169 
2170     DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
2171 
2172     // Update REM
2173     SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
2174     REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
2175   }
2176 
2177   SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
2178   DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
2179   Results.push_back(DIV);
2180   Results.push_back(REM);
2181 }
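
// The r600 expansion above is a restoring long division; as a scalar sketch:
//
//   uint64_t rem = (rhs_hi == 0) ? (lhs_hi % rhs_lo) : lhs_hi;  // speculative
//   uint32_t div_hi = (rhs_hi == 0) ? (lhs_hi / rhs_lo) : 0;
//   uint32_t div_lo = 0;
//   for (int bit = 31; bit >= 0; --bit) {
//     rem = (rem << 1) | ((lhs_lo >> bit) & 1);
//     if (rem >= rhs) {
//       div_lo |= 1u << bit;
//       rem -= rhs;
//     }
//   }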
2182 
2183 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
2184                                            SelectionDAG &DAG) const {
2185   SDLoc DL(Op);
2186   EVT VT = Op.getValueType();
2187 
2188   if (VT == MVT::i64) {
2189     SmallVector<SDValue, 2> Results;
2190     LowerUDIVREM64(Op, DAG, Results);
2191     return DAG.getMergeValues(Results, DL);
2192   }
2193 
2194   if (VT == MVT::i32) {
2195     if (SDValue Res = LowerDIVREM24(Op, DAG, false))
2196       return Res;
2197   }
2198 
2199   SDValue X = Op.getOperand(0);
2200   SDValue Y = Op.getOperand(1);
2201 
2202   // See AMDGPUCodeGenPrepare::expandDivRem32 for a description of the
2203   // algorithm used here.
2204 
2205   // Initial estimate of inv(y).
2206   SDValue Z = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Y);
2207 
2208   // One round of UNR.
2209   SDValue NegY = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Y);
2210   SDValue NegYZ = DAG.getNode(ISD::MUL, DL, VT, NegY, Z);
2211   Z = DAG.getNode(ISD::ADD, DL, VT, Z,
2212                   DAG.getNode(ISD::MULHU, DL, VT, Z, NegYZ));
2213 
2214   // Quotient/remainder estimate.
2215   SDValue Q = DAG.getNode(ISD::MULHU, DL, VT, X, Z);
2216   SDValue R =
2217       DAG.getNode(ISD::SUB, DL, VT, X, DAG.getNode(ISD::MUL, DL, VT, Q, Y));
2218 
2219   // First quotient/remainder refinement.
2220   EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2221   SDValue One = DAG.getConstant(1, DL, VT);
2222   SDValue Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
2223   Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2224                   DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
2225   R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2226                   DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
2227 
2228   // Second quotient/remainder refinement.
2229   Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
2230   Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2231                   DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
2232   R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2233                   DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
2234 
2235   return DAG.getMergeValues({Q, R}, DL);
2236 }
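
// As a scalar sketch of the expansion above (urecip and mulhu are shorthand
// for AMDGPUISD::URECIP and ISD::MULHU): one Newton-Raphson step on the
// hardware reciprocal estimate, then two conditional corrections.
//
//   uint32_t z = urecip(y);           // initial estimate of 2^32 / y
//   z += mulhu(z, -y * z);            // refine the reciprocal
//   uint32_t q = mulhu(x, z);         // quotient estimate
//   uint32_t r = x - q * y;
//   if (r >= y) { ++q; r -= y; }
//   if (r >= y) { ++q; r -= y; }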
2237 
2238 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
2239                                            SelectionDAG &DAG) const {
2240   SDLoc DL(Op);
2241   EVT VT = Op.getValueType();
2242 
2243   SDValue LHS = Op.getOperand(0);
2244   SDValue RHS = Op.getOperand(1);
2245 
2246   SDValue Zero = DAG.getConstant(0, DL, VT);
2247   SDValue NegOne = DAG.getConstant(-1, DL, VT);
2248 
2249   if (VT == MVT::i32) {
2250     if (SDValue Res = LowerDIVREM24(Op, DAG, true))
2251       return Res;
2252   }
2253 
2254   if (VT == MVT::i64 &&
2255       DAG.ComputeNumSignBits(LHS) > 32 &&
2256       DAG.ComputeNumSignBits(RHS) > 32) {
2257     EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
2258 
2259     //HiLo split
2260     SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
2261     SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
2262     SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
2263                                  LHS_Lo, RHS_Lo);
2264     SDValue Res[2] = {
2265       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
2266       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
2267     };
2268     return DAG.getMergeValues(Res, DL);
2269   }
2270 
2271   SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
2272   SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
2273   SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
2274   SDValue RSign = LHSign; // Remainder sign is the same as LHS
2275 
2276   LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
2277   RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
2278 
2279   LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
2280   RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
2281 
2282   SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
2283   SDValue Rem = Div.getValue(1);
2284 
2285   Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
2286   Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
2287 
2288   Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
2289   Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
2290 
2291   SDValue Res[2] = {
2292     Div,
2293     Rem
2294   };
2295   return DAG.getMergeValues(Res, DL);
2296 }
2297 
2298 // (frem x, y) -> (fma (fneg (ftrunc (fdiv x, y))), y, x)
2299 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
2300   SDLoc SL(Op);
2301   EVT VT = Op.getValueType();
2302   auto Flags = Op->getFlags();
2303   SDValue X = Op.getOperand(0);
2304   SDValue Y = Op.getOperand(1);
2305 
2306   SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y, Flags);
2307   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, VT, Div, Flags);
2308   SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Trunc, Flags);
2309   // TODO: For f32 use FMAD instead if !hasFastFMA32?
2310   return DAG.getNode(ISD::FMA, SL, VT, Neg, Y, X, Flags);
2311 }
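
// For example, frem(5.5f, 2.0f) computes trunc(5.5 / 2.0) = 2.0 and then
// fma(-2.0, 2.0, 5.5) = 1.5.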
2312 
2313 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
2314   SDLoc SL(Op);
2315   SDValue Src = Op.getOperand(0);
2316 
2317   // result = trunc(src)
2318   // if (src > 0.0 && src != result)
2319   //   result += 1.0
2320 
2321   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2322 
2323   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2324   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
2325 
2326   EVT SetCCVT =
2327       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2328 
2329   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
2330   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2331   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2332 
2333   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
2334   // TODO: Should this propagate fast-math-flags?
2335   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2336 }
2337 
2338 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
2339                                   SelectionDAG &DAG) {
2340   const unsigned FractBits = 52;
2341   const unsigned ExpBits = 11;
2342 
2343   SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
2344                                 Hi,
2345                                 DAG.getConstant(FractBits - 32, SL, MVT::i32),
2346                                 DAG.getConstant(ExpBits, SL, MVT::i32));
2347   SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
2348                             DAG.getConstant(1023, SL, MVT::i32));
2349 
2350   return Exp;
2351 }
2352 
2353 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
2354   SDLoc SL(Op);
2355   SDValue Src = Op.getOperand(0);
2356 
2357   assert(Op.getValueType() == MVT::f64);
2358 
2359   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2360 
2361   // Extract the upper half, since this is where we will find the sign and
2362   // exponent.
2363   SDValue Hi = getHiHalf64(Src, DAG);
2364 
2365   SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2366 
2367   const unsigned FractBits = 52;
2368 
2369   // Extract the sign bit.
2370   const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
2371   SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
2372 
2373   // Extend back to 64-bits.
2374   SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
2375   SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
2376 
2377   SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
2378   const SDValue FractMask
2379     = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
2380 
2381   SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
2382   SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
2383   SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
2384 
2385   EVT SetCCVT =
2386       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2387 
2388   const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
2389 
2390   SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2391   SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2392 
2393   SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
2394   SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
2395 
2396   return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
2397 }
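
// A scalar sketch of the bit manipulation above (IEEE-754 double layout, 52
// fraction bits, biased 11-bit exponent):
//
//   int64_t bits = bitcast<int64_t>(src);
//   int exp = ((bits >> 52) & 0x7ff) - 1023;
//   if (exp < 0)
//     return copysign(0.0, src);                  // |src| < 1.0
//   if (exp > 51)
//     return src;                                 // already integral (or inf/nan)
//   int64_t mask = ~(0xfffffffffffffLL >> exp);   // clear sub-integer fraction bits
//   return bitcast<double>(bits & mask);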
2398 
2399 SDValue AMDGPUTargetLowering::LowerFROUNDEVEN(SDValue Op,
2400                                               SelectionDAG &DAG) const {
2401   SDLoc SL(Op);
2402   SDValue Src = Op.getOperand(0);
2403 
2404   assert(Op.getValueType() == MVT::f64);
2405 
2406   APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
2407   SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
2408   SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
2409 
2410   // TODO: Should this propagate fast-math-flags?
2411 
2412   SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
2413   SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
2414 
2415   SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
2416 
2417   APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
2418   SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
2419 
2420   EVT SetCCVT =
2421       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2422   SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
2423 
2424   return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
2425 }
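
// The add/sub of copysign(0x1.0p+52, src) above exploits round-to-nearest-even:
// adding 2^52 forces everything below the integer position to be rounded away.
// For example, with src = 2.5:
//   tmp1 = 2.5 + 0x1.0p+52 = 4503599627370498.0   (halfway case, ties to even)
//   tmp2 = tmp1 - 0x1.0p+52 = 2.0
// Inputs with |src| > 0x1.fffffffffffffp+51 are already integral, so the
// original value is selected instead.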
2426 
2427 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op,
2428                                               SelectionDAG &DAG) const {
2429   // FNEARBYINT and FRINT are the same, except in their handling of FP
2430   // exceptions. Those aren't really meaningful for us, and OpenCL only has
2431   // rint, so just treat them as equivalent.
2432   return DAG.getNode(ISD::FROUNDEVEN, SDLoc(Op), Op.getValueType(),
2433                      Op.getOperand(0));
2434 }
2435 
2436 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
2437   auto VT = Op.getValueType();
2438   auto Arg = Op.getOperand(0u);
2439   return DAG.getNode(ISD::FROUNDEVEN, SDLoc(Op), VT, Arg);
2440 }
2441 
2442 // XXX - May require not supporting f32 denormals?
2443 
2444 // Don't handle v2f16. The extra instructions to scalarize and repack around the
2445 // compare and vselect end up producing worse code than scalarizing the whole
2446 // operation.
2447 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2448   SDLoc SL(Op);
2449   SDValue X = Op.getOperand(0);
2450   EVT VT = Op.getValueType();
2451 
2452   SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X);
2453 
2454   // TODO: Should this propagate fast-math-flags?
2455 
2456   SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T);
2457 
2458   SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff);
2459 
2460   const SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2461   const SDValue One = DAG.getConstantFP(1.0, SL, VT);
2462 
2463   EVT SetCCVT =
2464       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2465 
2466   const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
2467   SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
2468   SDValue OneOrZeroFP = DAG.getNode(ISD::SELECT, SL, VT, Cmp, One, Zero);
2469 
2470   SDValue SignedOffset = DAG.getNode(ISD::FCOPYSIGN, SL, VT, OneOrZeroFP, X);
2471   return DAG.getNode(ISD::FADD, SL, VT, T, SignedOffset);
2472 }
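
// A scalar sketch of the round-half-away-from-zero sequence above:
//
//   T = trunc(x);
//   Off = (fabs(x - T) >= 0.5) ? 1.0 : 0.0;
//   result = T + copysign(Off, x);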
2473 
2474 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
2475   SDLoc SL(Op);
2476   SDValue Src = Op.getOperand(0);
2477 
2478   // result = trunc(src);
2479   // if (src < 0.0 && src != result)
2480   //   result += -1.0.
2481 
2482   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2483 
2484   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2485   const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
2486 
2487   EVT SetCCVT =
2488       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2489 
2490   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
2491   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2492   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2493 
2494   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
2495   // TODO: Should this propagate fast-math-flags?
2496   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2497 }
2498 
2499 /// Return true if it's known that \p Src can never be an f32 denormal value.
2500 static bool valueIsKnownNeverF32Denorm(SDValue Src) {
2501   switch (Src.getOpcode()) {
2502   case ISD::FP_EXTEND:
2503     return Src.getOperand(0).getValueType() == MVT::f16;
2504   case ISD::FP16_TO_FP:
2505   case ISD::FFREXP:
2506     return true;
2507   case ISD::INTRINSIC_WO_CHAIN: {
2508     unsigned IntrinsicID = Src.getConstantOperandVal(0);
2509     switch (IntrinsicID) {
2510     case Intrinsic::amdgcn_frexp_mant:
2511       return true;
2512     default:
2513       return false;
2514     }
2515   }
2516   default:
2517     return false;
2518   }
2519 
2520   llvm_unreachable("covered opcode switch");
2521 }
2522 
2523 bool AMDGPUTargetLowering::allowApproxFunc(const SelectionDAG &DAG,
2524                                            SDNodeFlags Flags) {
2525   if (Flags.hasApproximateFuncs())
2526     return true;
2527   auto &Options = DAG.getTarget().Options;
2528   return Options.UnsafeFPMath || Options.ApproxFuncFPMath;
2529 }
2530 
2531 bool AMDGPUTargetLowering::needsDenormHandlingF32(const SelectionDAG &DAG,
2532                                                   SDValue Src,
2533                                                   SDNodeFlags Flags) {
2534   return !valueIsKnownNeverF32Denorm(Src) &&
2535          DAG.getMachineFunction()
2536                  .getDenormalMode(APFloat::IEEEsingle())
2537                  .Input != DenormalMode::PreserveSign;
2538 }
2539 
2540 SDValue AMDGPUTargetLowering::getIsLtSmallestNormal(SelectionDAG &DAG,
2541                                                     SDValue Src,
2542                                                     SDNodeFlags Flags) const {
2543   SDLoc SL(Src);
2544   EVT VT = Src.getValueType();
2545   const fltSemantics &Semantics = SelectionDAG::EVTToAPFloatSemantics(VT);
2546   SDValue SmallestNormal =
2547       DAG.getConstantFP(APFloat::getSmallestNormalized(Semantics), SL, VT);
2548 
2549   // Want to scale denormals up, but negatives and 0 work just as well on the
2550   // scaled path.
2551   SDValue IsLtSmallestNormal = DAG.getSetCC(
2552       SL, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), Src,
2553       SmallestNormal, ISD::SETOLT);
2554 
2555   return IsLtSmallestNormal;
2556 }
2557 
2558 SDValue AMDGPUTargetLowering::getIsFinite(SelectionDAG &DAG, SDValue Src,
2559                                           SDNodeFlags Flags) const {
2560   SDLoc SL(Src);
2561   EVT VT = Src.getValueType();
2562   const fltSemantics &Semantics = SelectionDAG::EVTToAPFloatSemantics(VT);
2563   SDValue Inf = DAG.getConstantFP(APFloat::getInf(Semantics), SL, VT);
2564 
2565   SDValue Fabs = DAG.getNode(ISD::FABS, SL, VT, Src, Flags);
2566   SDValue IsFinite = DAG.getSetCC(
2567       SL, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), Fabs,
2568       Inf, ISD::SETOLT);
2569   return IsFinite;
2570 }
2571 
2572 /// If denormal handling is required, return the scaled input to FLOG2, and the
2573 /// check for denormal range. Otherwise, return null values.
2574 std::pair<SDValue, SDValue>
2575 AMDGPUTargetLowering::getScaledLogInput(SelectionDAG &DAG, const SDLoc SL,
2576                                         SDValue Src, SDNodeFlags Flags) const {
2577   if (!needsDenormHandlingF32(DAG, Src, Flags))
2578     return {};
2579 
2580   MVT VT = MVT::f32;
2581   const fltSemantics &Semantics = APFloat::IEEEsingle();
2582   SDValue SmallestNormal =
2583       DAG.getConstantFP(APFloat::getSmallestNormalized(Semantics), SL, VT);
2584 
2585   SDValue IsLtSmallestNormal = DAG.getSetCC(
2586       SL, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), Src,
2587       SmallestNormal, ISD::SETOLT);
2588 
2589   SDValue Scale32 = DAG.getConstantFP(0x1.0p+32, SL, VT);
2590   SDValue One = DAG.getConstantFP(1.0, SL, VT);
2591   SDValue ScaleFactor =
2592       DAG.getNode(ISD::SELECT, SL, VT, IsLtSmallestNormal, Scale32, One, Flags);
2593 
2594   SDValue ScaledInput = DAG.getNode(ISD::FMUL, SL, VT, Src, ScaleFactor, Flags);
2595   return {ScaledInput, IsLtSmallestNormal};
2596 }
2597 
2598 SDValue AMDGPUTargetLowering::LowerFLOG2(SDValue Op, SelectionDAG &DAG) const {
2599   // v_log_f32 is good enough for OpenCL, except it doesn't handle denormals.
2600   // If we have to handle denormals, scale up the input and adjust the result.
2601 
2602   // scaled = x * (is_denormal ? 0x1.0p+32 : 1.0)
2603   // log2 = amdgpu_log2 - (is_denormal ? 32.0 : 0.0)
2604 
2605   SDLoc SL(Op);
2606   EVT VT = Op.getValueType();
2607   SDValue Src = Op.getOperand(0);
2608   SDNodeFlags Flags = Op->getFlags();
2609 
2610   if (VT == MVT::f16) {
2611     // Nothing in half is a denormal when promoted to f32.
2612     assert(!Subtarget->has16BitInsts());
2613     SDValue Ext = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src, Flags);
2614     SDValue Log = DAG.getNode(AMDGPUISD::LOG, SL, MVT::f32, Ext, Flags);
2615     return DAG.getNode(ISD::FP_ROUND, SL, VT, Log,
2616                        DAG.getTargetConstant(0, SL, MVT::i32), Flags);
2617   }
2618 
2619   auto [ScaledInput, IsLtSmallestNormal] =
2620       getScaledLogInput(DAG, SL, Src, Flags);
2621   if (!ScaledInput)
2622     return DAG.getNode(AMDGPUISD::LOG, SL, VT, Src, Flags);
2623 
2624   SDValue Log2 = DAG.getNode(AMDGPUISD::LOG, SL, VT, ScaledInput, Flags);
2625 
2626   SDValue ThirtyTwo = DAG.getConstantFP(32.0, SL, VT);
2627   SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2628   SDValue ResultOffset =
2629       DAG.getNode(ISD::SELECT, SL, VT, IsLtSmallestNormal, ThirtyTwo, Zero);
2630   return DAG.getNode(ISD::FSUB, SL, VT, Log2, ResultOffset, Flags);
2631 }
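
// For example, with x = 0x1.0p-140 (an f32 denormal):
//   scaled  = x * 0x1.0p+32 = 0x1.0p-108
//   log2(x) = v_log_f32(scaled) - 32.0 = -108.0 - 32.0 = -140.0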
2632 
2633 static SDValue getMad(SelectionDAG &DAG, const SDLoc &SL, EVT VT, SDValue X,
2634                       SDValue Y, SDValue C, SDNodeFlags Flags = SDNodeFlags()) {
2635   SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, X, Y, Flags);
2636   return DAG.getNode(ISD::FADD, SL, VT, Mul, C, Flags);
2637 }
2638 
2639 SDValue AMDGPUTargetLowering::LowerFLOGCommon(SDValue Op,
2640                                               SelectionDAG &DAG) const {
2641   SDValue X = Op.getOperand(0);
2642   EVT VT = Op.getValueType();
2643   SDNodeFlags Flags = Op->getFlags();
2644   SDLoc DL(Op);
2645 
2646   const bool IsLog10 = Op.getOpcode() == ISD::FLOG10;
2647   assert(IsLog10 || Op.getOpcode() == ISD::FLOG);
2648 
2649   const auto &Options = getTargetMachine().Options;
2650   if (VT == MVT::f16 || Flags.hasApproximateFuncs() ||
2651       Options.ApproxFuncFPMath || Options.UnsafeFPMath) {
2652 
2653     if (VT == MVT::f16 && !Subtarget->has16BitInsts()) {
2654       // Doing the log and multiply in f32 is good enough for f16.
2655       X = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, X, Flags);
2656     }
2657 
2658     SDValue Lowered = LowerFLOGUnsafe(X, DL, DAG, IsLog10, Flags);
2659     if (VT == MVT::f16 && !Subtarget->has16BitInsts()) {
2660       return DAG.getNode(ISD::FP_ROUND, DL, VT, Lowered,
2661                          DAG.getTargetConstant(0, DL, MVT::i32), Flags);
2662     }
2663 
2664     return Lowered;
2665   }
2666 
2667   auto [ScaledInput, IsScaled] = getScaledLogInput(DAG, DL, X, Flags);
2668   if (ScaledInput)
2669     X = ScaledInput;
2670 
2671   SDValue Y = DAG.getNode(AMDGPUISD::LOG, DL, VT, X, Flags);
2672 
2673   SDValue R;
2674   if (Subtarget->hasFastFMAF32()) {
2675     // c + cc is ln(2)/ln(10) to more than 49 bits
2676     const float c_log10 = 0x1.344134p-2f;
2677     const float cc_log10 = 0x1.09f79ep-26f;
2678 
2679     // c + cc is ln(2) to more than 49 bits
2680     const float c_log = 0x1.62e42ep-1f;
2681     const float cc_log = 0x1.efa39ep-25f;
2682 
2683     SDValue C = DAG.getConstantFP(IsLog10 ? c_log10 : c_log, DL, VT);
2684     SDValue CC = DAG.getConstantFP(IsLog10 ? cc_log10 : cc_log, DL, VT);
2685 
2686     R = DAG.getNode(ISD::FMUL, DL, VT, Y, C, Flags);
2687     SDValue NegR = DAG.getNode(ISD::FNEG, DL, VT, R, Flags);
2688     SDValue FMA0 = DAG.getNode(ISD::FMA, DL, VT, Y, C, NegR, Flags);
2689     SDValue FMA1 = DAG.getNode(ISD::FMA, DL, VT, Y, CC, FMA0, Flags);
2690     R = DAG.getNode(ISD::FADD, DL, VT, R, FMA1, Flags);
2691   } else {
2692     // ch+ct is ln(2)/ln(10) to more than 36 bits
2693     const float ch_log10 = 0x1.344000p-2f;
2694     const float ct_log10 = 0x1.3509f6p-18f;
2695 
2696     // ch + ct is ln(2) to more than 36 bits
2697     const float ch_log = 0x1.62e000p-1f;
2698     const float ct_log = 0x1.0bfbe8p-15f;
2699 
2700     SDValue CH = DAG.getConstantFP(IsLog10 ? ch_log10 : ch_log, DL, VT);
2701     SDValue CT = DAG.getConstantFP(IsLog10 ? ct_log10 : ct_log, DL, VT);
2702 
2703     SDValue YAsInt = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Y);
2704     SDValue MaskConst = DAG.getConstant(0xfffff000, DL, MVT::i32);
2705     SDValue YHInt = DAG.getNode(ISD::AND, DL, MVT::i32, YAsInt, MaskConst);
2706     SDValue YH = DAG.getNode(ISD::BITCAST, DL, MVT::f32, YHInt);
2707     SDValue YT = DAG.getNode(ISD::FSUB, DL, VT, Y, YH, Flags);
2708 
2709     SDValue YTCT = DAG.getNode(ISD::FMUL, DL, VT, YT, CT, Flags);
2710     SDValue Mad0 = getMad(DAG, DL, VT, YH, CT, YTCT, Flags);
2711     SDValue Mad1 = getMad(DAG, DL, VT, YT, CH, Mad0, Flags);
2712     R = getMad(DAG, DL, VT, YH, CH, Mad1);
2713   }
2714 
2715   const bool IsFiniteOnly = (Flags.hasNoNaNs() || Options.NoNaNsFPMath) &&
2716                             (Flags.hasNoInfs() || Options.NoInfsFPMath);
2717 
2718   // TODO: Check if known finite from source value.
2719   if (!IsFiniteOnly) {
2720     SDValue IsFinite = getIsFinite(DAG, Y, Flags);
2721     R = DAG.getNode(ISD::SELECT, DL, VT, IsFinite, R, Y, Flags);
2722   }
2723 
2724   if (IsScaled) {
2725     SDValue Zero = DAG.getConstantFP(0.0f, DL, VT);
2726     SDValue ShiftK =
2727         DAG.getConstantFP(IsLog10 ? 0x1.344136p+3f : 0x1.62e430p+4f, DL, VT);
2728     SDValue Shift =
2729         DAG.getNode(ISD::SELECT, DL, VT, IsScaled, ShiftK, Zero, Flags);
2730     R = DAG.getNode(ISD::FSUB, DL, VT, R, Shift, Flags);
2731   }
2732 
2733   return R;
2734 }
2735 
2736 SDValue AMDGPUTargetLowering::LowerFLOG10(SDValue Op, SelectionDAG &DAG) const {
2737   return LowerFLOGCommon(Op, DAG);
2738 }
2739 
2740 // Do f32 fast math expansion for flog2 or flog10. This is accurate enough for a
2741 // promoted f16 operation.
2742 SDValue AMDGPUTargetLowering::LowerFLOGUnsafe(SDValue Src, const SDLoc &SL,
2743                                               SelectionDAG &DAG, bool IsLog10,
2744                                               SDNodeFlags Flags) const {
2745   EVT VT = Src.getValueType();
2746   unsigned LogOp =
2747       VT == MVT::f32 ? (unsigned)AMDGPUISD::LOG : (unsigned)ISD::FLOG2;
2748 
2749   double Log2BaseInverted =
2750       IsLog10 ? numbers::ln2 / numbers::ln10 : numbers::ln2;
2751 
2752   if (VT == MVT::f32) {
2753     auto [ScaledInput, IsScaled] = getScaledLogInput(DAG, SL, Src, Flags);
2754     if (ScaledInput) {
2755       SDValue LogSrc = DAG.getNode(AMDGPUISD::LOG, SL, VT, ScaledInput, Flags);
2756       SDValue ScaledResultOffset =
2757           DAG.getConstantFP(-32.0 * Log2BaseInverted, SL, VT);
2758 
2759       SDValue Zero = DAG.getConstantFP(0.0f, SL, VT);
2760 
2761       SDValue ResultOffset = DAG.getNode(ISD::SELECT, SL, VT, IsScaled,
2762                                          ScaledResultOffset, Zero, Flags);
2763 
2764       SDValue Log2Inv = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2765 
2766       if (Subtarget->hasFastFMAF32())
2767         return DAG.getNode(ISD::FMA, SL, VT, LogSrc, Log2Inv, ResultOffset,
2768                            Flags);
2769       SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, LogSrc, Log2Inv, Flags);
2770       return DAG.getNode(ISD::FADD, SL, VT, Mul, ResultOffset);
2771     }
2772   }
2773 
2774   SDValue Log2Operand = DAG.getNode(LogOp, SL, VT, Src, Flags);
2775   SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2776 
2777   return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand,
2778                      Flags);
2779 }
2780 
2781 SDValue AMDGPUTargetLowering::lowerFEXP2(SDValue Op, SelectionDAG &DAG) const {
2782   // v_exp_f32 is good enough for OpenCL, except it doesn't handle denormals.
2783   // If we have to handle denormals, scale up the input and adjust the result.
2784 
2785   SDLoc SL(Op);
2786   EVT VT = Op.getValueType();
2787   SDValue Src = Op.getOperand(0);
2788   SDNodeFlags Flags = Op->getFlags();
2789 
2790   if (VT == MVT::f16) {
2791     // Nothing in half is a denormal when promoted to f32.
2792     assert(!Subtarget->has16BitInsts());
2793     SDValue Ext = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src, Flags);
2794     SDValue Exp = DAG.getNode(AMDGPUISD::EXP, SL, MVT::f32, Ext, Flags);
2795     return DAG.getNode(ISD::FP_ROUND, SL, VT, Exp,
2796                        DAG.getTargetConstant(0, SL, MVT::i32), Flags);
2797   }
2798 
2799   assert(VT == MVT::f32);
2800 
2801   if (!needsDenormHandlingF32(DAG, Src, Flags))
2802     return DAG.getNode(AMDGPUISD::EXP, SL, MVT::f32, Src, Flags);
2803 
2804   // bool needs_scaling = x < -0x1.f80000p+6f;
2805   // v_exp_f32(x + (s ? 0x1.0p+6f : 0.0f)) * (s ? 0x1.0p-64f : 1.0f);
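       // Scaling the input up by 64 keeps the hardware evaluation in the normal
       // range: exp2(x + 64) * 0x1.0p-64f == exp2(x), so inputs below -126 no
       // longer produce a denormal intermediate result.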
2806 
2807   // -126.0f; results for inputs below this would be denormal.
2808   SDValue RangeCheckConst = DAG.getConstantFP(-0x1.f80000p+6f, SL, VT);
2809 
2810   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2811 
2812   SDValue NeedsScaling =
2813       DAG.getSetCC(SL, SetCCVT, Src, RangeCheckConst, ISD::SETOLT);
2814 
2815   SDValue SixtyFour = DAG.getConstantFP(0x1.0p+6f, SL, VT);
2816   SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2817 
2818   SDValue AddOffset =
2819       DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, SixtyFour, Zero);
2820 
2821   SDValue AddInput = DAG.getNode(ISD::FADD, SL, VT, Src, AddOffset, Flags);
2822   SDValue Exp2 = DAG.getNode(AMDGPUISD::EXP, SL, VT, AddInput, Flags);
2823 
2824   SDValue TwoExpNeg64 = DAG.getConstantFP(0x1.0p-64f, SL, VT);
2825   SDValue One = DAG.getConstantFP(1.0, SL, VT);
2826   SDValue ResultScale =
2827       DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, TwoExpNeg64, One);
2828 
2829   return DAG.getNode(ISD::FMUL, SL, VT, Exp2, ResultScale, Flags);
2830 }
2831 
2832 SDValue AMDGPUTargetLowering::lowerFEXPUnsafe(SDValue X, const SDLoc &SL,
2833                                               SelectionDAG &DAG,
2834                                               SDNodeFlags Flags) const {
2835   EVT VT = X.getValueType();
2836   const SDValue Log2E = DAG.getConstantFP(numbers::log2e, SL, VT);
2837 
2838   if (VT != MVT::f32 || !needsDenormHandlingF32(DAG, X, Flags)) {
2839     // exp2(M_LOG2E_F * f);
2840     SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, X, Log2E, Flags);
2841     return DAG.getNode(VT == MVT::f32 ? (unsigned)AMDGPUISD::EXP
2842                                       : (unsigned)ISD::FEXP2,
2843                        SL, VT, Mul, Flags);
2844   }
2845 
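       // Denormal results need to be preserved here: for inputs below roughly
       // ln(2^-126) ~= -87.34 (the threshold -0x1.5d58a0p+6f), offset x by 64 so
       // the hardware computes exp(x + 64) = exp(x) * e^64, then multiply by
       // 0x1.969d48p-93f ~= e^-64 to undo the scaling.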
2846   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2847 
2848   SDValue Threshold = DAG.getConstantFP(-0x1.5d58a0p+6f, SL, VT);
2849   SDValue NeedsScaling = DAG.getSetCC(SL, SetCCVT, X, Threshold, ISD::SETOLT);
2850 
2851   SDValue ScaleOffset = DAG.getConstantFP(0x1.0p+6f, SL, VT);
2852 
2853   SDValue ScaledX = DAG.getNode(ISD::FADD, SL, VT, X, ScaleOffset, Flags);
2854 
2855   SDValue AdjustedX =
2856       DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, ScaledX, X);
2857 
2858   SDValue ExpInput = DAG.getNode(ISD::FMUL, SL, VT, AdjustedX, Log2E, Flags);
2859 
2860   SDValue Exp2 = DAG.getNode(AMDGPUISD::EXP, SL, VT, ExpInput, Flags);
2861 
2862   SDValue ResultScaleFactor = DAG.getConstantFP(0x1.969d48p-93f, SL, VT);
2863   SDValue AdjustedResult =
2864       DAG.getNode(ISD::FMUL, SL, VT, Exp2, ResultScaleFactor, Flags);
2865 
2866   return DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, AdjustedResult, Exp2,
2867                      Flags);
2868 }
2869 
2870 /// Emit an approx-funcs appropriate lowering for exp10. Inf/NaN should still
2871 /// be handled correctly.
2872 SDValue AMDGPUTargetLowering::lowerFEXP10Unsafe(SDValue X, const SDLoc &SL,
2873                                                 SelectionDAG &DAG,
2874                                                 SDNodeFlags Flags) const {
2875   const EVT VT = X.getValueType();
2876   const unsigned Exp2Op = VT == MVT::f32 ? AMDGPUISD::EXP : ISD::FEXP2;
2877 
2878   if (VT != MVT::f32 || !needsDenormHandlingF32(DAG, X, Flags)) {
2879     // exp2(x * 0x1.a92000p+1f) * exp2(x * 0x1.4f0978p-11f);
2880     SDValue K0 = DAG.getConstantFP(0x1.a92000p+1f, SL, VT);
2881     SDValue K1 = DAG.getConstantFP(0x1.4f0978p-11f, SL, VT);
2882 
2883     SDValue Mul0 = DAG.getNode(ISD::FMUL, SL, VT, X, K0, Flags);
2884     SDValue Exp2_0 = DAG.getNode(Exp2Op, SL, VT, Mul0, Flags);
2885     SDValue Mul1 = DAG.getNode(ISD::FMUL, SL, VT, X, K1, Flags);
2886     SDValue Exp2_1 = DAG.getNode(Exp2Op, SL, VT, Mul1, Flags);
2887     return DAG.getNode(ISD::FMUL, SL, VT, Exp2_0, Exp2_1);
2888   }
2889 
2890   // bool s = x < -0x1.2f7030p+5f;
2891   // x += s ? 0x1.0p+5f : 0.0f;
2892   // exp10 = exp2(x * 0x1.a92000p+1f) *
2893   //        exp2(x * 0x1.4f0978p-11f) *
2894   //        (s ? 0x1.9f623ep-107f : 1.0f);
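       // 0x1.a92000p+1f + 0x1.4f0978p-11f ~= log2(10), split into two terms for
       // extra precision. Adding 0x1.0p+5f (32.0) to x multiplies the result by
       // 10^32, which the final factor 0x1.9f623ep-107f ~= 10^-32 cancels; the
       // threshold -0x1.2f7030p+5f ~= log10(2^-126) marks where exp10 results
       // become denormal.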
2895 
2896   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2897 
2898   SDValue Threshold = DAG.getConstantFP(-0x1.2f7030p+5f, SL, VT);
2899   SDValue NeedsScaling = DAG.getSetCC(SL, SetCCVT, X, Threshold, ISD::SETOLT);
2900 
2901   SDValue ScaleOffset = DAG.getConstantFP(0x1.0p+5f, SL, VT);
2902   SDValue ScaledX = DAG.getNode(ISD::FADD, SL, VT, X, ScaleOffset, Flags);
2903   SDValue AdjustedX =
2904       DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, ScaledX, X);
2905 
2906   SDValue K0 = DAG.getConstantFP(0x1.a92000p+1f, SL, VT);
2907   SDValue K1 = DAG.getConstantFP(0x1.4f0978p-11f, SL, VT);
2908 
2909   SDValue Mul0 = DAG.getNode(ISD::FMUL, SL, VT, AdjustedX, K0, Flags);
2910   SDValue Exp2_0 = DAG.getNode(Exp2Op, SL, VT, Mul0, Flags);
2911   SDValue Mul1 = DAG.getNode(ISD::FMUL, SL, VT, AdjustedX, K1, Flags);
2912   SDValue Exp2_1 = DAG.getNode(Exp2Op, SL, VT, Mul1, Flags);
2913 
2914   SDValue MulExps = DAG.getNode(ISD::FMUL, SL, VT, Exp2_0, Exp2_1, Flags);
2915 
2916   SDValue ResultScaleFactor = DAG.getConstantFP(0x1.9f623ep-107f, SL, VT);
2917   SDValue AdjustedResult =
2918       DAG.getNode(ISD::FMUL, SL, VT, MulExps, ResultScaleFactor, Flags);
2919 
2920   return DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, AdjustedResult, MulExps,
2921                      Flags);
2922 }
2923 
2924 SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const {
2925   EVT VT = Op.getValueType();
2926   SDLoc SL(Op);
2927   SDValue X = Op.getOperand(0);
2928   SDNodeFlags Flags = Op->getFlags();
2929   const bool IsExp10 = Op.getOpcode() == ISD::FEXP10;
2930 
2931   if (VT.getScalarType() == MVT::f16) {
2932     // v_exp_f16 (fmul x, log2e)
2933     if (allowApproxFunc(DAG, Flags)) // TODO: Does this really require fast?
2934       return lowerFEXPUnsafe(X, SL, DAG, Flags);
2935 
2936     if (VT.isVector())
2937       return SDValue();
2938 
2939     // exp(f16 x) ->
2940     //   fptrunc (v_exp_f32 (fmul (fpext x), log2e))
2941 
2942     // Nothing in half is a denormal when promoted to f32.
2943     SDValue Ext = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, X, Flags);
2944     SDValue Lowered = lowerFEXPUnsafe(Ext, SL, DAG, Flags);
2945     return DAG.getNode(ISD::FP_ROUND, SL, VT, Lowered,
2946                        DAG.getTargetConstant(0, SL, MVT::i32), Flags);
2947   }
2948 
2949   assert(VT == MVT::f32);
2950 
2951   // TODO: Interpret allowApproxFunc as ignoring DAZ. This is currently copying
2952   // library behavior. Also, is known-not-daz source sufficient?
2953   if (allowApproxFunc(DAG, Flags)) {
2954     return IsExp10 ? lowerFEXP10Unsafe(X, SL, DAG, Flags)
2955                    : lowerFEXPUnsafe(X, SL, DAG, Flags);
2956   }
2957 
2958   //    Algorithm:
2959   //
2960   //    e^x = 2^(x/ln(2)) = 2^(x*(64/ln(2))/64)
2961   //
2962   //    x*(64/ln(2)) = n + f, |f| <= 0.5, n is integer
2963   //    n = 64*m + j,   0 <= j < 64
2964   //
2965   //    e^x = 2^((64*m + j + f)/64)
2966   //        = (2^m) * (2^(j/64)) * 2^(f/64)
2967   //        = (2^m) * (2^(j/64)) * e^(f*(ln(2)/64))
2968   //
2969   //    f = x*(64/ln(2)) - n
2970   //    r = f*(ln(2)/64) = x - n*(ln(2)/64)
2971   //
2972   //    e^x = (2^m) * (2^(j/64)) * e^r
2973   //
2974   //    (2^(j/64)) is precomputed
2975   //
2976   //    e^r = 1 + r + (r^2)/2! + (r^3)/3! + (r^4)/4! + (r^5)/5!
2977   //    e^r = 1 + q
2978   //
2979   //    q = r + (r^2)/2! + (r^3)/3! + (r^4)/4! + (r^5)/5!
2980   //
2981   //    e^x = (2^m) * ( (2^(j/64)) + q*(2^(j/64)) )
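       //
       //    The code below follows a similar decomposition without the table:
       //    PH + PL approximates x * log2(e) (or x * log2(10) for exp10) in
       //    extended precision, E = roundeven(PH) is the integer exponent, the
       //    hardware exp2 evaluates 2^((PH - E) + PL), and ldexp applies the
       //    remaining 2^E factor.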
2982   SDNodeFlags FlagsNoContract = Flags;
2983   FlagsNoContract.setAllowContract(false);
2984 
2985   SDValue PH, PL;
2986   if (Subtarget->hasFastFMAF32()) {
2987     const float c_exp = numbers::log2ef;
2988     const float cc_exp = 0x1.4ae0bep-26f; // c+cc are 49 bits
2989     const float c_exp10 = 0x1.a934f0p+1f;
2990     const float cc_exp10 = 0x1.2f346ep-24f;
2991 
2992     SDValue C = DAG.getConstantFP(IsExp10 ? c_exp10 : c_exp, SL, VT);
2993     SDValue CC = DAG.getConstantFP(IsExp10 ? cc_exp10 : cc_exp, SL, VT);
2994 
2995     PH = DAG.getNode(ISD::FMUL, SL, VT, X, C, Flags);
2996     SDValue NegPH = DAG.getNode(ISD::FNEG, SL, VT, PH, Flags);
2997     SDValue FMA0 = DAG.getNode(ISD::FMA, SL, VT, X, C, NegPH, Flags);
2998     PL = DAG.getNode(ISD::FMA, SL, VT, X, CC, FMA0, Flags);
2999   } else {
3000     const float ch_exp = 0x1.714000p+0f;
3001     const float cl_exp = 0x1.47652ap-12f; // ch + cl are 36 bits
3002 
3003     const float ch_exp10 = 0x1.a92000p+1f;
3004     const float cl_exp10 = 0x1.4f0978p-11f;
3005 
3006     SDValue CH = DAG.getConstantFP(IsExp10 ? ch_exp10 : ch_exp, SL, VT);
3007     SDValue CL = DAG.getConstantFP(IsExp10 ? cl_exp10 : cl_exp, SL, VT);
3008 
3009     SDValue XAsInt = DAG.getNode(ISD::BITCAST, SL, MVT::i32, X);
3010     SDValue MaskConst = DAG.getConstant(0xfffff000, SL, MVT::i32);
3011     SDValue XHAsInt = DAG.getNode(ISD::AND, SL, MVT::i32, XAsInt, MaskConst);
3012     SDValue XH = DAG.getNode(ISD::BITCAST, SL, VT, XHAsInt);
3013     SDValue XL = DAG.getNode(ISD::FSUB, SL, VT, X, XH, Flags);
3014 
3015     PH = DAG.getNode(ISD::FMUL, SL, VT, XH, CH, Flags);
3016 
3017     SDValue XLCL = DAG.getNode(ISD::FMUL, SL, VT, XL, CL, Flags);
3018     SDValue Mad0 = getMad(DAG, SL, VT, XL, CH, XLCL, Flags);
3019     PL = getMad(DAG, SL, VT, XH, CL, Mad0, Flags);
3020   }
3021 
3022   SDValue E = DAG.getNode(ISD::FROUNDEVEN, SL, VT, PH, Flags);
3023 
3024   // It is unsafe to contract this fsub into the PH multiply.
3025   SDValue PHSubE = DAG.getNode(ISD::FSUB, SL, VT, PH, E, FlagsNoContract);
3026 
3027   SDValue A = DAG.getNode(ISD::FADD, SL, VT, PHSubE, PL, Flags);
3028   SDValue IntE = DAG.getNode(ISD::FP_TO_SINT, SL, MVT::i32, E);
3029   SDValue Exp2 = DAG.getNode(AMDGPUISD::EXP, SL, VT, A, Flags);
3030 
3031   SDValue R = DAG.getNode(ISD::FLDEXP, SL, VT, Exp2, IntE, Flags);
3032 
3033   SDValue UnderflowCheckConst =
3034       DAG.getConstantFP(IsExp10 ? -0x1.66d3e8p+5f : -0x1.9d1da0p+6f, SL, VT);
3035 
3036   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
3037   SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
3038   SDValue Underflow =
3039       DAG.getSetCC(SL, SetCCVT, X, UnderflowCheckConst, ISD::SETOLT);
3040 
3041   R = DAG.getNode(ISD::SELECT, SL, VT, Underflow, Zero, R);
3042   const auto &Options = getTargetMachine().Options;
3043 
3044   if (!Flags.hasNoInfs() && !Options.NoInfsFPMath) {
3045     SDValue OverflowCheckConst =
3046         DAG.getConstantFP(IsExp10 ? 0x1.344136p+5f : 0x1.62e430p+6f, SL, VT);
3047     SDValue Overflow =
3048         DAG.getSetCC(SL, SetCCVT, X, OverflowCheckConst, ISD::SETOGT);
3049     SDValue Inf =
3050         DAG.getConstantFP(APFloat::getInf(APFloat::IEEEsingle()), SL, VT);
3051     R = DAG.getNode(ISD::SELECT, SL, VT, Overflow, Inf, R);
3052   }
3053 
3054   return R;
3055 }
3056 
3057 static bool isCtlzOpc(unsigned Opc) {
3058   return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
3059 }
3060 
3061 static bool isCttzOpc(unsigned Opc) {
3062   return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
3063 }
3064 
3065 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
3066   SDLoc SL(Op);
3067   SDValue Src = Op.getOperand(0);
3068 
3069   assert(isCtlzOpc(Op.getOpcode()) || isCttzOpc(Op.getOpcode()));
3070   bool Ctlz = isCtlzOpc(Op.getOpcode());
3071   unsigned NewOpc = Ctlz ? AMDGPUISD::FFBH_U32 : AMDGPUISD::FFBL_B32;
3072 
3073   bool ZeroUndef = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF ||
3074                    Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF;
3075   bool Is64BitScalar = !Src->isDivergent() && Src.getValueType() == MVT::i64;
3076 
3077   if (Src.getValueType() == MVT::i32 || Is64BitScalar) {
3078     // (ctlz hi:lo) -> (umin (ffbh src), 32)
3079     // (cttz hi:lo) -> (umin (ffbl src), 32)
3080     // (ctlz_zero_undef src) -> (ffbh src)
3081     // (cttz_zero_undef src) -> (ffbl src)
3082 
3083     // The 64-bit scalar version produces a 32-bit result
3084     // (ctlz hi:lo) -> (umin (S_FLBIT_I32_B64 src), 64)
3085     // (cttz hi:lo) -> (umin (S_FF1_I32_B64 src), 64)
3086     // (ctlz_zero_undef src) -> (S_FLBIT_I32_B64 src)
3087     // (cttz_zero_undef src) -> (S_FF1_I32_B64 src)
3088     SDValue NewOpr = DAG.getNode(NewOpc, SL, MVT::i32, Src);
3089     if (!ZeroUndef) {
3090       const SDValue ConstVal = DAG.getConstant(
3091           Op.getValueType().getScalarSizeInBits(), SL, MVT::i32);
3092       NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, NewOpr, ConstVal);
3093     }
3094     return DAG.getNode(ISD::ZERO_EXTEND, SL, Src.getValueType(), NewOpr);
3095   }
3096 
3097   SDValue Lo, Hi;
3098   std::tie(Lo, Hi) = split64BitValue(Src, DAG);
3099 
3100   SDValue OprLo = DAG.getNode(NewOpc, SL, MVT::i32, Lo);
3101   SDValue OprHi = DAG.getNode(NewOpc, SL, MVT::i32, Hi);
3102 
3103   // (ctlz hi:lo) -> (umin3 (ffbh hi), (uaddsat (ffbh lo), 32), 64)
3104   // (cttz hi:lo) -> (umin3 (uaddsat (ffbl hi), 32), (ffbl lo), 64)
3105   // (ctlz_zero_undef hi:lo) -> (umin (ffbh hi), (add (ffbh lo), 32))
3106   // (cttz_zero_undef hi:lo) -> (umin (add (ffbl hi), 32), (ffbl lo))
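       // e.g. ctlz of hi = 2, lo = 0x80000000:
       //   umin3(ffbh(2), uaddsat(ffbh(0x80000000), 32), 64)
       //     = umin3(30, 0 + 32, 64) = 30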
3107 
3108   unsigned AddOpc = ZeroUndef ? ISD::ADD : ISD::UADDSAT;
3109   const SDValue Const32 = DAG.getConstant(32, SL, MVT::i32);
3110   if (Ctlz)
3111     OprLo = DAG.getNode(AddOpc, SL, MVT::i32, OprLo, Const32);
3112   else
3113     OprHi = DAG.getNode(AddOpc, SL, MVT::i32, OprHi, Const32);
3114 
3115   SDValue NewOpr;
3116   NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, OprLo, OprHi);
3117   if (!ZeroUndef) {
3118     const SDValue Const64 = DAG.getConstant(64, SL, MVT::i32);
3119     NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, NewOpr, Const64);
3120   }
3121 
3122   return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
3123 }
3124 
3125 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
3126                                                bool Signed) const {
3127   // The regular method of converting a 64-bit integer to float roughly consists of
3128   // 2 steps: normalization and rounding. In fact, after normalization, the
3129   // conversion from a 64-bit integer to a float is essentially the same as the
3130   // one from a 32-bit integer. The only difference is that it has more
3131   // trailing bits to be rounded. To leverage the native 32-bit conversion, a
3132   // 64-bit integer could be preprocessed and fit into a 32-bit integer then
3133   // converted into the correct float number. The basic steps for the unsigned
3134   // conversion are illustrated in the following pseudo code:
3135   //
3136   // f32 uitofp(i64 u) {
3137   //   i32 hi, lo = split(u);
3138   //   // Only count the leading zeros in hi as we have native support of the
3139   //   // conversion from i32 to f32. If hi is all 0s, the conversion is
3140   //   // reduced to a 32-bit one automatically.
3141   //   i32 shamt = clz(hi); // Return 32 if hi is all 0s.
3142   //   u <<= shamt;
3143   //   hi, lo = split(u);
3144   //   hi |= (lo != 0) ? 1 : 0; // Adjust rounding bit in hi based on lo.
3145   //   // convert it as a 32-bit integer and scale the result back.
3146   //   return uitofp(hi) * 2^(32 - shamt);
3147   // }
3148   //
3149   // The signed one follows the same principle but uses 'ffbh_i32' to count its
3150   // sign bits instead. If 'ffbh_i32' is not available, its absolute value is
3151   // converted, followed by negation based on its sign bit.
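       //
       // For example, uitofp(2^40 + 1): shamt = clz(0x100) = 23; after the shift,
       // hi = 0x80000000 and lo = 0x00800000. Since lo != 0, hi becomes
       // 0x80000001, which converts to 2^31, and scaling by 2^(32 - 23) yields
       // 2^40, the correctly rounded f32 result.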
3152 
3153   SDLoc SL(Op);
3154   SDValue Src = Op.getOperand(0);
3155 
3156   SDValue Lo, Hi;
3157   std::tie(Lo, Hi) = split64BitValue(Src, DAG);
3158   SDValue Sign;
3159   SDValue ShAmt;
3160   if (Signed && Subtarget->isGCN()) {
3161     // We also need to consider the sign bit in Lo if Hi has just sign bits,
3162     // i.e. Hi is 0 or -1. However, that only needs to take the MSB into
3163     // account. That is, the maximal shift is
3164     // - 32 if Lo and Hi have opposite signs;
3165     // - 33 if Lo and Hi have the same sign.
3166     //
3167     // Or, MaxShAmt = 33 + OppositeSign, where
3168     //
3169     // OppositeSign is defined as ((Lo ^ Hi) >> 31), which is
3170     // - -1 if Lo and Hi have opposite signs; and
3171     // -  0 otherwise.
3172     //
3173     // All in all, ShAmt is calculated as
3174     //
3175     //  umin(sffbh(Hi), 33 + (Lo^Hi)>>31) - 1.
3176     //
3177     // or
3178     //
3179     //  umin(sffbh(Hi) - 1, 32 + (Lo^Hi)>>31).
3180     //
3181     // to reduce the critical path.
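         //
         // For example, for Src = -2^40 (Hi = 0xffffff00, Lo = 0): sffbh(Hi) = 24
         // and OppositeSign = -1, so ShAmt = umin(24 - 1, 32 - 1) = 23. The
         // normalized value is -2^63, whose high half converts to -2^31, and
         // ldexp by 32 - 23 = 9 reproduces -2^40.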
3182     SDValue OppositeSign = DAG.getNode(
3183         ISD::SRA, SL, MVT::i32, DAG.getNode(ISD::XOR, SL, MVT::i32, Lo, Hi),
3184         DAG.getConstant(31, SL, MVT::i32));
3185     SDValue MaxShAmt =
3186         DAG.getNode(ISD::ADD, SL, MVT::i32, DAG.getConstant(32, SL, MVT::i32),
3187                     OppositeSign);
3188     // Count the leading sign bits.
3189     ShAmt = DAG.getNode(AMDGPUISD::FFBH_I32, SL, MVT::i32, Hi);
3190     // Different from unsigned conversion, the shift should be one bit less to
3191     // preserve the sign bit.
3192     ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, ShAmt,
3193                         DAG.getConstant(1, SL, MVT::i32));
3194     ShAmt = DAG.getNode(ISD::UMIN, SL, MVT::i32, ShAmt, MaxShAmt);
3195   } else {
3196     if (Signed) {
3197       // Without 'ffbh_i32', only leading zeros can be counted. Take the
3198       // absolute value first.
3199       Sign = DAG.getNode(ISD::SRA, SL, MVT::i64, Src,
3200                          DAG.getConstant(63, SL, MVT::i64));
3201       SDValue Abs =
3202           DAG.getNode(ISD::XOR, SL, MVT::i64,
3203                       DAG.getNode(ISD::ADD, SL, MVT::i64, Src, Sign), Sign);
3204       std::tie(Lo, Hi) = split64BitValue(Abs, DAG);
3205     }
3206     // Count the leading zeros.
3207     ShAmt = DAG.getNode(ISD::CTLZ, SL, MVT::i32, Hi);
3208     // The shift amount for signed integers is [0, 32].
3209   }
3210   // Normalize the given 64-bit integer.
3211   SDValue Norm = DAG.getNode(ISD::SHL, SL, MVT::i64, Src, ShAmt);
3212   // Split it again.
3213   std::tie(Lo, Hi) = split64BitValue(Norm, DAG);
3214   // Calculate the adjust bit for rounding.
3215   // (lo != 0) ? 1 : 0 => (lo >= 1) ? 1 : 0 => umin(1, lo)
3216   SDValue Adjust = DAG.getNode(ISD::UMIN, SL, MVT::i32,
3217                                DAG.getConstant(1, SL, MVT::i32), Lo);
3218   // Get the 32-bit normalized integer.
3219   Norm = DAG.getNode(ISD::OR, SL, MVT::i32, Hi, Adjust);
3220   // Convert the normalized 32-bit integer into f32.
3221   unsigned Opc =
3222       (Signed && Subtarget->isGCN()) ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
3223   SDValue FVal = DAG.getNode(Opc, SL, MVT::f32, Norm);
3224 
3225   // Finally, we need to scale back the converted floating-point number, as the
3226   // original 64-bit integer was converted as a 32-bit one.
3227   ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, DAG.getConstant(32, SL, MVT::i32),
3228                       ShAmt);
3229   // On GCN, use LDEXP directly.
3230   if (Subtarget->isGCN())
3231     return DAG.getNode(ISD::FLDEXP, SL, MVT::f32, FVal, ShAmt);
3232 
3233   // Otherwise, align 'ShAmt' to the exponent part and add it into the exponent
3234   // part directly to emulate the multiplication of 2^ShAmt. That 8-bit
3235   // exponent is enough to avoid overflowing into the sign bit.
3236   SDValue Exp = DAG.getNode(ISD::SHL, SL, MVT::i32, ShAmt,
3237                             DAG.getConstant(23, SL, MVT::i32));
3238   SDValue IVal =
3239       DAG.getNode(ISD::ADD, SL, MVT::i32,
3240                   DAG.getNode(ISD::BITCAST, SL, MVT::i32, FVal), Exp);
3241   if (Signed) {
3242     // Set the sign bit.
3243     Sign = DAG.getNode(ISD::SHL, SL, MVT::i32,
3244                        DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Sign),
3245                        DAG.getConstant(31, SL, MVT::i32));
3246     IVal = DAG.getNode(ISD::OR, SL, MVT::i32, IVal, Sign);
3247   }
3248   return DAG.getNode(ISD::BITCAST, SL, MVT::f32, IVal);
3249 }
3250 
3251 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
3252                                                bool Signed) const {
3253   SDLoc SL(Op);
3254   SDValue Src = Op.getOperand(0);
3255 
3256   SDValue Lo, Hi;
3257   std::tie(Lo, Hi) = split64BitValue(Src, DAG);
3258 
3259   SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
3260                               SL, MVT::f64, Hi);
3261 
3262   SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
3263 
3264   SDValue LdExp = DAG.getNode(ISD::FLDEXP, SL, MVT::f64, CvtHi,
3265                               DAG.getConstant(32, SL, MVT::i32));
3266   // TODO: Should this propagate fast-math-flags?
3267   return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
3268 }
3269 
3270 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
3271                                                SelectionDAG &DAG) const {
3272   // TODO: Factor out code common with LowerSINT_TO_FP.
3273   EVT DestVT = Op.getValueType();
3274   SDValue Src = Op.getOperand(0);
3275   EVT SrcVT = Src.getValueType();
3276 
3277   if (SrcVT == MVT::i16) {
3278     if (DestVT == MVT::f16)
3279       return Op;
3280     SDLoc DL(Op);
3281 
3282     // Promote src to i32
3283     SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Src);
3284     return DAG.getNode(ISD::UINT_TO_FP, DL, DestVT, Ext);
3285   }
3286 
3287   if (DestVT == MVT::bf16) {
3288     SDLoc SL(Op);
3289     SDValue ToF32 = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f32, Src);
3290     SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SL, /*isTarget=*/true);
3291     return DAG.getNode(ISD::FP_ROUND, SL, MVT::bf16, ToF32, FPRoundFlag);
3292   }
3293 
3294   if (SrcVT != MVT::i64)
3295     return Op;
3296 
3297   if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
3298     SDLoc DL(Op);
3299 
3300     SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
3301     SDValue FPRoundFlag =
3302         DAG.getIntPtrConstant(0, SDLoc(Op), /*isTarget=*/true);
3303     SDValue FPRound =
3304         DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
3305 
3306     return FPRound;
3307   }
3308 
3309   if (DestVT == MVT::f32)
3310     return LowerINT_TO_FP32(Op, DAG, false);
3311 
3312   assert(DestVT == MVT::f64);
3313   return LowerINT_TO_FP64(Op, DAG, false);
3314 }
3315 
3316 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
3317                                               SelectionDAG &DAG) const {
3318   EVT DestVT = Op.getValueType();
3319 
3320   SDValue Src = Op.getOperand(0);
3321   EVT SrcVT = Src.getValueType();
3322 
3323   if (SrcVT == MVT::i16) {
3324     if (DestVT == MVT::f16)
3325       return Op;
3326 
3327     SDLoc DL(Op);
3328     // Promote src to i32
3329     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32, Src);
3330     return DAG.getNode(ISD::SINT_TO_FP, DL, DestVT, Ext);
3331   }
3332 
3333   if (DestVT == MVT::bf16) {
3334     SDLoc SL(Op);
3335     SDValue ToF32 = DAG.getNode(ISD::SINT_TO_FP, SL, MVT::f32, Src);
3336     SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SL, /*isTarget=*/true);
3337     return DAG.getNode(ISD::FP_ROUND, SL, MVT::bf16, ToF32, FPRoundFlag);
3338   }
3339 
3340   if (SrcVT != MVT::i64)
3341     return Op;
3342 
3343   // TODO: Factor out code common with LowerUINT_TO_FP.
3344 
3345   if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
3346     SDLoc DL(Op);
3347     SDValue Src = Op.getOperand(0);
3348 
3349     SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
3350     SDValue FPRoundFlag =
3351         DAG.getIntPtrConstant(0, SDLoc(Op), /*isTarget=*/true);
3352     SDValue FPRound =
3353         DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
3354 
3355     return FPRound;
3356   }
3357 
3358   if (DestVT == MVT::f32)
3359     return LowerINT_TO_FP32(Op, DAG, true);
3360 
3361   assert(DestVT == MVT::f64);
3362   return LowerINT_TO_FP64(Op, DAG, true);
3363 }
3364 
3365 SDValue AMDGPUTargetLowering::LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG,
3366                                                bool Signed) const {
3367   SDLoc SL(Op);
3368 
3369   SDValue Src = Op.getOperand(0);
3370   EVT SrcVT = Src.getValueType();
3371 
3372   assert(SrcVT == MVT::f32 || SrcVT == MVT::f64);
3373 
3374   // The basic idea of converting a floating point number into a pair of 32-bit
3375   // integers is illustrated as follows:
3376   //
3377   //     tf := trunc(val);
3378   //    hif := floor(tf * 2^-32);
3379   //    lof := tf - hif * 2^32; // lof is always positive due to floor.
3380   //     hi := fptoi(hif);
3381   //     lo := fptoi(lof);
3382   //
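       // For example, with val = 2^32 + 2048.0:
       //     tf = 2^32 + 2048.0; hif = 1.0; lof = 2048.0;
       //     hi = 1 and lo = 2048, reassembled as the i64 value 2^32 + 2048.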
3383   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, SrcVT, Src);
3384   SDValue Sign;
3385   if (Signed && SrcVT == MVT::f32) {
3386     // However, a 32-bit floating point number has only a 23-bit mantissa,
3387     // which is not enough to hold all the significant bits of `lof` if val is
3388     // negative. To avoid the loss of precision, we need to take the absolute
3389     // value after truncating and flip the result back based on the original
3390     // signedness.
3391     Sign = DAG.getNode(ISD::SRA, SL, MVT::i32,
3392                        DAG.getNode(ISD::BITCAST, SL, MVT::i32, Trunc),
3393                        DAG.getConstant(31, SL, MVT::i32));
3394     Trunc = DAG.getNode(ISD::FABS, SL, SrcVT, Trunc);
3395   }
3396 
3397   SDValue K0, K1;
3398   if (SrcVT == MVT::f64) {
3399     K0 = DAG.getConstantFP(
3400         llvm::bit_cast<double>(UINT64_C(/*2^-32*/ 0x3df0000000000000)), SL,
3401         SrcVT);
3402     K1 = DAG.getConstantFP(
3403         llvm::bit_cast<double>(UINT64_C(/*-2^32*/ 0xc1f0000000000000)), SL,
3404         SrcVT);
3405   } else {
3406     K0 = DAG.getConstantFP(
3407         llvm::bit_cast<float>(UINT32_C(/*2^-32*/ 0x2f800000)), SL, SrcVT);
3408     K1 = DAG.getConstantFP(
3409         llvm::bit_cast<float>(UINT32_C(/*-2^32*/ 0xcf800000)), SL, SrcVT);
3410   }
3411   // TODO: Should this propagate fast-math-flags?
3412   SDValue Mul = DAG.getNode(ISD::FMUL, SL, SrcVT, Trunc, K0);
3413 
3414   SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, SrcVT, Mul);
3415 
3416   SDValue Fma = DAG.getNode(ISD::FMA, SL, SrcVT, FloorMul, K1, Trunc);
3417 
3418   SDValue Hi = DAG.getNode((Signed && SrcVT == MVT::f64) ? ISD::FP_TO_SINT
3419                                                          : ISD::FP_TO_UINT,
3420                            SL, MVT::i32, FloorMul);
3421   SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
3422 
3423   SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i64,
3424                                DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi}));
3425 
3426   if (Signed && SrcVT == MVT::f32) {
3427     assert(Sign);
3428     // Flip the result based on the signedness, which is either all 0s or 1s.
3429     Sign = DAG.getNode(ISD::BITCAST, SL, MVT::i64,
3430                        DAG.getBuildVector(MVT::v2i32, SL, {Sign, Sign}));
3431     // r := xor(r, sign) - sign;
3432     Result =
3433         DAG.getNode(ISD::SUB, SL, MVT::i64,
3434                     DAG.getNode(ISD::XOR, SL, MVT::i64, Result, Sign), Sign);
3435   }
3436 
3437   return Result;
3438 }
3439 
3440 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
3441   SDLoc DL(Op);
3442   SDValue N0 = Op.getOperand(0);
3443 
3444   // Convert to target node to get known bits
3445   if (N0.getValueType() == MVT::f32)
3446     return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
3447 
3448   if (getTargetMachine().Options.UnsafeFPMath) {
3449     // There is a generic expand for FP_TO_FP16 with unsafe fast math.
3450     return SDValue();
3451   }
3452 
3453   assert(N0.getSimpleValueType() == MVT::f64);
3454 
3455   // f64 -> f16 conversion using round-to-nearest-even rounding mode.
3456   const unsigned ExpMask = 0x7ff;
3457   const unsigned ExpBiasf64 = 1023;
3458   const unsigned ExpBiasf16 = 15;
3459   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
3460   SDValue One = DAG.getConstant(1, DL, MVT::i32);
3461   SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0);
3462   SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U,
3463                            DAG.getConstant(32, DL, MVT::i64));
3464   UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32);
3465   U = DAG.getZExtOrTrunc(U, DL, MVT::i32);
3466   SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
3467                           DAG.getConstant(20, DL, MVT::i64));
3468   E = DAG.getNode(ISD::AND, DL, MVT::i32, E,
3469                   DAG.getConstant(ExpMask, DL, MVT::i32));
3470   // Subtract the fp64 exponent bias (1023) to get the real exponent and
3471   // add the f16 bias (15) to get the biased exponent for the f16 format.
3472   E = DAG.getNode(ISD::ADD, DL, MVT::i32, E,
3473                   DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32));
3474 
3475   SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
3476                           DAG.getConstant(8, DL, MVT::i32));
3477   M = DAG.getNode(ISD::AND, DL, MVT::i32, M,
3478                   DAG.getConstant(0xffe, DL, MVT::i32));
3479 
3480   SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH,
3481                                   DAG.getConstant(0x1ff, DL, MVT::i32));
3482   MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U);
3483 
3484   SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ);
3485   M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set);
3486 
3487   // (M != 0 ? 0x0200 : 0) | 0x7c00;
3488   SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32,
3489       DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32),
3490                       Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32));
3491 
3492   // N = M | (E << 12);
3493   SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M,
3494       DAG.getNode(ISD::SHL, DL, MVT::i32, E,
3495                   DAG.getConstant(12, DL, MVT::i32)));
3496 
3497   // B = clamp(1-E, 0, 13);
3498   SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32,
3499                                   One, E);
3500   SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero);
3501   B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B,
3502                   DAG.getConstant(13, DL, MVT::i32));
3503 
3504   SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M,
3505                                    DAG.getConstant(0x1000, DL, MVT::i32));
3506 
3507   SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B);
3508   SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B);
3509   SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE);
3510   D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1);
3511 
3512   SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT);
3513   SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V,
3514                               DAG.getConstant(0x7, DL, MVT::i32));
3515   V = DAG.getNode(ISD::SRL, DL, MVT::i32, V,
3516                   DAG.getConstant(2, DL, MVT::i32));
3517   SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32),
3518                                One, Zero, ISD::SETEQ);
3519   SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32),
3520                                One, Zero, ISD::SETGT);
3521   V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1);
3522   V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1);
3523 
3524   V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32),
3525                       DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT);
3526   V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32),
3527                       I, V, ISD::SETEQ);
3528 
3529   // Extract the sign bit.
3530   SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
3531                             DAG.getConstant(16, DL, MVT::i32));
3532   Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign,
3533                      DAG.getConstant(0x8000, DL, MVT::i32));
3534 
3535   V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V);
3536   return DAG.getZExtOrTrunc(V, DL, Op.getValueType());
3537 }
3538 
3539 SDValue AMDGPUTargetLowering::LowerFP_TO_INT(const SDValue Op,
3540                                              SelectionDAG &DAG) const {
3541   SDValue Src = Op.getOperand(0);
3542   unsigned OpOpcode = Op.getOpcode();
3543   EVT SrcVT = Src.getValueType();
3544   EVT DestVT = Op.getValueType();
3545 
3546   // Will be selected natively
3547   if (SrcVT == MVT::f16 && DestVT == MVT::i16)
3548     return Op;
3549 
3550   if (SrcVT == MVT::bf16) {
3551     SDLoc DL(Op);
3552     SDValue PromotedSrc = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
3553     return DAG.getNode(Op.getOpcode(), DL, DestVT, PromotedSrc);
3554   }
3555 
3556   // Promote i16 to i32
3557   if (DestVT == MVT::i16 && (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
3558     SDLoc DL(Op);
3559 
3560     SDValue FpToInt32 = DAG.getNode(OpOpcode, DL, MVT::i32, Src);
3561     return DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToInt32);
3562   }
3563 
3564   if (DestVT != MVT::i64)
3565     return Op;
3566 
3567   if (SrcVT == MVT::f16 ||
3568       (SrcVT == MVT::f32 && Src.getOpcode() == ISD::FP16_TO_FP)) {
3569     SDLoc DL(Op);
3570 
3571     SDValue FpToInt32 = DAG.getNode(OpOpcode, DL, MVT::i32, Src);
3572     unsigned Ext =
3573         OpOpcode == ISD::FP_TO_SINT ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3574     return DAG.getNode(Ext, DL, MVT::i64, FpToInt32);
3575   }
3576 
3577   if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
3578     return LowerFP_TO_INT64(Op, DAG, OpOpcode == ISD::FP_TO_SINT);
3579 
3580   return SDValue();
3581 }
3582 
3583 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
3584                                                      SelectionDAG &DAG) const {
3585   EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3586   MVT VT = Op.getSimpleValueType();
3587   MVT ScalarVT = VT.getScalarType();
3588 
3589   assert(VT.isVector());
3590 
3591   SDValue Src = Op.getOperand(0);
3592   SDLoc DL(Op);
3593 
3594   // TODO: Don't scalarize on Evergreen?
3595   unsigned NElts = VT.getVectorNumElements();
3596   SmallVector<SDValue, 8> Args;
3597   DAG.ExtractVectorElements(Src, Args, 0, NElts);
3598 
3599   SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
3600   for (unsigned I = 0; I < NElts; ++I)
3601     Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
3602 
3603   return DAG.getBuildVector(VT, DL, Args);
3604 }
3605 
3606 //===----------------------------------------------------------------------===//
3607 // Custom DAG optimizations
3608 //===----------------------------------------------------------------------===//
3609 
3610 static bool isU24(SDValue Op, SelectionDAG &DAG) {
3611   return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
3612 }
3613 
3614 static bool isI24(SDValue Op, SelectionDAG &DAG) {
3615   EVT VT = Op.getValueType();
3616   return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
3617                                      // as unsigned 24-bit values.
3618          AMDGPUTargetLowering::numBitsSigned(Op, DAG) <= 24;
3619 }
3620 
3621 static SDValue simplifyMul24(SDNode *Node24,
3622                              TargetLowering::DAGCombinerInfo &DCI) {
3623   SelectionDAG &DAG = DCI.DAG;
3624   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3625   bool IsIntrin = Node24->getOpcode() == ISD::INTRINSIC_WO_CHAIN;
3626 
3627   SDValue LHS = IsIntrin ? Node24->getOperand(1) : Node24->getOperand(0);
3628   SDValue RHS = IsIntrin ? Node24->getOperand(2) : Node24->getOperand(1);
3629   unsigned NewOpcode = Node24->getOpcode();
3630   if (IsIntrin) {
3631     unsigned IID = Node24->getConstantOperandVal(0);
3632     switch (IID) {
3633     case Intrinsic::amdgcn_mul_i24:
3634       NewOpcode = AMDGPUISD::MUL_I24;
3635       break;
3636     case Intrinsic::amdgcn_mul_u24:
3637       NewOpcode = AMDGPUISD::MUL_U24;
3638       break;
3639     case Intrinsic::amdgcn_mulhi_i24:
3640       NewOpcode = AMDGPUISD::MULHI_I24;
3641       break;
3642     case Intrinsic::amdgcn_mulhi_u24:
3643       NewOpcode = AMDGPUISD::MULHI_U24;
3644       break;
3645     default:
3646       llvm_unreachable("Expected 24-bit mul intrinsic");
3647     }
3648   }
3649 
3650   APInt Demanded = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 24);
3651 
3652   // First try to simplify using SimplifyMultipleUseDemandedBits which allows
3653   // the operands to have other uses, but will only perform simplifications that
3654   // involve bypassing some nodes for this user.
3655   SDValue DemandedLHS = TLI.SimplifyMultipleUseDemandedBits(LHS, Demanded, DAG);
3656   SDValue DemandedRHS = TLI.SimplifyMultipleUseDemandedBits(RHS, Demanded, DAG);
3657   if (DemandedLHS || DemandedRHS)
3658     return DAG.getNode(NewOpcode, SDLoc(Node24), Node24->getVTList(),
3659                        DemandedLHS ? DemandedLHS : LHS,
3660                        DemandedRHS ? DemandedRHS : RHS);
3661 
3662   // Now try SimplifyDemandedBits which can simplify the nodes used by our
3663   // operands if this node is the only user.
3664   if (TLI.SimplifyDemandedBits(LHS, Demanded, DCI))
3665     return SDValue(Node24, 0);
3666   if (TLI.SimplifyDemandedBits(RHS, Demanded, DCI))
3667     return SDValue(Node24, 0);
3668 
3669   return SDValue();
3670 }
3671 
3672 template <typename IntTy>
3673 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
3674                                uint32_t Width, const SDLoc &DL) {
3675   if (Width + Offset < 32) {
3676     uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
3677     IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
3678     return DAG.getConstant(Result, DL, MVT::i32);
3679   }
3680 
3681   return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
3682 }
3683 
3684 static bool hasVolatileUser(SDNode *Val) {
3685   for (SDNode *U : Val->uses()) {
3686     if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
3687       if (M->isVolatile())
3688         return true;
3689     }
3690   }
3691 
3692   return false;
3693 }
3694 
3695 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
3696   // i32 vectors are the canonical memory type.
3697   if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
3698     return false;
3699 
3700   if (!VT.isByteSized())
3701     return false;
3702 
3703   unsigned Size = VT.getStoreSize();
3704 
3705   if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
3706     return false;
3707 
3708   if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
3709     return false;
3710 
3711   return true;
3712 }
3713 
3714 // Replace load of an illegal type with a load of a bitcast to a friendlier
3715 // type.
3716 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
3717                                                  DAGCombinerInfo &DCI) const {
3718   if (!DCI.isBeforeLegalize())
3719     return SDValue();
3720 
3721   LoadSDNode *LN = cast<LoadSDNode>(N);
3722   if (!LN->isSimple() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
3723     return SDValue();
3724 
3725   SDLoc SL(N);
3726   SelectionDAG &DAG = DCI.DAG;
3727   EVT VT = LN->getMemoryVT();
3728 
3729   unsigned Size = VT.getStoreSize();
3730   Align Alignment = LN->getAlign();
3731   if (Alignment < Size && isTypeLegal(VT)) {
3732     unsigned IsFast;
3733     unsigned AS = LN->getAddressSpace();
3734 
3735     // Expand unaligned loads earlier than legalization. Due to visitation order
3736     // problems during legalization, the emitted instructions to pack and unpack
3737     // the bytes again are not eliminated in the case of an unaligned copy.
3738     if (!allowsMisalignedMemoryAccesses(
3739             VT, AS, Alignment, LN->getMemOperand()->getFlags(), &IsFast)) {
3740       if (VT.isVector())
3741         return SplitVectorLoad(SDValue(LN, 0), DAG);
3742 
3743       SDValue Ops[2];
3744       std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
3745 
3746       return DAG.getMergeValues(Ops, SDLoc(N));
3747     }
3748 
3749     if (!IsFast)
3750       return SDValue();
3751   }
3752 
3753   if (!shouldCombineMemoryType(VT))
3754     return SDValue();
3755 
3756   EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3757 
3758   SDValue NewLoad
3759     = DAG.getLoad(NewVT, SL, LN->getChain(),
3760                   LN->getBasePtr(), LN->getMemOperand());
3761 
3762   SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
3763   DCI.CombineTo(N, BC, NewLoad.getValue(1));
3764   return SDValue(N, 0);
3765 }
3766 
3767 // Replace store of an illegal type with a store of a bitcast to a friendlier
3768 // type.
3769 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
3770                                                   DAGCombinerInfo &DCI) const {
3771   if (!DCI.isBeforeLegalize())
3772     return SDValue();
3773 
3774   StoreSDNode *SN = cast<StoreSDNode>(N);
3775   if (!SN->isSimple() || !ISD::isNormalStore(SN))
3776     return SDValue();
3777 
3778   EVT VT = SN->getMemoryVT();
3779   unsigned Size = VT.getStoreSize();
3780 
3781   SDLoc SL(N);
3782   SelectionDAG &DAG = DCI.DAG;
3783   Align Alignment = SN->getAlign();
3784   if (Alignment < Size && isTypeLegal(VT)) {
3785     unsigned IsFast;
3786     unsigned AS = SN->getAddressSpace();
3787 
3788     // Expand unaligned stores earlier than legalization. Due to visitation
3789     // order problems during legalization, the emitted instructions to pack and
3790     // unpack the bytes again are not eliminated in the case of an unaligned
3791     // copy.
3792     if (!allowsMisalignedMemoryAccesses(
3793             VT, AS, Alignment, SN->getMemOperand()->getFlags(), &IsFast)) {
3794       if (VT.isVector())
3795         return SplitVectorStore(SDValue(SN, 0), DAG);
3796 
3797       return expandUnalignedStore(SN, DAG);
3798     }
3799 
3800     if (!IsFast)
3801       return SDValue();
3802   }
3803 
3804   if (!shouldCombineMemoryType(VT))
3805     return SDValue();
3806 
3807   EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3808   SDValue Val = SN->getValue();
3809 
3810   //DCI.AddToWorklist(Val.getNode());
3811 
3812   bool OtherUses = !Val.hasOneUse();
3813   SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
3814   if (OtherUses) {
3815     SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
3816     DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
3817   }
3818 
3819   return DAG.getStore(SN->getChain(), SL, CastVal,
3820                       SN->getBasePtr(), SN->getMemOperand());
3821 }
3822 
3823 // FIXME: This should go in generic DAG combiner with an isTruncateFree check,
3824 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU
3825 // issues.
3826 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
3827                                                         DAGCombinerInfo &DCI) const {
3828   SelectionDAG &DAG = DCI.DAG;
3829   SDValue N0 = N->getOperand(0);
3830 
3831   // (vt2 (assertzext (truncate vt0:x), vt1)) ->
3832   //     (vt2 (truncate (assertzext vt0:x, vt1)))
3833   if (N0.getOpcode() == ISD::TRUNCATE) {
3834     SDValue N1 = N->getOperand(1);
3835     EVT ExtVT = cast<VTSDNode>(N1)->getVT();
3836     SDLoc SL(N);
3837 
3838     SDValue Src = N0.getOperand(0);
3839     EVT SrcVT = Src.getValueType();
3840     if (SrcVT.bitsGE(ExtVT)) {
3841       SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
3842       return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
3843     }
3844   }
3845 
3846   return SDValue();
3847 }
3848 
3849 SDValue AMDGPUTargetLowering::performIntrinsicWOChainCombine(
3850   SDNode *N, DAGCombinerInfo &DCI) const {
3851   unsigned IID = N->getConstantOperandVal(0);
3852   switch (IID) {
3853   case Intrinsic::amdgcn_mul_i24:
3854   case Intrinsic::amdgcn_mul_u24:
3855   case Intrinsic::amdgcn_mulhi_i24:
3856   case Intrinsic::amdgcn_mulhi_u24:
3857     return simplifyMul24(N, DCI);
3858   case Intrinsic::amdgcn_fract:
3859   case Intrinsic::amdgcn_rsq:
3860   case Intrinsic::amdgcn_rcp_legacy:
3861   case Intrinsic::amdgcn_rsq_legacy:
3862   case Intrinsic::amdgcn_rsq_clamp: {
3863     // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted
3864     SDValue Src = N->getOperand(1);
3865     return Src.isUndef() ? Src : SDValue();
3866   }
3867   case Intrinsic::amdgcn_frexp_exp: {
3868     // frexp_exp (fneg x) -> frexp_exp x
3869     // frexp_exp (fabs x) -> frexp_exp x
3870     // frexp_exp (fneg (fabs x)) -> frexp_exp x
3871     SDValue Src = N->getOperand(1);
3872     SDValue PeekSign = peekFPSignOps(Src);
3873     if (PeekSign == Src)
3874       return SDValue();
3875     return SDValue(DCI.DAG.UpdateNodeOperands(N, N->getOperand(0), PeekSign),
3876                    0);
3877   }
3878   default:
3879     return SDValue();
3880   }
3881 }
3882 
3883 /// Split the 64-bit value \p LHS into two 32-bit components, and apply the
3884 /// binary operation \p Opc to each of them with the corresponding constant operand.
3885 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
3886   DAGCombinerInfo &DCI, const SDLoc &SL,
3887   unsigned Opc, SDValue LHS,
3888   uint32_t ValLo, uint32_t ValHi) const {
3889   SelectionDAG &DAG = DCI.DAG;
3890   SDValue Lo, Hi;
3891   std::tie(Lo, Hi) = split64BitValue(LHS, DAG);
3892 
3893   SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
3894   SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);
3895 
3896   SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
3897   SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);
3898 
3899   // Re-visit the ands. It's possible we eliminated one of them and it could
3900   // simplify the vector.
3901   DCI.AddToWorklist(Lo.getNode());
3902   DCI.AddToWorklist(Hi.getNode());
3903 
3904   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
3905   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3906 }
3907 
3908 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
3909                                                 DAGCombinerInfo &DCI) const {
3910   EVT VT = N->getValueType(0);
3911 
3912   ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3913   if (!RHS)
3914     return SDValue();
3915 
3916   SDValue LHS = N->getOperand(0);
3917   unsigned RHSVal = RHS->getZExtValue();
3918   if (!RHSVal)
3919     return LHS;
3920 
3921   SDLoc SL(N);
3922   SelectionDAG &DAG = DCI.DAG;
3923 
3924   switch (LHS->getOpcode()) {
3925   default:
3926     break;
3927   case ISD::ZERO_EXTEND:
3928   case ISD::SIGN_EXTEND:
3929   case ISD::ANY_EXTEND: {
3930     SDValue X = LHS->getOperand(0);
3931 
3932     if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
3933         isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
3934       // Prefer build_vector as the canonical form if packed types are legal.
3935       // (shl ([asz]ext i16:x), 16) -> (build_vector 0, x)
3936       SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
3937        { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
3938       return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3939     }
3940 
3941     // shl (ext x) => zext (shl x), if shift does not overflow int
3942     if (VT != MVT::i64)
3943       break;
3944     KnownBits Known = DAG.computeKnownBits(X);
3945     unsigned LZ = Known.countMinLeadingZeros();
3946     if (LZ < RHSVal)
3947       break;
3948     EVT XVT = X.getValueType();
3949     SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
3950     return DAG.getZExtOrTrunc(Shl, SL, VT);
3951   }
3952   }
3953 
3954   if (VT != MVT::i64)
3955     return SDValue();
3956 
3957   // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
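       // e.g. (shl i64:x, 40) -> (build_pair 0, (shl lo_32(x), 8))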
3958 
3959   // On some subtargets, 64-bit shift is a quarter rate instruction. In the
3960   // common case, splitting this into a move and a 32-bit shift is faster and
3961   // the same code size.
3962   if (RHSVal < 32)
3963     return SDValue();
3964 
3965   SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
3966 
3967   SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
3968   SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
3969 
3970   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3971 
3972   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
3973   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3974 }
3975 
3976 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
3977                                                 DAGCombinerInfo &DCI) const {
3978   if (N->getValueType(0) != MVT::i64)
3979     return SDValue();
3980 
3981   const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3982   if (!RHS)
3983     return SDValue();
3984 
3985   SelectionDAG &DAG = DCI.DAG;
3986   SDLoc SL(N);
3987   unsigned RHSVal = RHS->getZExtValue();
3988 
3989   // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
3990   if (RHSVal == 32) {
3991     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3992     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3993                                    DAG.getConstant(31, SL, MVT::i32));
3994 
3995     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
3996     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3997   }
3998 
3999   // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
4000   if (RHSVal == 63) {
4001     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
4002     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
4003                                    DAG.getConstant(31, SL, MVT::i32));
4004     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
4005     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
4006   }
4007 
4008   return SDValue();
4009 }
4010 
4011 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
4012                                                 DAGCombinerInfo &DCI) const {
4013   auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
4014   if (!RHS)
4015     return SDValue();
4016 
4017   EVT VT = N->getValueType(0);
4018   SDValue LHS = N->getOperand(0);
4019   unsigned ShiftAmt = RHS->getZExtValue();
4020   SelectionDAG &DAG = DCI.DAG;
4021   SDLoc SL(N);
4022 
4023   // fold (srl (and x, c1 << c2), c2) -> (and (srl x, c2), c1)
4024   // this improves the ability to match BFE patterns in isel.
4025   if (LHS.getOpcode() == ISD::AND) {
4026     if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
4027       unsigned MaskIdx, MaskLen;
4028       if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) &&
4029           MaskIdx == ShiftAmt) {
4030         return DAG.getNode(
4031             ISD::AND, SL, VT,
4032             DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
4033             DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
4034       }
4035     }
4036   }
4037 
4038   if (VT != MVT::i64)
4039     return SDValue();
4040 
4041   if (ShiftAmt < 32)
4042     return SDValue();
4043 
4044   // srl i64:x, C for C >= 32
4045   // =>
4046   //   build_pair (srl hi_32(x), C - 32), 0
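  // For example, (srl i64:x, 40) with x = 0x0123456789ABCDEF gives
  // hi_32(x) = 0x01234567 and (srl 0x01234567, 8) = 0x00012345, so the
  // rebuilt 64-bit value is 0x0000000000012345, matching x >> 40.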
4047   SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
4048 
4049   SDValue Hi = getHiHalf64(LHS, DAG);
4050 
4051   SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
4052   SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
4053 
4054   SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
4055 
4056   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
4057 }
4058 
4059 SDValue AMDGPUTargetLowering::performTruncateCombine(
4060   SDNode *N, DAGCombinerInfo &DCI) const {
4061   SDLoc SL(N);
4062   SelectionDAG &DAG = DCI.DAG;
4063   EVT VT = N->getValueType(0);
4064   SDValue Src = N->getOperand(0);
4065 
4066   // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
4067   if (Src.getOpcode() == ISD::BITCAST && !VT.isVector()) {
4068     SDValue Vec = Src.getOperand(0);
4069     if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
4070       SDValue Elt0 = Vec.getOperand(0);
4071       EVT EltVT = Elt0.getValueType();
4072       if (VT.getFixedSizeInBits() <= EltVT.getFixedSizeInBits()) {
4073         if (EltVT.isFloatingPoint()) {
4074           Elt0 = DAG.getNode(ISD::BITCAST, SL,
4075                              EltVT.changeTypeToInteger(), Elt0);
4076         }
4077 
4078         return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0);
4079       }
4080     }
4081   }
4082 
4083   // Equivalent of above for accessing the high element of a vector as an
4084   // integer operation.
4085   // trunc (srl (bitcast (build_vector x, y)), 16) -> trunc (bitcast y)
4086   if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
4087     if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
4088       if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
4089         SDValue BV = stripBitcast(Src.getOperand(0));
4090         if (BV.getOpcode() == ISD::BUILD_VECTOR &&
4091             BV.getValueType().getVectorNumElements() == 2) {
4092           SDValue SrcElt = BV.getOperand(1);
4093           EVT SrcEltVT = SrcElt.getValueType();
4094           if (SrcEltVT.isFloatingPoint()) {
4095             SrcElt = DAG.getNode(ISD::BITCAST, SL,
4096                                  SrcEltVT.changeTypeToInteger(), SrcElt);
4097           }
4098 
4099           return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt);
4100         }
4101       }
4102     }
4103   }
4104 
4105   // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
4106   //
4107   // i16 (trunc (srl i64:x, K)), K <= 16 ->
4108   //     i16 (trunc (srl (i32 (trunc x), K)))
4109   if (VT.getScalarSizeInBits() < 32) {
4110     EVT SrcVT = Src.getValueType();
4111     if (SrcVT.getScalarSizeInBits() > 32 &&
4112         (Src.getOpcode() == ISD::SRL ||
4113          Src.getOpcode() == ISD::SRA ||
4114          Src.getOpcode() == ISD::SHL)) {
4115       SDValue Amt = Src.getOperand(1);
4116       KnownBits Known = DAG.computeKnownBits(Amt);
4117 
4118       // - For left shifts, do the transform as long as the shift
4119       //   amount is still legal for i32, so when ShiftAmt < 32 (<= 31)
4120       // - For right shift, do it if ShiftAmt <= (32 - Size) to avoid
4121       //   losing information stored in the high bits when truncating.
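      // For instance, i16 (trunc (srl i64:x, 12)) qualifies: 12 <= 32 - 16,
      // so the same bits of x reach the i16 result when the shift is instead
      // performed on (i32 (trunc x)).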
4122       const unsigned MaxCstSize =
4123           (Src.getOpcode() == ISD::SHL) ? 31 : (32 - VT.getScalarSizeInBits());
4124       if (Known.getMaxValue().ule(MaxCstSize)) {
4125         EVT MidVT = VT.isVector() ?
4126           EVT::getVectorVT(*DAG.getContext(), MVT::i32,
4127                            VT.getVectorNumElements()) : MVT::i32;
4128 
4129         EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout());
4130         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT,
4131                                     Src.getOperand(0));
4132         DCI.AddToWorklist(Trunc.getNode());
4133 
4134         if (Amt.getValueType() != NewShiftVT) {
4135           Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT);
4136           DCI.AddToWorklist(Amt.getNode());
4137         }
4138 
4139         SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT,
4140                                           Trunc, Amt);
4141         return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift);
4142       }
4143     }
4144   }
4145 
4146   return SDValue();
4147 }
4148 
4149 // We need to specifically handle i64 mul here to avoid unnecessary conversion
4150 // instructions. If we only match on the legalized i64 mul expansion,
4151 // SimplifyDemandedBits will be unable to remove them because there will be
4152 // multiple uses due to the separate mul + mulh[su].
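// For the wider-than-32-bit case below, the result is assembled as
// (build_pair (mul24 x, y), (mulhi24 x, y)), i.e. the low and high 32-bit
// halves of the full 48-bit product of the two 24-bit inputs.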
4153 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
4154                         SDValue N0, SDValue N1, unsigned Size, bool Signed) {
4155   if (Size <= 32) {
4156     unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
4157     return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
4158   }
4159 
4160   unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
4161   unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
4162 
4163   SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
4164   SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
4165 
4166   return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64, MulLo, MulHi);
4167 }
4168 
4169 /// If \p V is an add of a constant 1, returns the other operand. Otherwise
4170 /// return SDValue().
4171 static SDValue getAddOneOp(const SDNode *V) {
4172   if (V->getOpcode() != ISD::ADD)
4173     return SDValue();
4174 
4175   return isOneConstant(V->getOperand(1)) ? V->getOperand(0) : SDValue();
4176 }
4177 
4178 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
4179                                                 DAGCombinerInfo &DCI) const {
4180   EVT VT = N->getValueType(0);
4181 
4182   // Don't generate 24-bit multiplies on values that are in SGPRs, since
4183   // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
4184   // unnecessarily). isDivergent() is used as an approximation of whether the
4185   // value is in an SGPR.
4186   if (!N->isDivergent())
4187     return SDValue();
4188 
4189   unsigned Size = VT.getSizeInBits();
4190   if (VT.isVector() || Size > 64)
4191     return SDValue();
4192 
4193   SelectionDAG &DAG = DCI.DAG;
4194   SDLoc DL(N);
4195 
4196   SDValue N0 = N->getOperand(0);
4197   SDValue N1 = N->getOperand(1);
4198 
4199   // Undo InstCombine canonicalize X * (Y + 1) -> X * Y + X to enable mad
4200   // matching.
4201 
4202   // mul x, (add y, 1) -> add (mul x, y), x
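  // e.g. mul i32 %x, (add i32 %y, 1) is rewritten to add (mul %x, %y), %x so
  // that instruction selection can match a multiply-add (mad) pattern.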
4203   auto IsFoldableAdd = [](SDValue V) -> SDValue {
4204     SDValue AddOp = getAddOneOp(V.getNode());
4205     if (!AddOp)
4206       return SDValue();
4207 
4208     if (V.hasOneUse() || all_of(V->uses(), [](const SDNode *U) -> bool {
4209           return U->getOpcode() == ISD::MUL;
4210         }))
4211       return AddOp;
4212 
4213     return SDValue();
4214   };
4215 
4216   // FIXME: The selection pattern is not properly checking for commuted
4217   // operands, so we have to place the mul in the LHS
4218   if (SDValue MulOper = IsFoldableAdd(N0)) {
4219     SDValue MulVal = DAG.getNode(N->getOpcode(), DL, VT, N1, MulOper);
4220     return DAG.getNode(ISD::ADD, DL, VT, MulVal, N1);
4221   }
4222 
4223   if (SDValue MulOper = IsFoldableAdd(N1)) {
4224     SDValue MulVal = DAG.getNode(N->getOpcode(), DL, VT, N0, MulOper);
4225     return DAG.getNode(ISD::ADD, DL, VT, MulVal, N0);
4226   }
4227 
4228   // Skip if already mul24.
4229   if (N->getOpcode() != ISD::MUL)
4230     return SDValue();
4231 
4232   // There are i16 integer mul/mad.
4233   if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
4234     return SDValue();
4235 
4236   // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
4237   // in the source into any_extends if the result of the mul is truncated. Since
4238   // we can assume the high bits are whatever we want, use the underlying value
4239   // to avoid the unknown high bits from interfering.
4240   if (N0.getOpcode() == ISD::ANY_EXTEND)
4241     N0 = N0.getOperand(0);
4242 
4243   if (N1.getOpcode() == ISD::ANY_EXTEND)
4244     N1 = N1.getOperand(0);
4245 
4246   SDValue Mul;
4247 
4248   if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
4249     N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
4250     N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
4251     Mul = getMul24(DAG, DL, N0, N1, Size, false);
4252   } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
4253     N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
4254     N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
4255     Mul = getMul24(DAG, DL, N0, N1, Size, true);
4256   } else {
4257     return SDValue();
4258   }
4259 
4260   // We need to use sext even for MUL_U24, because MUL_U24 is used
4261   // for signed multiply of 8 and 16-bit types.
4262   return DAG.getSExtOrTrunc(Mul, DL, VT);
4263 }
4264 
4265 SDValue
4266 AMDGPUTargetLowering::performMulLoHiCombine(SDNode *N,
4267                                             DAGCombinerInfo &DCI) const {
4268   if (N->getValueType(0) != MVT::i32)
4269     return SDValue();
4270 
4271   SelectionDAG &DAG = DCI.DAG;
4272   SDLoc DL(N);
4273 
4274   SDValue N0 = N->getOperand(0);
4275   SDValue N1 = N->getOperand(1);
4276 
4277   // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
4278   // in the source into any_extends if the result of the mul is truncated. Since
4279   // we can assume the high bits are whatever we want, use the underlying value
4280   // to avoid the unknown high bits from interfering.
4281   if (N0.getOpcode() == ISD::ANY_EXTEND)
4282     N0 = N0.getOperand(0);
4283   if (N1.getOpcode() == ISD::ANY_EXTEND)
4284     N1 = N1.getOperand(0);
4285 
4286   // Try to use two fast 24-bit multiplies (one for each half of the result)
4287   // instead of one slow extending multiply.
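  // When both operands are known to fit in 24 bits, the full product fits in
  // 48 bits, so the 24-bit low/high multiplies below reproduce the exact
  // 32-bit halves that [su]mul_lohi would produce.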
4288   unsigned LoOpcode, HiOpcode;
4289   if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
4290     N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
4291     N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
4292     LoOpcode = AMDGPUISD::MUL_U24;
4293     HiOpcode = AMDGPUISD::MULHI_U24;
4294   } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
4295     N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
4296     N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
4297     LoOpcode = AMDGPUISD::MUL_I24;
4298     HiOpcode = AMDGPUISD::MULHI_I24;
4299   } else {
4300     return SDValue();
4301   }
4302 
4303   SDValue Lo = DAG.getNode(LoOpcode, DL, MVT::i32, N0, N1);
4304   SDValue Hi = DAG.getNode(HiOpcode, DL, MVT::i32, N0, N1);
4305   DCI.CombineTo(N, Lo, Hi);
4306   return SDValue(N, 0);
4307 }
4308 
4309 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
4310                                                   DAGCombinerInfo &DCI) const {
4311   EVT VT = N->getValueType(0);
4312 
4313   if (!Subtarget->hasMulI24() || VT.isVector())
4314     return SDValue();
4315 
4316   // Don't generate 24-bit multiplies on values that are in SGPRs, since
4317   // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
4318   // unnecessarily). isDivergent() is used as an approximation of whether the
4319   // value is in an SGPR.
4320   // This doesn't apply if no s_mul_hi is available (since we'll end up with a
4321   // valu op anyway)
4322   if (Subtarget->hasSMulHi() && !N->isDivergent())
4323     return SDValue();
4324 
4325   SelectionDAG &DAG = DCI.DAG;
4326   SDLoc DL(N);
4327 
4328   SDValue N0 = N->getOperand(0);
4329   SDValue N1 = N->getOperand(1);
4330 
4331   if (!isI24(N0, DAG) || !isI24(N1, DAG))
4332     return SDValue();
4333 
4334   N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
4335   N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
4336 
4337   SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
4338   DCI.AddToWorklist(Mulhi.getNode());
4339   return DAG.getSExtOrTrunc(Mulhi, DL, VT);
4340 }
4341 
4342 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
4343                                                   DAGCombinerInfo &DCI) const {
4344   EVT VT = N->getValueType(0);
4345 
4346   if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
4347     return SDValue();
4348 
4349   // Don't generate 24-bit multiplies on values that are in SGPRs, since
4350   // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
4351   // unnecessarily). isDivergent() is used as an approximation of whether the
4352   // value is in an SGPR.
4353   // This doesn't apply if no s_mul_hi is available (since we'll end up with a
4354   // valu op anyway)
4355   if (Subtarget->hasSMulHi() && !N->isDivergent())
4356     return SDValue();
4357 
4358   SelectionDAG &DAG = DCI.DAG;
4359   SDLoc DL(N);
4360 
4361   SDValue N0 = N->getOperand(0);
4362   SDValue N1 = N->getOperand(1);
4363 
4364   if (!isU24(N0, DAG) || !isU24(N1, DAG))
4365     return SDValue();
4366 
4367   N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
4368   N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
4369 
4370   SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
4371   DCI.AddToWorklist(Mulhi.getNode());
4372   return DAG.getZExtOrTrunc(Mulhi, DL, VT);
4373 }
4374 
4375 SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
4376                                           SDValue Op,
4377                                           const SDLoc &DL,
4378                                           unsigned Opc) const {
4379   EVT VT = Op.getValueType();
4380   EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
4381   if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
4382                               LegalVT != MVT::i16))
4383     return SDValue();
4384 
4385   if (VT != MVT::i32)
4386     Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);
4387 
4388   SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
4389   if (VT != MVT::i32)
4390     FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);
4391 
4392   return FFBX;
4393 }
4394 
4395 // The native instructions return -1 on 0 input. Optimize out a select that
4396 // produces -1 on 0.
4397 //
4398 // TODO: If zero is not undef, we could also do this if the output is compared
4399 // against the bitwidth.
4400 //
4401 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
4402 SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
4403                                                  SDValue LHS, SDValue RHS,
4404                                                  DAGCombinerInfo &DCI) const {
4405   if (!isNullConstant(Cond.getOperand(1)))
4406     return SDValue();
4407 
4408   SelectionDAG &DAG = DCI.DAG;
4409   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
4410   SDValue CmpLHS = Cond.getOperand(0);
4411 
4412   // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
4413   // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x
4414   if (CCOpcode == ISD::SETEQ &&
4415       (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
4416       RHS.getOperand(0) == CmpLHS && isAllOnesConstant(LHS)) {
4417     unsigned Opc =
4418         isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
4419     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
4420   }
4421 
4422   // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
4423   // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x
4424   if (CCOpcode == ISD::SETNE &&
4425       (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) &&
4426       LHS.getOperand(0) == CmpLHS && isAllOnesConstant(RHS)) {
4427     unsigned Opc =
4428         isCttzOpc(LHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
4429 
4430     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
4431   }
4432 
4433   return SDValue();
4434 }
4435 
4436 static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
4437                                          unsigned Op,
4438                                          const SDLoc &SL,
4439                                          SDValue Cond,
4440                                          SDValue N1,
4441                                          SDValue N2) {
4442   SelectionDAG &DAG = DCI.DAG;
4443   EVT VT = N1.getValueType();
4444 
4445   SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
4446                                   N1.getOperand(0), N2.getOperand(0));
4447   DCI.AddToWorklist(NewSelect.getNode());
4448   return DAG.getNode(Op, SL, VT, NewSelect);
4449 }
4450 
4451 // Pull a free FP operation out of a select so it may fold into uses.
4452 //
4453 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
4454 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
4455 //
4456 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
4457 // select c, (fabs x), +k -> fabs (select c, x, k)
4458 SDValue
4459 AMDGPUTargetLowering::foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
4460                                            SDValue N) const {
4461   SelectionDAG &DAG = DCI.DAG;
4462   SDValue Cond = N.getOperand(0);
4463   SDValue LHS = N.getOperand(1);
4464   SDValue RHS = N.getOperand(2);
4465 
4466   EVT VT = N.getValueType();
4467   if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
4468       (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
4469     if (!AMDGPUTargetLowering::allUsesHaveSourceMods(N.getNode()))
4470       return SDValue();
4471 
4472     return distributeOpThroughSelect(DCI, LHS.getOpcode(),
4473                                      SDLoc(N), Cond, LHS, RHS);
4474   }
4475 
4476   bool Inv = false;
4477   if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
4478     std::swap(LHS, RHS);
4479     Inv = true;
4480   }
4481 
4482   // TODO: Support vector constants.
4483   ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
4484   if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS &&
4485       !selectSupportsSourceMods(N.getNode())) {
4486     SDLoc SL(N);
4487     // If one side is an fneg/fabs and the other is a constant, we can push the
4488     // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
4489     SDValue NewLHS = LHS.getOperand(0);
4490     SDValue NewRHS = RHS;
4491 
4492     // Careful: if the neg can be folded up, don't try to pull it back down.
4493     bool ShouldFoldNeg = true;
4494 
4495     if (NewLHS.hasOneUse()) {
4496       unsigned Opc = NewLHS.getOpcode();
4497       if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(NewLHS.getNode()))
4498         ShouldFoldNeg = false;
4499       if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
4500         ShouldFoldNeg = false;
4501     }
4502 
4503     if (ShouldFoldNeg) {
4504       if (LHS.getOpcode() == ISD::FABS && CRHS->isNegative())
4505         return SDValue();
4506 
4507       // We're going to be forced to use a source modifier anyway, there's no
4508       // point to pulling the negate out unless we can get a size reduction by
4509       // negating the constant.
4510       //
4511       // TODO: Generalize to use getCheaperNegatedExpression which doesn't know
4512       // about cheaper constants.
4513       if (NewLHS.getOpcode() == ISD::FABS &&
4514           getConstantNegateCost(CRHS) != NegatibleCost::Cheaper)
4515         return SDValue();
4516 
4517       if (!AMDGPUTargetLowering::allUsesHaveSourceMods(N.getNode()))
4518         return SDValue();
4519 
4520       if (LHS.getOpcode() == ISD::FNEG)
4521         NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4522 
4523       if (Inv)
4524         std::swap(NewLHS, NewRHS);
4525 
4526       SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
4527                                       Cond, NewLHS, NewRHS);
4528       DCI.AddToWorklist(NewSelect.getNode());
4529       return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
4530     }
4531   }
4532 
4533   return SDValue();
4534 }
4535 
4536 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
4537                                                    DAGCombinerInfo &DCI) const {
4538   if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
4539     return Folded;
4540 
4541   SDValue Cond = N->getOperand(0);
4542   if (Cond.getOpcode() != ISD::SETCC)
4543     return SDValue();
4544 
4545   EVT VT = N->getValueType(0);
4546   SDValue LHS = Cond.getOperand(0);
4547   SDValue RHS = Cond.getOperand(1);
4548   SDValue CC = Cond.getOperand(2);
4549 
4550   SDValue True = N->getOperand(1);
4551   SDValue False = N->getOperand(2);
4552 
4553   if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
4554     SelectionDAG &DAG = DCI.DAG;
4555     if (DAG.isConstantValueOfAnyType(True) &&
4556         !DAG.isConstantValueOfAnyType(False)) {
4557       // Swap cmp + select pair to move constant to false input.
4558       // This will allow using VOPC cndmasks more often.
4559       // select (setcc x, y), k, x -> select (setccinv x, y), x, k
4560 
4561       SDLoc SL(N);
4562       ISD::CondCode NewCC =
4563           getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), LHS.getValueType());
4564 
4565       SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
4566       return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
4567     }
4568 
4569     if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
4570       SDValue MinMax
4571         = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
4572       // Revisit this node so we can catch min3/max3/med3 patterns.
4573       //DCI.AddToWorklist(MinMax.getNode());
4574       return MinMax;
4575     }
4576   }
4577 
4578   // There's no reason to not do this if the condition has other uses.
4579   return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
4580 }
4581 
4582 static bool isInv2Pi(const APFloat &APF) {
4583   static const APFloat KF16(APFloat::IEEEhalf(), APInt(16, 0x3118));
4584   static const APFloat KF32(APFloat::IEEEsingle(), APInt(32, 0x3e22f983));
4585   static const APFloat KF64(APFloat::IEEEdouble(), APInt(64, 0x3fc45f306dc9c882));
4586 
4587   return APF.bitwiseIsEqual(KF16) ||
4588          APF.bitwiseIsEqual(KF32) ||
4589          APF.bitwiseIsEqual(KF64);
4590 }
4591 
4592 // The negated forms of 0 and 1.0 / (2.0 * pi) do not have inline immediates,
4593 // so there is an additional cost to negate these constants.
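// For example, +0.0 is an inline immediate but -0.0 is not, so negating a
// +0.0 operand carries the extra cost of a literal constant operand.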
4594 TargetLowering::NegatibleCost
4595 AMDGPUTargetLowering::getConstantNegateCost(const ConstantFPSDNode *C) const {
4596   if (C->isZero())
4597     return C->isNegative() ? NegatibleCost::Cheaper : NegatibleCost::Expensive;
4598 
4599   if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
4600     return C->isNegative() ? NegatibleCost::Cheaper : NegatibleCost::Expensive;
4601 
4602   return NegatibleCost::Neutral;
4603 }
4604 
4605 bool AMDGPUTargetLowering::isConstantCostlierToNegate(SDValue N) const {
4606   if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N))
4607     return getConstantNegateCost(C) == NegatibleCost::Expensive;
4608   return false;
4609 }
4610 
4611 bool AMDGPUTargetLowering::isConstantCheaperToNegate(SDValue N) const {
4612   if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N))
4613     return getConstantNegateCost(C) == NegatibleCost::Cheaper;
4614   return false;
4615 }
4616 
4617 static unsigned inverseMinMax(unsigned Opc) {
4618   switch (Opc) {
4619   case ISD::FMAXNUM:
4620     return ISD::FMINNUM;
4621   case ISD::FMINNUM:
4622     return ISD::FMAXNUM;
4623   case ISD::FMAXNUM_IEEE:
4624     return ISD::FMINNUM_IEEE;
4625   case ISD::FMINNUM_IEEE:
4626     return ISD::FMAXNUM_IEEE;
4627   case ISD::FMAXIMUM:
4628     return ISD::FMINIMUM;
4629   case ISD::FMINIMUM:
4630     return ISD::FMAXIMUM;
4631   case AMDGPUISD::FMAX_LEGACY:
4632     return AMDGPUISD::FMIN_LEGACY;
4633   case AMDGPUISD::FMIN_LEGACY:
4634     return AMDGPUISD::FMAX_LEGACY;
4635   default:
4636     llvm_unreachable("invalid min/max opcode");
4637   }
4638 }
4639 
4640 /// \return true if it's profitable to try to push an fneg into its source
4641 /// instruction.
4642 bool AMDGPUTargetLowering::shouldFoldFNegIntoSrc(SDNode *N, SDValue N0) {
4643   // If the input has multiple uses and we can either fold the negate down, or
4644   // the other uses cannot, give up. This both prevents unprofitable
4645   // transformations and infinite loops: we won't repeatedly try to fold around
4646   // a negate that has no 'good' form.
4647   if (N0.hasOneUse()) {
4648     // This may be able to fold into the source, but at a code size cost. Don't
4649     // fold if the fold into the user is free.
4650     if (allUsesHaveSourceMods(N, 0))
4651       return false;
4652   } else {
4653     if (fnegFoldsIntoOp(N0.getNode()) &&
4654         (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
4655       return false;
4656   }
4657 
4658   return true;
4659 }
4660 
4661 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
4662                                                  DAGCombinerInfo &DCI) const {
4663   SelectionDAG &DAG = DCI.DAG;
4664   SDValue N0 = N->getOperand(0);
4665   EVT VT = N->getValueType(0);
4666 
4667   unsigned Opc = N0.getOpcode();
4668 
4669   if (!shouldFoldFNegIntoSrc(N, N0))
4670     return SDValue();
4671 
4672   SDLoc SL(N);
4673   switch (Opc) {
4674   case ISD::FADD: {
4675     if (!mayIgnoreSignedZero(N0))
4676       return SDValue();
4677 
4678     // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
4679     SDValue LHS = N0.getOperand(0);
4680     SDValue RHS = N0.getOperand(1);
4681 
4682     if (LHS.getOpcode() != ISD::FNEG)
4683       LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
4684     else
4685       LHS = LHS.getOperand(0);
4686 
4687     if (RHS.getOpcode() != ISD::FNEG)
4688       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4689     else
4690       RHS = RHS.getOperand(0);
4691 
4692     SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
4693     if (Res.getOpcode() != ISD::FADD)
4694       return SDValue(); // Op got folded away.
4695     if (!N0.hasOneUse())
4696       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
4697     return Res;
4698   }
4699   case ISD::FMUL:
4700   case AMDGPUISD::FMUL_LEGACY: {
4701     // (fneg (fmul x, y)) -> (fmul x, (fneg y))
4702     // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
4703     SDValue LHS = N0.getOperand(0);
4704     SDValue RHS = N0.getOperand(1);
4705 
4706     if (LHS.getOpcode() == ISD::FNEG)
4707       LHS = LHS.getOperand(0);
4708     else if (RHS.getOpcode() == ISD::FNEG)
4709       RHS = RHS.getOperand(0);
4710     else
4711       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4712 
4713     SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
4714     if (Res.getOpcode() != Opc)
4715       return SDValue(); // Op got folded away.
4716     if (!N0.hasOneUse())
4717       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
4718     return Res;
4719   }
4720   case ISD::FMA:
4721   case ISD::FMAD: {
4722     // TODO: handle llvm.amdgcn.fma.legacy
4723     if (!mayIgnoreSignedZero(N0))
4724       return SDValue();
4725 
4726     // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
4727     SDValue LHS = N0.getOperand(0);
4728     SDValue MHS = N0.getOperand(1);
4729     SDValue RHS = N0.getOperand(2);
4730 
4731     if (LHS.getOpcode() == ISD::FNEG)
4732       LHS = LHS.getOperand(0);
4733     else if (MHS.getOpcode() == ISD::FNEG)
4734       MHS = MHS.getOperand(0);
4735     else
4736       MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);
4737 
4738     if (RHS.getOpcode() != ISD::FNEG)
4739       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4740     else
4741       RHS = RHS.getOperand(0);
4742 
4743     SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
4744     if (Res.getOpcode() != Opc)
4745       return SDValue(); // Op got folded away.
4746     if (!N0.hasOneUse())
4747       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
4748     return Res;
4749   }
4750   case ISD::FMAXNUM:
4751   case ISD::FMINNUM:
4752   case ISD::FMAXNUM_IEEE:
4753   case ISD::FMINNUM_IEEE:
4754   case ISD::FMINIMUM:
4755   case ISD::FMAXIMUM:
4756   case AMDGPUISD::FMAX_LEGACY:
4757   case AMDGPUISD::FMIN_LEGACY: {
4758     // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
4759     // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
4760     // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
4761     // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)
4762 
4763     SDValue LHS = N0.getOperand(0);
4764     SDValue RHS = N0.getOperand(1);
4765 
4766     // 0 doesn't have a negated inline immediate.
4767     // TODO: This constant check should be generalized to other operations.
4768     if (isConstantCostlierToNegate(RHS))
4769       return SDValue();
4770 
4771     SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
4772     SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4773     unsigned Opposite = inverseMinMax(Opc);
4774 
4775     SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
4776     if (Res.getOpcode() != Opposite)
4777       return SDValue(); // Op got folded away.
4778     if (!N0.hasOneUse())
4779       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
4780     return Res;
4781   }
4782   case AMDGPUISD::FMED3: {
4783     SDValue Ops[3];
4784     for (unsigned I = 0; I < 3; ++I)
4785       Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags());
4786 
4787     SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags());
4788     if (Res.getOpcode() != AMDGPUISD::FMED3)
4789       return SDValue(); // Op got folded away.
4790 
4791     if (!N0.hasOneUse()) {
4792       SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Res);
4793       DAG.ReplaceAllUsesWith(N0, Neg);
4794 
4795       for (SDNode *U : Neg->uses())
4796         DCI.AddToWorklist(U);
4797     }
4798 
4799     return Res;
4800   }
4801   case ISD::FP_EXTEND:
4802   case ISD::FTRUNC:
4803   case ISD::FRINT:
4804   case ISD::FNEARBYINT: // XXX - Should fround be handled?
4805   case ISD::FROUNDEVEN:
4806   case ISD::FSIN:
4807   case ISD::FCANONICALIZE:
4808   case AMDGPUISD::RCP:
4809   case AMDGPUISD::RCP_LEGACY:
4810   case AMDGPUISD::RCP_IFLAG:
4811   case AMDGPUISD::SIN_HW: {
4812     SDValue CvtSrc = N0.getOperand(0);
4813     if (CvtSrc.getOpcode() == ISD::FNEG) {
4814       // (fneg (fp_extend (fneg x))) -> (fp_extend x)
4815       // (fneg (rcp (fneg x))) -> (rcp x)
4816       return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
4817     }
4818 
4819     if (!N0.hasOneUse())
4820       return SDValue();
4821 
4822     // (fneg (fp_extend x)) -> (fp_extend (fneg x))
4823     // (fneg (rcp x)) -> (rcp (fneg x))
4824     SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
4825     return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
4826   }
4827   case ISD::FP_ROUND: {
4828     SDValue CvtSrc = N0.getOperand(0);
4829 
4830     if (CvtSrc.getOpcode() == ISD::FNEG) {
4831       // (fneg (fp_round (fneg x))) -> (fp_round x)
4832       return DAG.getNode(ISD::FP_ROUND, SL, VT,
4833                          CvtSrc.getOperand(0), N0.getOperand(1));
4834     }
4835 
4836     if (!N0.hasOneUse())
4837       return SDValue();
4838 
4839     // (fneg (fp_round x)) -> (fp_round (fneg x))
4840     SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
4841     return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
4842   }
4843   case ISD::FP16_TO_FP: {
4844     // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
4845     // f16, but legalization of f16 fneg ends up pulling it out of the source.
4846     // Put the fneg back as a legal source operation that can be matched later.
4847     SDLoc SL(N);
4848 
4849     SDValue Src = N0.getOperand(0);
4850     EVT SrcVT = Src.getValueType();
4851 
4852     // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
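    // Bit 15 is the sign bit of the f16 value held in the low bits of the
    // integer source, so the xor below implements fneg on the half encoding.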
4853     SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
4854                                   DAG.getConstant(0x8000, SL, SrcVT));
4855     return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
4856   }
4857   case ISD::SELECT: {
4858     // fneg (select c, a, b) -> select c, (fneg a), (fneg b)
4859     // TODO: Invert conditions of foldFreeOpFromSelect
4860     return SDValue();
4861   }
4862   case ISD::BITCAST: {
4863     SDLoc SL(N);
4864     SDValue BCSrc = N0.getOperand(0);
4865     if (BCSrc.getOpcode() == ISD::BUILD_VECTOR) {
4866       SDValue HighBits = BCSrc.getOperand(BCSrc.getNumOperands() - 1);
4867       if (HighBits.getValueType().getSizeInBits() != 32 ||
4868           !fnegFoldsIntoOp(HighBits.getNode()))
4869         return SDValue();
4870 
4871       // f64 fneg only really needs to operate on the high half of the
4872       // register, so try to force it to an f32 operation to help make use of
4873       // source modifiers.
4874       //
4875       //
4876       // fneg (f64 (bitcast (build_vector x, y))) ->
4877       // f64 (bitcast (build_vector (bitcast i32:x to f32),
4878       //                            (fneg (bitcast i32:y to f32)))
4879       //                            (fneg (bitcast i32:y to f32))))
4880       SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::f32, HighBits);
4881       SDValue NegHi = DAG.getNode(ISD::FNEG, SL, MVT::f32, CastHi);
4882       SDValue CastBack =
4883           DAG.getNode(ISD::BITCAST, SL, HighBits.getValueType(), NegHi);
4884 
4885       SmallVector<SDValue, 8> Ops(BCSrc->op_begin(), BCSrc->op_end());
4886       Ops.back() = CastBack;
4887       DCI.AddToWorklist(NegHi.getNode());
4888       SDValue Build =
4889           DAG.getNode(ISD::BUILD_VECTOR, SL, BCSrc.getValueType(), Ops);
4890       SDValue Result = DAG.getNode(ISD::BITCAST, SL, VT, Build);
4891 
4892       if (!N0.hasOneUse())
4893         DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Result));
4894       return Result;
4895     }
4896 
4897     if (BCSrc.getOpcode() == ISD::SELECT && VT == MVT::f32 &&
4898         BCSrc.hasOneUse()) {
4899       // fneg (bitcast (f32 (select cond, i32:lhs, i32:rhs))) ->
4900       //   select cond, (bitcast i32:lhs to f32), (bitcast i32:rhs to f32)
4901 
4902       // TODO: Cast back result for multiple uses is beneficial in some cases.
4903 
4904       SDValue LHS =
4905           DAG.getNode(ISD::BITCAST, SL, MVT::f32, BCSrc.getOperand(1));
4906       SDValue RHS =
4907           DAG.getNode(ISD::BITCAST, SL, MVT::f32, BCSrc.getOperand(2));
4908 
4909       SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, MVT::f32, LHS);
4910       SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, MVT::f32, RHS);
4911 
4912       return DAG.getNode(ISD::SELECT, SL, MVT::f32, BCSrc.getOperand(0), NegLHS,
4913                          NegRHS);
4914     }
4915 
4916     return SDValue();
4917   }
4918   default:
4919     return SDValue();
4920   }
4921 }
4922 
4923 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
4924                                                  DAGCombinerInfo &DCI) const {
4925   SelectionDAG &DAG = DCI.DAG;
4926   SDValue N0 = N->getOperand(0);
4927 
4928   if (!N0.hasOneUse())
4929     return SDValue();
4930 
4931   switch (N0.getOpcode()) {
4932   case ISD::FP16_TO_FP: {
4933     assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
4934     SDLoc SL(N);
4935     SDValue Src = N0.getOperand(0);
4936     EVT SrcVT = Src.getValueType();
4937 
4938     // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
4939     SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
4940                                   DAG.getConstant(0x7fff, SL, SrcVT));
4941     return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
4942   }
4943   default:
4944     return SDValue();
4945   }
4946 }
4947 
4948 SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
4949                                                 DAGCombinerInfo &DCI) const {
4950   const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
4951   if (!CFP)
4952     return SDValue();
4953 
4954   // XXX - Should this flush denormals?
4955   const APFloat &Val = CFP->getValueAPF();
4956   APFloat One(Val.getSemantics(), "1.0");
4957   return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
4958 }
4959 
4960 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
4961                                                 DAGCombinerInfo &DCI) const {
4962   SelectionDAG &DAG = DCI.DAG;
4963   SDLoc DL(N);
4964 
4965   switch(N->getOpcode()) {
4966   default:
4967     break;
4968   case ISD::BITCAST: {
4969     EVT DestVT = N->getValueType(0);
4970 
4971     // Push casts through vector builds. This helps avoid emitting a large
4972     // number of copies when materializing floating point vector constants.
4973     //
4974     // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
4975     //   vNt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
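    // e.g. v2f32 (bitcast (v2i32 (build_vector x, y))) becomes
    //   build_vector (f32 (bitcast x)), (f32 (bitcast y))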
4976     if (DestVT.isVector()) {
4977       SDValue Src = N->getOperand(0);
4978       if (Src.getOpcode() == ISD::BUILD_VECTOR &&
4979           (DCI.getDAGCombineLevel() < AfterLegalizeDAG ||
4980            isOperationLegal(ISD::BUILD_VECTOR, DestVT))) {
4981         EVT SrcVT = Src.getValueType();
4982         unsigned NElts = DestVT.getVectorNumElements();
4983 
4984         if (SrcVT.getVectorNumElements() == NElts) {
4985           EVT DestEltVT = DestVT.getVectorElementType();
4986 
4987           SmallVector<SDValue, 8> CastedElts;
4988           SDLoc SL(N);
4989           for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
4990             SDValue Elt = Src.getOperand(I);
4991             CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
4992           }
4993 
4994           return DAG.getBuildVector(DestVT, SL, CastedElts);
4995         }
4996       }
4997     }
4998 
4999     if (DestVT.getSizeInBits() != 64 || !DestVT.isVector())
5000       break;
5001 
5002     // Fold bitcasts of constants.
5003     //
5004     // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
5005     // TODO: Generalize and move to DAGCombiner
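    // For example, bitcasting the i64 constant 0x1122334455667788 to v2i32
    // yields build_vector 0x55667788, 0x11223344 (element 0 is the low half).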
5006     SDValue Src = N->getOperand(0);
5007     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
5008       SDLoc SL(N);
5009       uint64_t CVal = C->getZExtValue();
5010       SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
5011                                DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
5012                                DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
5013       return DAG.getNode(ISD::BITCAST, SL, DestVT, BV);
5014     }
5015 
5016     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
5017       const APInt &Val = C->getValueAPF().bitcastToAPInt();
5018       SDLoc SL(N);
5019       uint64_t CVal = Val.getZExtValue();
5020       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
5021                                 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
5022                                 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
5023 
5024       return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
5025     }
5026 
5027     break;
5028   }
5029   case ISD::SHL: {
5030     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
5031       break;
5032 
5033     return performShlCombine(N, DCI);
5034   }
5035   case ISD::SRL: {
5036     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
5037       break;
5038 
5039     return performSrlCombine(N, DCI);
5040   }
5041   case ISD::SRA: {
5042     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
5043       break;
5044 
5045     return performSraCombine(N, DCI);
5046   }
5047   case ISD::TRUNCATE:
5048     return performTruncateCombine(N, DCI);
5049   case ISD::MUL:
5050     return performMulCombine(N, DCI);
5051   case AMDGPUISD::MUL_U24:
5052   case AMDGPUISD::MUL_I24: {
5053     if (SDValue Simplified = simplifyMul24(N, DCI))
5054       return Simplified;
5055     return performMulCombine(N, DCI);
5056   }
5057   case AMDGPUISD::MULHI_I24:
5058   case AMDGPUISD::MULHI_U24:
5059     return simplifyMul24(N, DCI);
5060   case ISD::SMUL_LOHI:
5061   case ISD::UMUL_LOHI:
5062     return performMulLoHiCombine(N, DCI);
5063   case ISD::MULHS:
5064     return performMulhsCombine(N, DCI);
5065   case ISD::MULHU:
5066     return performMulhuCombine(N, DCI);
5067   case ISD::SELECT:
5068     return performSelectCombine(N, DCI);
5069   case ISD::FNEG:
5070     return performFNegCombine(N, DCI);
5071   case ISD::FABS:
5072     return performFAbsCombine(N, DCI);
5073   case AMDGPUISD::BFE_I32:
5074   case AMDGPUISD::BFE_U32: {
5075     assert(!N->getValueType(0).isVector() &&
5076            "Vector handling of BFE not implemented");
5077     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
5078     if (!Width)
5079       break;
5080 
5081     uint32_t WidthVal = Width->getZExtValue() & 0x1f;
5082     if (WidthVal == 0)
5083       return DAG.getConstant(0, DL, MVT::i32);
5084 
5085     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
5086     if (!Offset)
5087       break;
5088 
5089     SDValue BitsFrom = N->getOperand(0);
5090     uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
5091 
5092     bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
5093 
5094     if (OffsetVal == 0) {
5095       // This is already sign / zero extended, so try to fold away extra BFEs.
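      // A BFE of width W at offset 0 produces a value sign-extended (signed)
      // or zero-extended (unsigned) from bit W-1, i.e. at least 32 - W + 1,
      // respectively 32 - W, sign bits. If the operand already has that many
      // sign bits, the BFE changes nothing and can be dropped.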
5096       unsigned SignBits =  Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
5097 
5098       unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
5099       if (OpSignBits >= SignBits)
5100         return BitsFrom;
5101 
5102       EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
5103       if (Signed) {
5104         // This is a sign_extend_inreg. Replace it to take advantage of existing
5105         // DAG Combines. If not eliminated, we will match back to BFE during
5106         // selection.
5107 
5108         // TODO: The sext_inreg of extended types ends up expanded, although we
5109         // could handle them in a single BFE.
5110         return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
5111                            DAG.getValueType(SmallVT));
5112       }
5113 
5114       return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
5115     }
5116 
5117     if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
5118       if (Signed) {
5119         return constantFoldBFE<int32_t>(DAG,
5120                                         CVal->getSExtValue(),
5121                                         OffsetVal,
5122                                         WidthVal,
5123                                         DL);
5124       }
5125 
5126       return constantFoldBFE<uint32_t>(DAG,
5127                                        CVal->getZExtValue(),
5128                                        OffsetVal,
5129                                        WidthVal,
5130                                        DL);
5131     }
5132 
5133     if ((OffsetVal + WidthVal) >= 32 &&
5134         !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
5135       SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
5136       return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
5137                          BitsFrom, ShiftVal);
5138     }
5139 
5140     if (BitsFrom.hasOneUse()) {
5141       APInt Demanded = APInt::getBitsSet(32,
5142                                          OffsetVal,
5143                                          OffsetVal + WidthVal);
5144 
5145       KnownBits Known;
5146       TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
5147                                             !DCI.isBeforeLegalizeOps());
5148       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5149       if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
5150           TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
5151         DCI.CommitTargetLoweringOpt(TLO);
5152       }
5153     }
5154 
5155     break;
5156   }
5157   case ISD::LOAD:
5158     return performLoadCombine(N, DCI);
5159   case ISD::STORE:
5160     return performStoreCombine(N, DCI);
5161   case AMDGPUISD::RCP:
5162   case AMDGPUISD::RCP_IFLAG:
5163     return performRcpCombine(N, DCI);
5164   case ISD::AssertZext:
5165   case ISD::AssertSext:
5166     return performAssertSZExtCombine(N, DCI);
5167   case ISD::INTRINSIC_WO_CHAIN:
5168     return performIntrinsicWOChainCombine(N, DCI);
5169   case AMDGPUISD::FMAD_FTZ: {
5170     SDValue N0 = N->getOperand(0);
5171     SDValue N1 = N->getOperand(1);
5172     SDValue N2 = N->getOperand(2);
5173     EVT VT = N->getValueType(0);
5174 
5175     // FMAD_FTZ is a FMAD + flush denormals to zero.
5176     // We flush the inputs, the intermediate step, and the output.
5177     ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
5178     ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5179     ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5180     if (N0CFP && N1CFP && N2CFP) {
5181       const auto FTZ = [](const APFloat &V) {
5182         if (V.isDenormal()) {
5183           APFloat Zero(V.getSemantics(), 0);
5184           return V.isNegative() ? -Zero : Zero;
5185         }
5186         return V;
5187       };
5188 
5189       APFloat V0 = FTZ(N0CFP->getValueAPF());
5190       APFloat V1 = FTZ(N1CFP->getValueAPF());
5191       APFloat V2 = FTZ(N2CFP->getValueAPF());
5192       V0.multiply(V1, APFloat::rmNearestTiesToEven);
5193       V0 = FTZ(V0);
5194       V0.add(V2, APFloat::rmNearestTiesToEven);
5195       return DAG.getConstantFP(FTZ(V0), DL, VT);
5196     }
5197     break;
5198   }
5199   }
5200   return SDValue();
5201 }
5202 
5203 //===----------------------------------------------------------------------===//
5204 // Helper functions
5205 //===----------------------------------------------------------------------===//
5206 
5207 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
5208                                                    const TargetRegisterClass *RC,
5209                                                    Register Reg, EVT VT,
5210                                                    const SDLoc &SL,
5211                                                    bool RawReg) const {
5212   MachineFunction &MF = DAG.getMachineFunction();
5213   MachineRegisterInfo &MRI = MF.getRegInfo();
5214   Register VReg;
5215 
5216   if (!MRI.isLiveIn(Reg)) {
5217     VReg = MRI.createVirtualRegister(RC);
5218     MRI.addLiveIn(Reg, VReg);
5219   } else {
5220     VReg = MRI.getLiveInVirtReg(Reg);
5221   }
5222 
5223   if (RawReg)
5224     return DAG.getRegister(VReg, VT);
5225 
5226   return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
5227 }
5228 
5229 // This may be called multiple times, and nothing prevents creating multiple
5230 // objects at the same offset. See if we already defined this object.
5231 static int getOrCreateFixedStackObject(MachineFrameInfo &MFI, unsigned Size,
5232                                        int64_t Offset) {
5233   for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
5234     if (MFI.getObjectOffset(I) == Offset) {
5235       assert(MFI.getObjectSize(I) == Size);
5236       return I;
5237     }
5238   }
5239 
5240   return MFI.CreateFixedObject(Size, Offset, true);
5241 }
5242 
5243 SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG,
5244                                                   EVT VT,
5245                                                   const SDLoc &SL,
5246                                                   int64_t Offset) const {
5247   MachineFunction &MF = DAG.getMachineFunction();
5248   MachineFrameInfo &MFI = MF.getFrameInfo();
5249   int FI = getOrCreateFixedStackObject(MFI, VT.getStoreSize(), Offset);
5250 
5251   auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset);
5252   SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32);
5253 
5254   return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, Align(4),
5255                      MachineMemOperand::MODereferenceable |
5256                          MachineMemOperand::MOInvariant);
5257 }
5258 
5259 SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG,
5260                                                    const SDLoc &SL,
5261                                                    SDValue Chain,
5262                                                    SDValue ArgVal,
5263                                                    int64_t Offset) const {
5264   MachineFunction &MF = DAG.getMachineFunction();
5265   MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset);
5266   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
5267 
5268   SDValue Ptr = DAG.getConstant(Offset, SL, MVT::i32);
5269   // Stores to the argument stack area are relative to the stack pointer.
5270   SDValue SP =
5271       DAG.getCopyFromReg(Chain, SL, Info->getStackPtrOffsetReg(), MVT::i32);
5272   Ptr = DAG.getNode(ISD::ADD, SL, MVT::i32, SP, Ptr);
5273   SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, Align(4),
5274                                MachineMemOperand::MODereferenceable);
5275   return Store;
5276 }
5277 
5278 SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG,
5279                                              const TargetRegisterClass *RC,
5280                                              EVT VT, const SDLoc &SL,
5281                                              const ArgDescriptor &Arg) const {
5282   assert(Arg && "Attempting to load missing argument");
5283 
5284   SDValue V = Arg.isRegister() ?
5285     CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL) :
5286     loadStackInputValue(DAG, VT, SL, Arg.getStackOffset());
5287 
5288   if (!Arg.isMasked())
5289     return V;
5290 
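  // Extract the masked bit-field: shift its lowest set bit down to bit 0 and
  // mask to the field width. For example, Mask = 0xFFC00 gives Shift = 10 and
  // a final AND with 0x3FF.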
5291   unsigned Mask = Arg.getMask();
5292   unsigned Shift = llvm::countr_zero<unsigned>(Mask);
5293   V = DAG.getNode(ISD::SRL, SL, VT, V,
5294                   DAG.getShiftAmountConstant(Shift, VT, SL));
5295   return DAG.getNode(ISD::AND, SL, VT, V,
5296                      DAG.getConstant(Mask >> Shift, SL, VT));
5297 }
5298 
5299 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
5300     uint64_t ExplicitKernArgSize, const ImplicitParameter Param) const {
5301   unsigned ExplicitArgOffset = Subtarget->getExplicitKernelArgOffset();
5302   const Align Alignment = Subtarget->getAlignmentForImplicitArgPtr();
5303   uint64_t ArgOffset =
5304       alignTo(ExplicitKernArgSize, Alignment) + ExplicitArgOffset;
5305   switch (Param) {
5306   case FIRST_IMPLICIT:
5307     return ArgOffset;
5308   case PRIVATE_BASE:
5309     return ArgOffset + AMDGPU::ImplicitArg::PRIVATE_BASE_OFFSET;
5310   case SHARED_BASE:
5311     return ArgOffset + AMDGPU::ImplicitArg::SHARED_BASE_OFFSET;
5312   case QUEUE_PTR:
5313     return ArgOffset + AMDGPU::ImplicitArg::QUEUE_PTR_OFFSET;
5314   }
5315   llvm_unreachable("unexpected implicit parameter type");
5316 }
5317 
5318 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
5319     const MachineFunction &MF, const ImplicitParameter Param) const {
5320   const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
5321   return getImplicitParameterOffset(MFI->getExplicitKernArgSize(), Param);
5322 }
5323 
5324 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
5325 
5326 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
5327   switch ((AMDGPUISD::NodeType)Opcode) {
5328   case AMDGPUISD::FIRST_NUMBER: break;
5329   // AMDIL DAG nodes
5330   NODE_NAME_CASE(UMUL);
5331   NODE_NAME_CASE(BRANCH_COND);
5332 
5333   // AMDGPU DAG nodes
5334   NODE_NAME_CASE(IF)
5335   NODE_NAME_CASE(ELSE)
5336   NODE_NAME_CASE(LOOP)
5337   NODE_NAME_CASE(CALL)
5338   NODE_NAME_CASE(TC_RETURN)
5339   NODE_NAME_CASE(TC_RETURN_GFX)
5340   NODE_NAME_CASE(TC_RETURN_CHAIN)
5341   NODE_NAME_CASE(TRAP)
5342   NODE_NAME_CASE(RET_GLUE)
5343   NODE_NAME_CASE(WAVE_ADDRESS)
5344   NODE_NAME_CASE(RETURN_TO_EPILOG)
5345   NODE_NAME_CASE(ENDPGM)
5346   NODE_NAME_CASE(ENDPGM_TRAP)
5347   NODE_NAME_CASE(DWORDADDR)
5348   NODE_NAME_CASE(FRACT)
5349   NODE_NAME_CASE(SETCC)
5350   NODE_NAME_CASE(SETREG)
5351   NODE_NAME_CASE(DENORM_MODE)
5352   NODE_NAME_CASE(FMA_W_CHAIN)
5353   NODE_NAME_CASE(FMUL_W_CHAIN)
5354   NODE_NAME_CASE(CLAMP)
5355   NODE_NAME_CASE(COS_HW)
5356   NODE_NAME_CASE(SIN_HW)
5357   NODE_NAME_CASE(FMAX_LEGACY)
5358   NODE_NAME_CASE(FMIN_LEGACY)
5359   NODE_NAME_CASE(FMAX3)
5360   NODE_NAME_CASE(SMAX3)
5361   NODE_NAME_CASE(UMAX3)
5362   NODE_NAME_CASE(FMIN3)
5363   NODE_NAME_CASE(SMIN3)
5364   NODE_NAME_CASE(UMIN3)
5365   NODE_NAME_CASE(FMED3)
5366   NODE_NAME_CASE(SMED3)
5367   NODE_NAME_CASE(UMED3)
5368   NODE_NAME_CASE(FMAXIMUM3)
5369   NODE_NAME_CASE(FMINIMUM3)
5370   NODE_NAME_CASE(FDOT2)
5371   NODE_NAME_CASE(URECIP)
5372   NODE_NAME_CASE(DIV_SCALE)
5373   NODE_NAME_CASE(DIV_FMAS)
5374   NODE_NAME_CASE(DIV_FIXUP)
5375   NODE_NAME_CASE(FMAD_FTZ)
5376   NODE_NAME_CASE(RCP)
5377   NODE_NAME_CASE(RSQ)
5378   NODE_NAME_CASE(RCP_LEGACY)
5379   NODE_NAME_CASE(RCP_IFLAG)
5380   NODE_NAME_CASE(LOG)
5381   NODE_NAME_CASE(EXP)
5382   NODE_NAME_CASE(FMUL_LEGACY)
5383   NODE_NAME_CASE(RSQ_CLAMP)
5384   NODE_NAME_CASE(FP_CLASS)
5385   NODE_NAME_CASE(DOT4)
5386   NODE_NAME_CASE(CARRY)
5387   NODE_NAME_CASE(BORROW)
5388   NODE_NAME_CASE(BFE_U32)
5389   NODE_NAME_CASE(BFE_I32)
5390   NODE_NAME_CASE(BFI)
5391   NODE_NAME_CASE(BFM)
5392   NODE_NAME_CASE(FFBH_U32)
5393   NODE_NAME_CASE(FFBH_I32)
5394   NODE_NAME_CASE(FFBL_B32)
5395   NODE_NAME_CASE(MUL_U24)
5396   NODE_NAME_CASE(MUL_I24)
5397   NODE_NAME_CASE(MULHI_U24)
5398   NODE_NAME_CASE(MULHI_I24)
5399   NODE_NAME_CASE(MAD_U24)
5400   NODE_NAME_CASE(MAD_I24)
5401   NODE_NAME_CASE(MAD_I64_I32)
5402   NODE_NAME_CASE(MAD_U64_U32)
5403   NODE_NAME_CASE(PERM)
5404   NODE_NAME_CASE(TEXTURE_FETCH)
5405   NODE_NAME_CASE(R600_EXPORT)
5406   NODE_NAME_CASE(CONST_ADDRESS)
5407   NODE_NAME_CASE(REGISTER_LOAD)
5408   NODE_NAME_CASE(REGISTER_STORE)
5409   NODE_NAME_CASE(SAMPLE)
5410   NODE_NAME_CASE(SAMPLEB)
5411   NODE_NAME_CASE(SAMPLED)
5412   NODE_NAME_CASE(SAMPLEL)
5413   NODE_NAME_CASE(CVT_F32_UBYTE0)
5414   NODE_NAME_CASE(CVT_F32_UBYTE1)
5415   NODE_NAME_CASE(CVT_F32_UBYTE2)
5416   NODE_NAME_CASE(CVT_F32_UBYTE3)
5417   NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
5418   NODE_NAME_CASE(CVT_PKNORM_I16_F32)
5419   NODE_NAME_CASE(CVT_PKNORM_U16_F32)
5420   NODE_NAME_CASE(CVT_PK_I16_I32)
5421   NODE_NAME_CASE(CVT_PK_U16_U32)
5422   NODE_NAME_CASE(FP_TO_FP16)
5423   NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
5424   NODE_NAME_CASE(CONST_DATA_PTR)
5425   NODE_NAME_CASE(PC_ADD_REL_OFFSET)
5426   NODE_NAME_CASE(LDS)
5427   NODE_NAME_CASE(FPTRUNC_ROUND_UPWARD)
5428   NODE_NAME_CASE(FPTRUNC_ROUND_DOWNWARD)
5429   NODE_NAME_CASE(DUMMY_CHAIN)
5430   case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
5431   NODE_NAME_CASE(LOAD_D16_HI)
5432   NODE_NAME_CASE(LOAD_D16_LO)
5433   NODE_NAME_CASE(LOAD_D16_HI_I8)
5434   NODE_NAME_CASE(LOAD_D16_HI_U8)
5435   NODE_NAME_CASE(LOAD_D16_LO_I8)
5436   NODE_NAME_CASE(LOAD_D16_LO_U8)
5437   NODE_NAME_CASE(STORE_MSKOR)
5438   NODE_NAME_CASE(LOAD_CONSTANT)
5439   NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
5440   NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
5441   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
5442   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
5443   NODE_NAME_CASE(DS_ORDERED_COUNT)
5444   NODE_NAME_CASE(ATOMIC_CMP_SWAP)
5445   NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
5446   NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
5447   NODE_NAME_CASE(BUFFER_LOAD)
5448   NODE_NAME_CASE(BUFFER_LOAD_UBYTE)
5449   NODE_NAME_CASE(BUFFER_LOAD_USHORT)
5450   NODE_NAME_CASE(BUFFER_LOAD_BYTE)
5451   NODE_NAME_CASE(BUFFER_LOAD_SHORT)
5452   NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
5453   NODE_NAME_CASE(BUFFER_LOAD_FORMAT_TFE)
5454   NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
5455   NODE_NAME_CASE(SBUFFER_LOAD)
5456   NODE_NAME_CASE(BUFFER_STORE)
5457   NODE_NAME_CASE(BUFFER_STORE_BYTE)
5458   NODE_NAME_CASE(BUFFER_STORE_SHORT)
5459   NODE_NAME_CASE(BUFFER_STORE_FORMAT)
5460   NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
5461   NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
5462   NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
5463   NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
5464   NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
5465   NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
5466   NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
5467   NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
5468   NODE_NAME_CASE(BUFFER_ATOMIC_AND)
5469   NODE_NAME_CASE(BUFFER_ATOMIC_OR)
5470   NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
5471   NODE_NAME_CASE(BUFFER_ATOMIC_INC)
5472   NODE_NAME_CASE(BUFFER_ATOMIC_DEC)
5473   NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)
5474   NODE_NAME_CASE(BUFFER_ATOMIC_CSUB)
5475   NODE_NAME_CASE(BUFFER_ATOMIC_FADD)
5476   NODE_NAME_CASE(BUFFER_ATOMIC_FMIN)
5477   NODE_NAME_CASE(BUFFER_ATOMIC_FMAX)
5478 
5479   case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
5480   }
5481   return nullptr;
5482 }
5483 
5484 SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
5485                                               SelectionDAG &DAG, int Enabled,
5486                                               int &RefinementSteps,
5487                                               bool &UseOneConstNR,
5488                                               bool Reciprocal) const {
5489   EVT VT = Operand.getValueType();
5490 
5491   if (VT == MVT::f32) {
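    // The f32 hardware reciprocal square root is used directly; leaving
    // RefinementSteps at 0 tells the generic combiner not to add
    // Newton-Raphson refinement on top of it.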
5492     RefinementSteps = 0;
5493     return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
5494   }
5495 
5496   // TODO: There is also an f64 rsq instruction, but the documentation is less
5497   // clear on its precision.
5498 
5499   return SDValue();
5500 }
5501 
5502 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
5503                                                SelectionDAG &DAG, int Enabled,
5504                                                int &RefinementSteps) const {
5505   EVT VT = Operand.getValueType();
5506 
5507   if (VT == MVT::f32) {
5508     // Reciprocal, < 1 ulp error.
5509     //
5510     // This reciprocal approximation converges to < 0.5 ulp error with one
5511     // newton rhapson performed with two fused multiple adds (FMAs).
5512 
5513     RefinementSteps = 0;
5514     return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
5515   }
5516 
5517   // TODO: There is also an f64 rcp instruction, but the documentation is less
5518   // clear on its precision.
5519 
5520   return SDValue();
5521 }
5522 
5523 static unsigned workitemIntrinsicDim(unsigned ID) {
5524   switch (ID) {
5525   case Intrinsic::amdgcn_workitem_id_x:
5526     return 0;
5527   case Intrinsic::amdgcn_workitem_id_y:
5528     return 1;
5529   case Intrinsic::amdgcn_workitem_id_z:
5530     return 2;
5531   default:
5532     llvm_unreachable("not a workitem intrinsic");
5533   }
5534 }
5535 
5536 void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
5537     const SDValue Op, KnownBits &Known,
5538     const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
5539 
5540   Known.resetAll(); // Don't know anything.
5541 
5542   unsigned Opc = Op.getOpcode();
5543 
5544   switch (Opc) {
5545   default:
5546     break;
5547   case AMDGPUISD::CARRY:
5548   case AMDGPUISD::BORROW: {
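    // Both nodes produce a 0/1 carry or borrow value, so only the lowest bit
    // of the 32-bit result can ever be set.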
5549     Known.Zero = APInt::getHighBitsSet(32, 31);
5550     break;
5551   }
5552 
5553   case AMDGPUISD::BFE_I32:
5554   case AMDGPUISD::BFE_U32: {
5555     ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
5556     if (!CWidth)
5557       return;
5558 
5559     uint32_t Width = CWidth->getZExtValue() & 0x1f;
5560 
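    // For example, BFE_U32 with a constant width of 8 extracts an 8-bit field
    // and zero-extends it, so the upper 32 - 8 = 24 bits of the result are
    // known zero regardless of the offset operand.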
5561     if (Opc == AMDGPUISD::BFE_U32)
5562       Known.Zero = APInt::getHighBitsSet(32, 32 - Width);
5563 
5564     break;
5565   }
5566   case AMDGPUISD::FP_TO_FP16: {
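    // The f16 result is placed in the low 16 bits of the integer result, so
    // everything above bit 15 is known zero.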
5567     unsigned BitWidth = Known.getBitWidth();
5568 
5569     // High bits are zero.
5570     Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
5571     break;
5572   }
5573   case AMDGPUISD::MUL_U24:
5574   case AMDGPUISD::MUL_I24: {
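    // These nodes multiply the low 24 bits of each operand.  For example, if
    // both unsigned inputs are known to fit in 10 bits, the product fits in
    // 20 bits and everything from bit 20 upward is known zero.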
5575     KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
5576     KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
5577     unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
5578                       RHSKnown.countMinTrailingZeros();
5579     Known.Zero.setLowBits(std::min(TrailZ, 32u));
5580     // Skip the extra checks if all bits are already known to be zero.
5581     if (TrailZ >= 32)
5582       break;
5583 
5584     // Truncate to 24 bits.
5585     LHSKnown = LHSKnown.trunc(24);
5586     RHSKnown = RHSKnown.trunc(24);
5587 
5588     if (Opc == AMDGPUISD::MUL_I24) {
5589       unsigned LHSValBits = LHSKnown.countMaxSignificantBits();
5590       unsigned RHSValBits = RHSKnown.countMaxSignificantBits();
5591       unsigned MaxValBits = LHSValBits + RHSValBits;
5592       if (MaxValBits > 32)
5593         break;
5594       unsigned SignBits = 32 - MaxValBits + 1;
5595       bool LHSNegative = LHSKnown.isNegative();
5596       bool LHSNonNegative = LHSKnown.isNonNegative();
5597       bool LHSPositive = LHSKnown.isStrictlyPositive();
5598       bool RHSNegative = RHSKnown.isNegative();
5599       bool RHSNonNegative = RHSKnown.isNonNegative();
5600       bool RHSPositive = RHSKnown.isStrictlyPositive();
5601 
5602       if ((LHSNonNegative && RHSNonNegative) || (LHSNegative && RHSNegative))
5603         Known.Zero.setHighBits(SignBits);
5604       else if ((LHSNegative && RHSPositive) || (LHSPositive && RHSNegative))
5605         Known.One.setHighBits(SignBits);
5606     } else {
5607       unsigned LHSValBits = LHSKnown.countMaxActiveBits();
5608       unsigned RHSValBits = RHSKnown.countMaxActiveBits();
5609       unsigned MaxValBits = LHSValBits + RHSValBits;
5610       if (MaxValBits >= 32)
5611         break;
5612       Known.Zero.setBitsFrom(MaxValBits);
5613     }
5614     break;
5615   }
5616   case AMDGPUISD::PERM: {
5617     ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2));
5618     if (!CMask)
5619       return;
5620 
5621     KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
5622     KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
5623     unsigned Sel = CMask->getZExtValue();
5624 
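    // Each selector byte chooses one byte of the result: values 0-3 pick a
    // byte from operand 1, values 4-6 pick a byte from operand 0, 0x0c yields
    // 0x00, and values above 0x0c yield 0xff.  Any other selector value leaves
    // that result byte unknown here.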
5625     for (unsigned I = 0; I < 32; I += 8) {
5626       unsigned SelBits = Sel & 0xff;
5627       if (SelBits < 4) {
5628         SelBits *= 8;
5629         Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
5630         Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
5631       } else if (SelBits < 7) {
5632         SelBits = (SelBits & 3) * 8;
5633         Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
5634         Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
5635       } else if (SelBits == 0x0c) {
5636         Known.Zero |= 0xFFull << I;
5637       } else if (SelBits > 0x0c) {
5638         Known.One |= 0xFFull << I;
5639       }
5640       Sel >>= 8;
5641     }
5642     break;
5643   }
5644   case AMDGPUISD::BUFFER_LOAD_UBYTE: {
5645     Known.Zero.setHighBits(24);
5646     break;
5647   }
5648   case AMDGPUISD::BUFFER_LOAD_USHORT: {
5649     Known.Zero.setHighBits(16);
5650     break;
5651   }
5652   case AMDGPUISD::LDS: {
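    // LDS addresses are assumed to fit in 16 bits here (a 64 KiB aperture),
    // and the low bits are zero according to the variable's alignment.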
5653     auto GA = cast<GlobalAddressSDNode>(Op.getOperand(0).getNode());
5654     Align Alignment = GA->getGlobal()->getPointerAlignment(DAG.getDataLayout());
5655 
5656     Known.Zero.setHighBits(16);
5657     Known.Zero.setLowBits(Log2(Alignment));
5658     break;
5659   }
5660   case AMDGPUISD::SMIN3:
5661   case AMDGPUISD::SMAX3:
5662   case AMDGPUISD::SMED3:
5663   case AMDGPUISD::UMIN3:
5664   case AMDGPUISD::UMAX3:
5665   case AMDGPUISD::UMED3: {
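    // The result of a three-way min/max/median is always one of the three
    // operands, so a bit is known in the result only if it is known to have
    // the same value in all three operands.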
5666     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(2), Depth + 1);
5667     if (Known2.isUnknown())
5668       break;
5669 
5670     KnownBits Known1 = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
5671     if (Known1.isUnknown())
5672       break;
5673 
5674     KnownBits Known0 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
5675     if (Known0.isUnknown())
5676       break;
5677 
5678     // TODO: Handle LeadZero/LeadOne from UMIN/UMAX handling.
5679     Known.Zero = Known0.Zero & Known1.Zero & Known2.Zero;
5680     Known.One = Known0.One & Known1.One & Known2.One;
5681     break;
5682   }
5683   case ISD::INTRINSIC_WO_CHAIN: {
5684     unsigned IID = Op.getConstantOperandVal(0);
5685     switch (IID) {
5686     case Intrinsic::amdgcn_workitem_id_x:
5687     case Intrinsic::amdgcn_workitem_id_y:
5688     case Intrinsic::amdgcn_workitem_id_z: {
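      // The workitem ID is bounded by the maximum workgroup size in that
      // dimension.  For example, with a maximum ID of 1023 the value fits in
      // 10 bits, so the top 22 bits are known zero.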
5689       unsigned MaxValue = Subtarget->getMaxWorkitemID(
5690           DAG.getMachineFunction().getFunction(), workitemIntrinsicDim(IID));
5691       Known.Zero.setHighBits(llvm::countl_zero(MaxValue));
5692       break;
5693     }
5694     default:
5695       break;
5696     }
5697   }
5698   }
5699 }
5700 
5701 unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
5702     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
5703     unsigned Depth) const {
5704   switch (Op.getOpcode()) {
5705   case AMDGPUISD::BFE_I32: {
5706     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
5707     if (!Width)
5708       return 1;
5709 
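    // A field of Width bits is sign-extended into 32 bits, so there are at
    // least 32 - Width + 1 copies of the sign bit (e.g. Width == 8 gives at
    // least 25 sign bits).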
5710     unsigned SignBits = 32 - Width->getZExtValue() + 1;
5711     if (!isNullConstant(Op.getOperand(1)))
5712       return SignBits;
5713 
5714     // TODO: Could probably figure something out with non-0 offsets.
5715     unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
5716     return std::max(SignBits, Op0SignBits);
5717   }
5718 
5719   case AMDGPUISD::BFE_U32: {
5720     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
5721     return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
5722   }
5723 
5724   case AMDGPUISD::CARRY:
5725   case AMDGPUISD::BORROW:
5726     return 31;
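  // Sign-extending byte/short buffer loads leave 32 - 8 + 1 = 25 and
  // 32 - 16 + 1 = 17 sign bits; the zero-extending forms leave 24 and 16
  // known-zero (and therefore sign) bits.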
5727   case AMDGPUISD::BUFFER_LOAD_BYTE:
5728     return 25;
5729   case AMDGPUISD::BUFFER_LOAD_SHORT:
5730     return 17;
5731   case AMDGPUISD::BUFFER_LOAD_UBYTE:
5732     return 24;
5733   case AMDGPUISD::BUFFER_LOAD_USHORT:
5734     return 16;
5735   case AMDGPUISD::FP_TO_FP16:
5736     return 16;
5737   case AMDGPUISD::SMIN3:
5738   case AMDGPUISD::SMAX3:
5739   case AMDGPUISD::SMED3:
5740   case AMDGPUISD::UMIN3:
5741   case AMDGPUISD::UMAX3:
5742   case AMDGPUISD::UMED3: {
5743     unsigned Tmp2 = DAG.ComputeNumSignBits(Op.getOperand(2), Depth + 1);
5744     if (Tmp2 == 1)
5745       return 1; // Early out.
5746 
5747     unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth + 1);
5748     if (Tmp1 == 1)
5749       return 1; // Early out.
5750 
5751     unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
5752     if (Tmp0 == 1)
5753       return 1; // Early out.
5754 
5755     return std::min(Tmp0, std::min(Tmp1, Tmp2));
5756   }
5757   default:
5758     return 1;
5759   }
5760 }
5761 
5762 unsigned AMDGPUTargetLowering::computeNumSignBitsForTargetInstr(
5763   GISelKnownBits &Analysis, Register R,
5764   const APInt &DemandedElts, const MachineRegisterInfo &MRI,
5765   unsigned Depth) const {
5766   const MachineInstr *MI = MRI.getVRegDef(R);
5767   if (!MI)
5768     return 1;
5769 
5770   // TODO: Check range metadata on MMO.
5771   switch (MI->getOpcode()) {
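  // Same reasoning as the SelectionDAG variant above: these buffer loads sign-
  // or zero-extend an 8- or 16-bit value into a 32-bit result.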
5772   case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
5773     return 25;
5774   case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
5775     return 17;
5776   case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
5777     return 24;
5778   case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
5779     return 16;
5780   case AMDGPU::G_AMDGPU_SMED3:
5781   case AMDGPU::G_AMDGPU_UMED3: {
5782     auto [Dst, Src0, Src1, Src2] = MI->getFirst4Regs();
5783     unsigned Tmp2 = Analysis.computeNumSignBits(Src2, DemandedElts, Depth + 1);
5784     if (Tmp2 == 1)
5785       return 1;
5786     unsigned Tmp1 = Analysis.computeNumSignBits(Src1, DemandedElts, Depth + 1);
5787     if (Tmp1 == 1)
5788       return 1;
5789     unsigned Tmp0 = Analysis.computeNumSignBits(Src0, DemandedElts, Depth + 1);
5790     if (Tmp0 == 1)
5791       return 1;
5792     return std::min(Tmp0, std::min(Tmp1, Tmp2));
5793   }
5794   default:
5795     return 1;
5796   }
5797 }
5798 
5799 bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
5800                                                         const SelectionDAG &DAG,
5801                                                         bool SNaN,
5802                                                         unsigned Depth) const {
5803   unsigned Opcode = Op.getOpcode();
5804   switch (Opcode) {
5805   case AMDGPUISD::FMIN_LEGACY:
5806   case AMDGPUISD::FMAX_LEGACY: {
5807     if (SNaN)
5808       return true;
5809 
5810     // TODO: We could check that one of the operands is known not to be a NaN,
5811     // but which one?
5812     return false;
5813   }
5814   case AMDGPUISD::FMUL_LEGACY:
5815   case AMDGPUISD::CVT_PKRTZ_F16_F32: {
5816     if (SNaN)
5817       return true;
5818     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
5819            DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
5820   }
5821   case AMDGPUISD::FMED3:
5822   case AMDGPUISD::FMIN3:
5823   case AMDGPUISD::FMAX3:
5824   case AMDGPUISD::FMINIMUM3:
5825   case AMDGPUISD::FMAXIMUM3:
5826   case AMDGPUISD::FMAD_FTZ: {
5827     if (SNaN)
5828       return true;
5829     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
5830            DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
5831            DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
5832   }
5833   case AMDGPUISD::CVT_F32_UBYTE0:
5834   case AMDGPUISD::CVT_F32_UBYTE1:
5835   case AMDGPUISD::CVT_F32_UBYTE2:
5836   case AMDGPUISD::CVT_F32_UBYTE3:
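    // Converting an unsigned byte to f32 always produces a finite value in
    // [0.0, 255.0], never a NaN.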
5837     return true;
5838 
5839   case AMDGPUISD::RCP:
5840   case AMDGPUISD::RSQ:
5841   case AMDGPUISD::RCP_LEGACY:
5842   case AMDGPUISD::RSQ_CLAMP: {
5843     if (SNaN)
5844       return true;
5845 
5846     // TODO: Need an is-known-positive check.
5847     return false;
5848   }
5849   case ISD::FLDEXP:
5850   case AMDGPUISD::FRACT: {
5851     if (SNaN)
5852       return true;
5853     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
5854   }
5855   case AMDGPUISD::DIV_SCALE:
5856   case AMDGPUISD::DIV_FMAS:
5857   case AMDGPUISD::DIV_FIXUP:
5858     // TODO: Refine on operands.
5859     return SNaN;
5860   case AMDGPUISD::SIN_HW:
5861   case AMDGPUISD::COS_HW: {
5862     // TODO: Need a check for infinity.
5863     return SNaN;
5864   }
5865   case ISD::INTRINSIC_WO_CHAIN: {
5866     unsigned IntrinsicID = Op.getConstantOperandVal(0);
5867     // TODO: Handle more intrinsics
5868     switch (IntrinsicID) {
5869     case Intrinsic::amdgcn_cubeid:
5870       return true;
5871 
5872     case Intrinsic::amdgcn_frexp_mant: {
5873       if (SNaN)
5874         return true;
5875       return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
5876     }
5877     case Intrinsic::amdgcn_cvt_pkrtz: {
5878       if (SNaN)
5879         return true;
5880       return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
5881              DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
5882     }
5883     case Intrinsic::amdgcn_rcp:
5884     case Intrinsic::amdgcn_rsq:
5885     case Intrinsic::amdgcn_rcp_legacy:
5886     case Intrinsic::amdgcn_rsq_legacy:
5887     case Intrinsic::amdgcn_rsq_clamp: {
5888       if (SNaN)
5889         return true;
5890 
5891       // TODO: Need an is-known-positive check.
5892       return false;
5893     }
5894     case Intrinsic::amdgcn_trig_preop:
5895     case Intrinsic::amdgcn_fdot2:
5896       // TODO: Refine on operand
5897       return SNaN;
5898     case Intrinsic::amdgcn_fma_legacy:
5899       if (SNaN)
5900         return true;
5901       return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
5902              DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1) &&
5903              DAG.isKnownNeverNaN(Op.getOperand(3), SNaN, Depth + 1);
5904     default:
5905       return false;
5906     }
5907   }
5908   default:
5909     return false;
5910   }
5911 }
5912 
5913 bool AMDGPUTargetLowering::isReassocProfitable(MachineRegisterInfo &MRI,
5914                                                Register N0, Register N1) const {
5915   return MRI.hasOneNonDBGUse(N0); // FIXME: handle regbanks
5916 }
5917 
5918 TargetLowering::AtomicExpansionKind
5919 AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
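  // Returning CmpXChg asks AtomicExpandPass to rewrite the atomicrmw as a
  // compare-exchange loop; plain 32- and 64-bit integer operations are left
  // alone and selected directly.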
5920   switch (RMW->getOperation()) {
5921   case AtomicRMWInst::Nand:
5922   case AtomicRMWInst::FAdd:
5923   case AtomicRMWInst::FSub:
5924   case AtomicRMWInst::FMax:
5925   case AtomicRMWInst::FMin:
5926     return AtomicExpansionKind::CmpXChg;
5927   default: {
5928     if (auto *IntTy = dyn_cast<IntegerType>(RMW->getType())) {
5929       unsigned Size = IntTy->getBitWidth();
5930       if (Size == 32 || Size == 64)
5931         return AtomicExpansionKind::None;
5932     }
5933 
5934     return AtomicExpansionKind::CmpXChg;
5935   }
5936   }
5937 }
5938 
5939 /// Whether it is profitable to sink the operands of an
5940 /// Instruction I to the basic block of I.
5941 /// This helps source modifiers (like abs and neg) get used more often.
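///
/// For example, given
///   %n = fneg float %x
/// in one block and a use
///   %m = fmul float %n, %y
/// in another block, sinking the fneg next to the fmul lets instruction
/// selection fold the negation into a source modifier instead of emitting a
/// separate instruction.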
5942 bool AMDGPUTargetLowering::shouldSinkOperands(
5943     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
5944   using namespace PatternMatch;
5945 
5946   for (auto &Op : I->operands()) {
5947     // Ensure we are not already sinking this operand.
5948     if (any_of(Ops, [&](Use *U) { return U->get() == Op.get(); }))
5949       continue;
5950 
5951     if (match(&Op, m_FAbs(m_Value())) || match(&Op, m_FNeg(m_Value())))
5952       Ops.push_back(&Op);
5953   }
5954 
5955   return !Ops.empty();
5956 }
5957