//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
   cl::desc("Set maximum size of jump tables."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

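// Note on "density": it is the percentage of the switch's value range covered
// by actual case values. For example, 4 cases spread over the values 0..39
// cover 10% of the range, which matches the default threshold for normal
// functions above.
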
// FIXME: This option exists only to test whether strict fp operations are
// processed correctly, by preventing strict fp operations from being mutated
// into normal fp operations during development. Once the backend supports
// strict float operations, this option will be meaningless.
static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
       cl::desc("Don't mutate strict-float nodes into normal fp nodes"),
       cl::init(false), cl::Hidden);

/// GetFPLibCall - Helper to return the right libcall for the given floating
/// point type, or UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPLibCall(EVT VT,
                                   RTLIB::Libcall Call_F32,
                                   RTLIB::Libcall Call_F64,
                                   RTLIB::Libcall Call_F80,
                                   RTLIB::Libcall Call_F128,
                                   RTLIB::Libcall Call_PPCF128) {
  return
    VT == MVT::f32 ? Call_F32 :
    VT == MVT::f64 ? Call_F64 :
    VT == MVT::f80 ? Call_F80 :
    VT == MVT::f128 ? Call_F128 :
    VT == MVT::ppcf128 ? Call_PPCF128 :
    RTLIB::UNKNOWN_LIBCALL;
}

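// Illustrative example: RTLIB::getPOW below forwards here, so
//   getFPLibCall(MVT::f64, POW_F32, POW_F64, POW_F80, POW_F128, POW_PPCF128)
// yields POW_F64, while any integer or vector VT yields UNKNOWN_LIBCALL.
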
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
    if (RetVT == MVT::f64)
      return FPEXT_F16_F64;
    if (RetVT == MVT::f80)
      return FPEXT_F16_F80;
    if (RetVT == MVT::f128)
      return FPEXT_F16_F128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  } else if (OpVT == MVT::bf16) {
    if (RetVT == MVT::f32)
      return FPEXT_BF16_F32;
  }

  return UNKNOWN_LIBCALL;
}

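// Illustrative example: getFPEXT(MVT::f32, MVT::f64) returns FPEXT_F32_F64,
// which typically lowers to the compiler-rt/libgcc helper __extendsfdf2.
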
/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::bf16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_BF16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_BF16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_BF16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_BF16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

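// Illustrative example: getFPROUND(MVT::f64, MVT::f16) returns FPROUND_F64_F16
// (typically __truncdfhf2); pairs with no entry above, such as bf16 -> f16,
// yield UNKNOWN_LIBCALL.
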
/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::bf16)
      return SINTTOFP_I64_BF16;
    if (RetVT == MVT::f16)
      return SINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::bf16)
      return UINTTOFP_I64_BF16;
    if (RetVT == MVT::f16)
      return UINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

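// Illustrative examples: getSINTTOFP(MVT::i64, MVT::f64) returns
// SINTTOFP_I64_F64 (typically __floatdidf) and getUINTTOFP(MVT::i64, MVT::f64)
// returns UINTTOFP_I64_F64 (typically __floatundidf). Note there are no
// entries for i16 or smaller; such operands are expected to be promoted to
// i32 before a libcall is chosen.
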
RTLIB::Libcall RTLIB::getPOWI(EVT RetVT) {
  return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
                      POWI_PPCF128);
}

RTLIB::Libcall RTLIB::getPOW(EVT RetVT) {
  return getFPLibCall(RetVT, POW_F32, POW_F64, POW_F80, POW_F128, POW_PPCF128);
}

RTLIB::Libcall RTLIB::getLDEXP(EVT RetVT) {
  return getFPLibCall(RetVT, LDEXP_F32, LDEXP_F64, LDEXP_F80, LDEXP_F128,
                      LDEXP_PPCF128);
}

RTLIB::Libcall RTLIB::getFREXP(EVT RetVT) {
  return getFPLibCall(RetVT, FREXP_F32, FREXP_F64, FREXP_F80, FREXP_F128,
                      FREXP_PPCF128);
}

RTLIB::Libcall RTLIB::getSIN(EVT RetVT) {
  return getFPLibCall(RetVT, SIN_F32, SIN_F64, SIN_F80, SIN_F128, SIN_PPCF128);
}

RTLIB::Libcall RTLIB::getCOS(EVT RetVT) {
  return getFPLibCall(RetVT, COS_F32, COS_F64, COS_F80, COS_F128, COS_PPCF128);
}

RTLIB::Libcall RTLIB::getSINCOS(EVT RetVT) {
  return getFPLibCall(RetVT, SINCOS_F32, SINCOS_F64, SINCOS_F80, SINCOS_F128,
                      SINCOS_PPCF128);
}

RTLIB::Libcall RTLIB::getSINCOSPI(EVT RetVT) {
  return getFPLibCall(RetVT, SINCOSPI_F32, SINCOSPI_F64, SINCOSPI_F80,
                      SINCOSPI_F128, SINCOSPI_PPCF128);
}

RTLIB::Libcall RTLIB::getMODF(EVT RetVT) {
  return getFPLibCall(RetVT, MODF_F32, MODF_F64, MODF_F80, MODF_F128,
                      MODF_PPCF128);
}

RTLIB::Libcall RTLIB::getOutlineAtomicHelper(const Libcall (&LC)[5][4],
                                             AtomicOrdering Order,
                                             uint64_t MemSize) {
  unsigned ModeN, ModelN;
  switch (MemSize) {
  case 1:
    ModeN = 0;
    break;
  case 2:
    ModeN = 1;
    break;
  case 4:
    ModeN = 2;
    break;
  case 8:
    ModeN = 3;
    break;
  case 16:
    ModeN = 4;
    break;
  default:
    return RTLIB::UNKNOWN_LIBCALL;
  }

  switch (Order) {
  case AtomicOrdering::Monotonic:
    ModelN = 0;
    break;
  case AtomicOrdering::Acquire:
    ModelN = 1;
    break;
  case AtomicOrdering::Release:
    ModelN = 2;
    break;
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    ModelN = 3;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

  return LC[ModeN][ModelN];
}

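// Illustrative example of the [5][4] indexing above: a 4-byte acquire
// operation selects ModeN == 2 and ModelN == 1, so a table built with
// LCALL5(OUTLINE_ATOMIC_CAS) (see below) returns OUTLINE_ATOMIC_CAS4_ACQ,
// which on AArch64 is the outlined __aarch64_cas4_acq helper.
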
RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
                                        MVT VT) {
  if (!VT.isScalarInteger())
    return UNKNOWN_LIBCALL;
  uint64_t MemSize = VT.getScalarSizeInBits() / 8;

#define LCALLS(A, B)                                                           \
  { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
#define LCALL5(A)                                                              \
  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
  switch (Opc) {
  case ISD::ATOMIC_CMP_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_ADD: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_OR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_CLR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_XOR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  default:
    return UNKNOWN_LIBCALL;
  }
#undef LCALLS
#undef LCALL5
}

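// Note: ISD::ATOMIC_LOAD_OR maps to the LDSET table and ISD::ATOMIC_LOAD_CLR
// to LDCLR above, following AArch64 LSE naming where "set"/"clr" mean bit-set
// and bit-clear rather than plain or/and.
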
RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                                              \
  case Name:                                                                   \
    switch (VT.SimpleTy) {                                                     \
    default:                                                                   \
      return UNKNOWN_LIBCALL;                                                  \
    case MVT::i8:                                                              \
      return Enum##_1;                                                         \
    case MVT::i16:                                                             \
      return Enum##_2;                                                         \
    case MVT::i32:                                                             \
      return Enum##_4;                                                         \
    case MVT::i64:                                                             \
      return Enum##_8;                                                         \
    case MVT::i128:                                                            \
      return Enum##_16;                                                        \
    }

  switch (Opc) {
    OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
    OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}

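// Illustrative example: getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32) returns
// SYNC_FETCH_AND_ADD_4, i.e. the legacy __sync_fetch_and_add_4 libcall.
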
RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

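// Illustrative example: getMEMCPY_ELEMENT_UNORDERED_ATOMIC(4) returns the
// libcall typically named __llvm_memcpy_element_unordered_atomic_4; an
// unsupported element size such as 3 yields UNKNOWN_LIBCALL.
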
ISD::CondCode TargetLoweringBase::getSoftFloatCmpLibcallPredicate(
    RTLIB::LibcallImpl Impl) const {
  switch (Impl) {
  case RTLIB::__aeabi_dcmpeq__une:
  case RTLIB::__aeabi_fcmpeq__une:
    // The helper computes eq but is being used for the une predicate, so we
    // have to invert the comparison against its boolean result.
    return ISD::SETEQ;
  case RTLIB::__aeabi_dcmpeq__oeq:
  case RTLIB::__aeabi_fcmpeq__oeq:
    // Normal comparison to a boolean value.
    return ISD::SETNE;
  case RTLIB::__aeabi_dcmplt:
  case RTLIB::__aeabi_dcmple:
  case RTLIB::__aeabi_dcmpge:
  case RTLIB::__aeabi_dcmpgt:
  case RTLIB::__aeabi_dcmpun:
  case RTLIB::__aeabi_fcmplt:
  case RTLIB::__aeabi_fcmple:
  case RTLIB::__aeabi_fcmpge:
  case RTLIB::__aeabi_fcmpgt:
    // The AEABI versions return a typical boolean value, so we can compare
    // against the integer result as simply != 0.
    return ISD::SETNE;
  default:
    break;
  }

  // Assume libgcc/compiler-rt behavior. Most of the cases are really aliases of
  // each other, and return a 3-way comparison style result of -1, 0, or 1
  // depending on lt/eq/gt.
  //
  // FIXME: It would be cleaner to directly express this as a 3-way comparison
  // soft FP libcall instead of individual compares.
  RTLIB::Libcall LC = RTLIB::RuntimeLibcallsInfo::getLibcallFromImpl(Impl);
  switch (LC) {
  case RTLIB::OEQ_F32:
  case RTLIB::OEQ_F64:
  case RTLIB::OEQ_F128:
  case RTLIB::OEQ_PPCF128:
    return ISD::SETEQ;
  case RTLIB::UNE_F32:
  case RTLIB::UNE_F64:
  case RTLIB::UNE_F128:
  case RTLIB::UNE_PPCF128:
    return ISD::SETNE;
  case RTLIB::OGE_F32:
  case RTLIB::OGE_F64:
  case RTLIB::OGE_F128:
  case RTLIB::OGE_PPCF128:
    return ISD::SETGE;
  case RTLIB::OLT_F32:
  case RTLIB::OLT_F64:
  case RTLIB::OLT_F128:
  case RTLIB::OLT_PPCF128:
    return ISD::SETLT;
  case RTLIB::OLE_F32:
  case RTLIB::OLE_F64:
  case RTLIB::OLE_F128:
  case RTLIB::OLE_PPCF128:
    return ISD::SETLE;
  case RTLIB::OGT_F32:
  case RTLIB::OGT_F64:
  case RTLIB::OGT_F128:
  case RTLIB::OGT_PPCF128:
    return ISD::SETGT;
  case RTLIB::UO_F32:
  case RTLIB::UO_F64:
  case RTLIB::UO_F128:
  case RTLIB::UO_PPCF128:
    return ISD::SETNE;
  default:
    llvm_unreachable("not a compare libcall");
  }
}

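// Illustrative example of the 3-way convention above: RTLIB::OLE_F64
// (typically __ledf2) returns a value <= 0 exactly when the operands compare
// ordered-less-or-equal, so this function returns ISD::SETLE and the caller
// tests (libcall result SETLE 0) to recover the FP predicate.
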
/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm)
    : TM(tm), Libcalls(TM.getTargetTriple(), TM.Options.ExceptionModel,
                       TM.Options.FloatABIType, TM.Options.EABIVersion,
                       TM.Options.MCOptions.getABIName()) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  GatherAllAliasesMaxDepth = 18;
  IsStrictFPEnabled = DisableStrictNodeMutation;
  MaxBytesForAlignment = 0;
  MaxAtomicSizeInBitsSupported = 0;

  // Assume that even with libcalls, no target supports wider than 128 bit
  // division.
  MaxDivRemBitWidthSupported = 128;

  MaxLargeFPConvertBitWidthSupported = llvm::IntegerType::MAX_INT_BITS;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;
}

// Define the virtual destructor out-of-line to act as a key method to anchor
// debug info (see coding standards).
TargetLoweringBase::~TargetLoweringBase() = default;

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  llvm::fill(RegClassForVT, nullptr);
  llvm::fill(TargetDAGCombineArray, 0);

  // Let extending atomic loads be unsupported by default.
  for (MVT ValVT : MVT::all_valuetypes())
    for (MVT MemVT : MVT::all_valuetypes())
      setAtomicLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD}, ValVT, MemVT,
                             Expand);

  // We're somewhat special-casing MVT::i2 and MVT::i4. Ideally we want to
  // remove this and have targets individually set these types if they are
  // not legal.
  for (ISD::NodeType NT : enum_seq(ISD::DELETED_NODE, ISD::BUILTIN_OP_END,
                                   force_iteration_on_noniterable_enum)) {
    for (MVT VT : {MVT::i2, MVT::i4})
      OpActions[(unsigned)VT.SimpleTy][NT] = Expand;
  }
  for (MVT AVT : MVT::all_valuetypes()) {
    for (MVT VT : {MVT::i2, MVT::i4, MVT::v128i2, MVT::v64i4}) {
      setTruncStoreAction(AVT, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, AVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, AVT, VT, Expand);
    }
  }
  for (unsigned IM = (unsigned)ISD::PRE_INC;
       IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
    for (MVT VT : {MVT::i2, MVT::i4}) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }
  }

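  // Note: Promote here means e.g. an f32 ATOMIC_SWAP is performed as the
  // same-width integer swap with bitcasts around it; native floating-point
  // atomic exchange instructions are uncommon.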
  for (MVT VT : MVT::fp_valuetypes()) {
    MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
    if (IntVT.isValid()) {
      setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
      AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
    }
  }

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction({ISD::FGETSIGN,       ISD::CONCAT_VECTORS,
                        ISD::FMINNUM,        ISD::FMAXNUM,
                        ISD::FMINNUM_IEEE,   ISD::FMAXNUM_IEEE,
                        ISD::FMINIMUM,       ISD::FMAXIMUM,
                        ISD::FMINIMUMNUM,    ISD::FMAXIMUMNUM,
                        ISD::FMAD,           ISD::SMIN,
                        ISD::SMAX,           ISD::UMIN,
                        ISD::UMAX,           ISD::ABS,
                        ISD::FSHL,           ISD::FSHR,
                        ISD::SADDSAT,        ISD::UADDSAT,
                        ISD::SSUBSAT,        ISD::USUBSAT,
                        ISD::SSHLSAT,        ISD::USHLSAT,
                        ISD::SMULFIX,        ISD::SMULFIXSAT,
                        ISD::UMULFIX,        ISD::UMULFIXSAT,
                        ISD::SDIVFIX,        ISD::SDIVFIXSAT,
                        ISD::UDIVFIX,        ISD::UDIVFIXSAT,
                        ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
                        ISD::IS_FPCLASS,     ISD::FCBRT,
                        ISD::FLOG,           ISD::FLOG2,
                        ISD::FLOG10,         ISD::FEXP,
                        ISD::FEXP2,          ISD::FEXP10,
                        ISD::FFLOOR,         ISD::FNEARBYINT,
                        ISD::FCEIL,          ISD::FRINT,
                        ISD::FTRUNC,         ISD::FROUNDEVEN,
                        ISD::FTAN,           ISD::FACOS,
                        ISD::FASIN,          ISD::FATAN,
                        ISD::FCOSH,          ISD::FSINH,
                        ISD::FTANH,          ISD::FATAN2},
                       VT, Expand);

    // Overflow operations default to expand.
    setOperationAction({ISD::SADDO, ISD::SSUBO, ISD::UADDO, ISD::USUBO,
                        ISD::SMULO, ISD::UMULO},
                       VT, Expand);

    // Carry-using overflow operations default to expand.
    setOperationAction({ISD::UADDO_CARRY, ISD::USUBO_CARRY, ISD::SETCCCARRY,
                        ISD::SADDO_CARRY, ISD::SSUBO_CARRY},
                       VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, VT,
                       Expand);

    // [US]CMP default to expand.
    setOperationAction({ISD::UCMP, ISD::SCMP}, VT, Expand);

    // Halving adds.
    setOperationAction(
        {ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS, ISD::AVGCEILU}, VT,
        Expand);

    // Absolute difference.
    setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Expand);

    // Saturating truncation.
    setOperationAction(ISD::TRUNCATE_SSAT_S, VT, Expand);
    setOperationAction(ISD::TRUNCATE_SSAT_U, VT, Expand);
    setOperationAction(ISD::TRUNCATE_USAT_U, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                       Expand);

    setOperationAction({ISD::BITREVERSE, ISD::PARITY}, VT, Expand);

    // These library functions default to expand.
    setOperationAction({ISD::FROUND, ISD::FPOWI, ISD::FLDEXP, ISD::FFREXP,
                        ISD::FSINCOS, ISD::FSINCOSPI, ISD::FMODF},
                       VT, Expand);

    // These operations default to expand for vector types.
    if (VT.isVector())
      setOperationAction({ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG,
                          ISD::ANY_EXTEND_VECTOR_INREG,
                          ISD::SIGN_EXTEND_VECTOR_INREG,
                          ISD::ZERO_EXTEND_VECTOR_INREG, ISD::SPLAT_VECTOR,
                          ISD::LRINT, ISD::LLRINT, ISD::LROUND, ISD::LLROUND},
                         VT, Expand);

    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);

    // Vector reductions default to expand.
    setOperationAction(
        {ISD::VECREDUCE_FADD, ISD::VECREDUCE_FMUL, ISD::VECREDUCE_ADD,
         ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
         ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
         ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN, ISD::VECREDUCE_FMAX,
         ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM,
         ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_SEQ_FMUL},
        VT, Expand);

    // Named vector shuffles default to expand.
    setOperationAction(ISD::VECTOR_SPLICE, VT, Expand);

    // Only some targets support this vector operation; most need to expand it.
    setOperationAction(ISD::VECTOR_COMPRESS, VT, Expand);

    // VP operations default to expand.
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...)                                   \
    setOperationAction(ISD::SDOPC, VT, Expand);
#include "llvm/IR/VPIntrinsics.def"

    // Masked vector extracts default to expand.
    setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Expand);

    // FP environment operations default to expand.
    setOperationAction(ISD::GET_FPENV, VT, Expand);
    setOperationAction(ISD::SET_FPENV, VT, Expand);
    setOperationAction(ISD::RESET_FPENV, VT, Expand);
  }

909 
910   // Most targets ignore the @llvm.prefetch intrinsic.
911   setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
912 
913   // Most targets also ignore the @llvm.readcyclecounter intrinsic.
914   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);
915 
916   // Most targets also ignore the @llvm.readsteadycounter intrinsic.
917   setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64, Expand);
918 
919   // ConstantFP nodes default to expand.  Targets can either change this to
920   // Legal, in which case all fp constants are legal, or use isFPImmLegal()
921   // to optimize expansions for certain constants.
922   setOperationAction(ISD::ConstantFP,
923                      {MVT::bf16, MVT::f16, MVT::f32, MVT::f64, MVT::f80, MVT::f128},
924                      Expand);
925 
926   // Insert custom handling default for llvm.canonicalize.*.
927   setOperationAction(ISD::FCANONICALIZE,
928                      {MVT::f16, MVT::f32, MVT::f64, MVT::f128}, Expand);
929 
930   // FIXME: Query RuntimeLibCalls to make the decision.
931   setOperationAction({ISD::LRINT, ISD::LLRINT, ISD::LROUND, ISD::LLROUND},
932                      {MVT::f32, MVT::f64, MVT::f128}, LibCall);
933 
934   setOperationAction({ISD::FTAN, ISD::FACOS, ISD::FASIN, ISD::FATAN, ISD::FCOSH,
935                       ISD::FSINH, ISD::FTANH, ISD::FATAN2},
936                      MVT::f16, Promote);
937   // Default ISD::TRAP to expand (which turns it into abort).
938   setOperationAction(ISD::TRAP, MVT::Other, Expand);
939 
940   // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
941   // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
942   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
943 
944   setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
945 
946   setOperationAction(ISD::GET_FPENV_MEM, MVT::Other, Expand);
947   setOperationAction(ISD::SET_FPENV_MEM, MVT::Other, Expand);
948 
949   for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
950     setOperationAction(ISD::GET_FPMODE, VT, Expand);
951     setOperationAction(ISD::SET_FPMODE, VT, Expand);
952   }
953   setOperationAction(ISD::RESET_FPMODE, MVT::Other, Expand);
954 
955   // This one by default will call __clear_cache unless the target
956   // wants something different.
957   setOperationAction(ISD::CLEAR_CACHE, MVT::Other, LibCall);
958 }
959 
MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
                                         const DataLayout &DL) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  MVT ShiftVT = getScalarShiftAmountTy(DL, LHSTy);
  // If any possible shift value won't fit in the preferred type, just use
  // something safe. Assume it will be legalized when the shift is expanded.
  if (ShiftVT.getSizeInBits() < Log2_32_Ceil(LHSTy.getSizeInBits()))
    ShiftVT = MVT::i32;
  assert(ShiftVT.getSizeInBits() >= Log2_32_Ceil(LHSTy.getSizeInBits()) &&
         "ShiftVT is still too small!");
  return ShiftVT;
}

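// Illustrative (hypothetical) example: if a target's preferred scalar shift
// type were i4, shift amounts for an i64 value would not fit
// (Log2_32_Ceil(64) == 6 bits are needed), so the code above falls back to
// MVT::i32.
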
bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
                                             unsigned DestAS) const {
  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}

unsigned TargetLoweringBase::getBitWidthForCttzElements(
    Type *RetTy, ElementCount EC, bool ZeroIsPoison,
    const ConstantRange *VScaleRange) const {
  // Find the smallest "sensible" element type to use for the expansion.
  ConstantRange CR(APInt(64, EC.getKnownMinValue()));
  if (EC.isScalable())
    CR = CR.umul_sat(*VScaleRange);

  if (ZeroIsPoison)
    CR = CR.subtract(APInt(64, 1));

  unsigned EltWidth = RetTy->getScalarSizeInBits();
  EltWidth = std::min(EltWidth, (unsigned)CR.getActiveBits());
  EltWidth = std::max(llvm::bit_ceil(EltWidth), (unsigned)8);

  return EltWidth;
}

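// Worked example: for a fixed 8-element vector with ZeroIsPoison, CR starts
// as the single value 8 and becomes 7 after the subtraction; getActiveBits()
// is then 3, and the bit_ceil/minimum-of-8 clamp yields an i8 element width.
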
void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < std::size(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            LA == TypeSoftPromoteHalf ||
            (NVT.isVector() ||
             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  ElementCount NumElts = VT.getVectorElementCount();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts.isScalar())
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = NumElts.coefficientNextPowerOf2();
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    //  <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger) {
      if (NumElts.isScalable() && NumElts.getKnownMinValue() == 1)
        return LegalizeKind(TypeScalarizeScalableVector, EltVT);
      return LegalizeKind(TypeSplitVector,
                          VT.getHalfNumVectorElementsVT(Context));
    }

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fall back to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64-bit elements on 32-bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = NumElts.coefficientNextPowerOf2();

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type.  Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
    return LegalizeKind(TypeScalarizeScalableVector, EltVT);

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT,
                             VT.getVectorElementCount().divideCoefficientBy(2));
  return LegalizeKind(TypeSplitVector, NVT);
}

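// Illustrative example: on a target where <4 x i32> is legal, a <3 x i8>
// value first widens to <4 x i8> (TypeWidenVector) and its elements are then
// promoted, realizing the <3 x i8> -> <4 x i8> -> <4 x i32> chain sketched in
// the comment above.
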
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  ElementCount EC = VT.getVectorElementCount();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so splitting or widening is
  // required.
  if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
    llvm_unreachable(
        "Splitting or widening of non-power-of-2 MVTs is not implemented.");

  // FIXME: We don't support non-power-of-2-sized vectors for now.
  // Ideally we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EC.getKnownMinValue())) {
    // Split EC to unit size (scalable property is preserved).
    NumVectorRegs = EC.getKnownMinValue();
    EC = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will
  // always end up with an EC that represents a scalar or a scalable
  // scalar.
  while (EC.getKnownMinValue() > 1 &&
         !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
    EC = EC.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, EC);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();

  // Convert sizes such as i33 to i64.
  LaneSizeInBits = llvm::bit_ceil(LaneSizeInBits);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT))    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

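// Illustrative example: for MVT::v8i32 on a target where v4i32 is the widest
// legal vector type, the loop above halves the element count once, giving
// NumIntermediates == 2 with IntermediateVT == RegisterVT == v4i32, and the
// function returns 2 registers.
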
/// isLegalRC - Return true if at least one of the value types that can be
/// represented by the specified register class is legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (const auto *I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  if (llvm::none_of(MI->operands(),
                    [](MachineOperand &Operand) { return Operand.isFI(); }))
    return MBB;

  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

  // Inherit previous memory operands.
  MIB.cloneMemRefs(*MI);

  for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFI()) {
      // Index of the Def operand this Use is tied to.
      // Since Defs come before Uses, if a Use is tied, the index of the
      // Def must be smaller than the index of that Use.
      // Also, Defs preserve their position in the new MI.
      unsigned TiedTo = i;
      if (MO.isReg() && MO.isTied())
        TiedTo = MI->findTiedOperandIdx(i);
      MIB.add(MO);
      if (TiedTo < i)
        MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
      continue;
    }

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();

    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering.  This codepath is not
      // used for patchpoints/stackmaps at all, for these spilling is done via
      // foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MO);
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints.
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MO);
      MIB.addImm(0);
    }

    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    // Note: STATEPOINT MMOs are added during SelectionDAG.  STACKMAP and
    // PATCHPOINT should be updated to do the same. (TODO)
    if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
      auto Flags = MachineMemOperand::MOLoad;
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FI), Flags,
          MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
      MIB->addMemOperand(MF, MMO);
    }
  }
  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI->eraseFromParent();
  return MBB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

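// Illustrative example: if f32 values live in a register class whose
// registers are sub-registers of a legal 128-bit vector class, the vector
// class is chosen here because it has the larger spill size.
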
1348 /// computeRegisterProperties - Once all of the register classes are added,
1349 /// this allows us to compute derived properties we expose.
computeRegisterProperties(const TargetRegisterInfo * TRI)1350 void TargetLoweringBase::computeRegisterProperties(
1351     const TargetRegisterInfo *TRI) {
1352   // Everything defaults to needing one register.
1353   for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
1354     NumRegistersForVT[i] = 1;
1355     RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
1356   }
1357   // ...except isVoid, which doesn't need any registers.
1358   NumRegistersForVT[MVT::isVoid] = 0;
1359 
1360   // Find the largest integer register class.
1361   unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
1362   for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
1363     assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
1364 
1365   // Every integer value type larger than this largest register takes twice as
1366   // many registers to represent as the previous ValueType.
1367   for (unsigned ExpandedReg = LargestIntReg + 1;
1368        ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1369     NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1370     RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1371     TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
1372     ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
1373                                    TypeExpandInteger);
1374   }
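  // For example, on a hypothetical target whose largest legal integer type is
  // i64, the loop above records i128 as needing 2 * NumRegistersForVT[i64]
  // registers, with RegisterTypeForVT[i128] == i64 and TransformToType[i128]
  // stepping one type down the expansion chain (i128 -> i64).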
1375 
1376   // Inspect all of the ValueTypes smaller than the largest integer
1377   // register to see which ones need promotion.
1378   unsigned LegalIntReg = LargestIntReg;
1379   for (unsigned IntReg = LargestIntReg - 1;
1380        IntReg >= (unsigned)MVT::i1; --IntReg) {
1381     MVT IVT = (MVT::SimpleValueType)IntReg;
1382     if (isTypeLegal(IVT)) {
1383       LegalIntReg = IntReg;
1384     } else {
1385       RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1386         (MVT::SimpleValueType)LegalIntReg;
1387       ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
1388     }
1389   }
1390 
1391   // ppcf128 type is really two f64's.
1392   if (!isTypeLegal(MVT::ppcf128)) {
1393     if (isTypeLegal(MVT::f64)) {
1394       NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
1395       RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
1396       TransformToType[MVT::ppcf128] = MVT::f64;
1397       ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
1398     } else {
1399       NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
1400       RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
1401       TransformToType[MVT::ppcf128] = MVT::i128;
1402       ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
1403     }
1404   }
1405 
1406   // Decide how to handle f128. If the target does not have native f128 support,
1407   // expand it to i128 and we will be generating soft float library calls.
1408   if (!isTypeLegal(MVT::f128)) {
1409     NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
1410     RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
1411     TransformToType[MVT::f128] = MVT::i128;
1412     ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
1413   }
1414 
1415   // Decide how to handle f80. If the target does not have native f80 support,
1416   // expand it to i96 and we will be generating soft float library calls.
1417   if (!isTypeLegal(MVT::f80)) {
1418     NumRegistersForVT[MVT::f80] = 3*NumRegistersForVT[MVT::i32];
1419     RegisterTypeForVT[MVT::f80] = RegisterTypeForVT[MVT::i32];
1420     TransformToType[MVT::f80] = MVT::i32;
1421     ValueTypeActions.setTypeAction(MVT::f80, TypeSoftenFloat);
1422   }
1423 
1424   // Decide how to handle f64. If the target does not have native f64 support,
1425   // expand it to i64 and we will be generating soft float library calls.
1426   if (!isTypeLegal(MVT::f64)) {
1427     NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
1428     RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
1429     TransformToType[MVT::f64] = MVT::i64;
1430     ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
1431   }
1432 
1433   // Decide how to handle f32. If the target does not have native f32 support,
1434   // expand it to i32 and we will be generating soft float library calls.
1435   if (!isTypeLegal(MVT::f32)) {
1436     NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
1437     RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
1438     TransformToType[MVT::f32] = MVT::i32;
1439     ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
1440   }
1441 
1442   // Decide how to handle f16. If the target does not have native f16 support,
1443   // promote it to f32, because there are no f16 library calls (except for
1444   // conversions).
1445   if (!isTypeLegal(MVT::f16)) {
1446     // Allow targets to control how we legalize half.
1447     bool SoftPromoteHalfType = softPromoteHalfType();
1448     bool UseFPRegsForHalfType = !SoftPromoteHalfType || useFPRegsForHalfType();
1449 
1450     if (!UseFPRegsForHalfType) {
1451       NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
1452       RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
1453     } else {
1454       NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
1455       RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
1456     }
1457     TransformToType[MVT::f16] = MVT::f32;
1458     if (SoftPromoteHalfType) {
1459       ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
1460     } else {
1461       ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
1462     }
1463   }
1464 
1465   // Decide how to handle bf16. If the target does not have native bf16 support,
1466   // promote it to f32, because there are no bf16 library calls (except for
1467   // converting from f32 to bf16).
1468   if (!isTypeLegal(MVT::bf16)) {
1469     NumRegistersForVT[MVT::bf16] = NumRegistersForVT[MVT::f32];
1470     RegisterTypeForVT[MVT::bf16] = RegisterTypeForVT[MVT::f32];
1471     TransformToType[MVT::bf16] = MVT::f32;
1472     ValueTypeActions.setTypeAction(MVT::bf16, TypeSoftPromoteHalf);
1473   }
1474 
1475   // Loop over all of the vector value types to see which need transformations.
1476   for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
1477        i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
1478     MVT VT = (MVT::SimpleValueType) i;
1479     if (isTypeLegal(VT))
1480       continue;
1481 
1482     MVT EltVT = VT.getVectorElementType();
1483     ElementCount EC = VT.getVectorElementCount();
1484     bool IsLegalWiderType = false;
1485     bool IsScalable = VT.isScalableVector();
1486     LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
1487     switch (PreferredAction) {
1488     case TypePromoteInteger: {
1489       MVT::SimpleValueType EndVT = IsScalable ?
1490                                    MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE :
1491                                    MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
1492       // Try to promote the elements of integer vectors. If no legal
1493       // promotion was found, fall through to the widen-vector method.
1494       for (unsigned nVT = i + 1;
1495            (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
1496         MVT SVT = (MVT::SimpleValueType) nVT;
1497         // Promote vectors of integers to vectors with the same number
1498         // of elements, with a wider element type.
1499         if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
1500             SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
1501           TransformToType[i] = SVT;
1502           RegisterTypeForVT[i] = SVT;
1503           NumRegistersForVT[i] = 1;
1504           ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
1505           IsLegalWiderType = true;
1506           break;
1507         }
1508       }
1509       if (IsLegalWiderType)
1510         break;
1511       [[fallthrough]];
1512     }
1513 
1514     case TypeWidenVector:
1515       if (isPowerOf2_32(EC.getKnownMinValue())) {
1516         // Try to widen the vector.
1517         for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
1518           MVT SVT = (MVT::SimpleValueType) nVT;
1519           if (SVT.getVectorElementType() == EltVT &&
1520               SVT.isScalableVector() == IsScalable &&
1521               SVT.getVectorElementCount().getKnownMinValue() >
1522                   EC.getKnownMinValue() &&
1523               isTypeLegal(SVT)) {
1524             TransformToType[i] = SVT;
1525             RegisterTypeForVT[i] = SVT;
1526             NumRegistersForVT[i] = 1;
1527             ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1528             IsLegalWiderType = true;
1529             break;
1530           }
1531         }
1532         if (IsLegalWiderType)
1533           break;
1534       } else {
1535         // Only widen to the next power of 2 to keep consistency with EVT.
1536         MVT NVT = VT.getPow2VectorType();
1537         if (isTypeLegal(NVT)) {
1538           TransformToType[i] = NVT;
1539           ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1540           RegisterTypeForVT[i] = NVT;
1541           NumRegistersForVT[i] = 1;
1542           break;
1543         }
1544       }
1545       [[fallthrough]];
1546 
1547     case TypeSplitVector:
1548     case TypeScalarizeVector: {
1549       MVT IntermediateVT;
1550       MVT RegisterVT;
1551       unsigned NumIntermediates;
1552       unsigned NumRegisters = getVectorTypeBreakdownMVT(VT, IntermediateVT,
1553           NumIntermediates, RegisterVT, this);
1554       NumRegistersForVT[i] = NumRegisters;
1555       assert(NumRegistersForVT[i] == NumRegisters &&
1556              "NumRegistersForVT size cannot represent NumRegisters!");
1557       RegisterTypeForVT[i] = RegisterVT;
1558 
1559       MVT NVT = VT.getPow2VectorType();
1560       if (NVT == VT) {
1561         // Type is already a power of 2.  The default action is to split.
1562         TransformToType[i] = MVT::Other;
1563         if (PreferredAction == TypeScalarizeVector)
1564           ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
1565         else if (PreferredAction == TypeSplitVector)
1566           ValueTypeActions.setTypeAction(VT, TypeSplitVector);
1567         else if (EC.getKnownMinValue() > 1)
1568           ValueTypeActions.setTypeAction(VT, TypeSplitVector);
1569         else
1570           ValueTypeActions.setTypeAction(VT, EC.isScalable()
1571                                                  ? TypeScalarizeScalableVector
1572                                                  : TypeScalarizeVector);
1573       } else {
1574         TransformToType[i] = NVT;
1575         ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1576       }
1577       break;
1578     }
1579     default:
1580       llvm_unreachable("Unknown vector legalization action!");
1581     }
1582   }
1583 
1584   // Determine the 'representative' register class for each value type.
1585   // A representative register class is the largest legal super-register class
1586   // (meaning one that is not a sub-register class of any other) for a group of
1587   // value types. For example, on i386 the representative class for i8, i16,
1588   // and i32 would be GR32; on x86_64 it would be GR64.
1589   for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
1590     const TargetRegisterClass* RRC;
1591     uint8_t Cost;
1592     std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
1593     RepRegClassForVT[i] = RRC;
1594     RepRegClassCostForVT[i] = Cost;
1595   }
1596 }
1597 
1598 EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1599                                            EVT VT) const {
1600   assert(!VT.isVector() && "No default SetCC type for vectors!");
1601   return getPointerTy(DL).SimpleTy;
1602 }
1603 
1604 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
1605   return MVT::i32; // return the default value
1606 }
1607 
1608 /// getVectorTypeBreakdown - Vector types are broken down into some number of
1609 /// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
1610 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
1611 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
1612 ///
1613 /// This method returns the number of registers needed, and the VT for each
1614 /// register.  It also returns the VT and quantity of the intermediate values
1615 /// before they are promoted/expanded.
1616 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
1617                                                     EVT VT, EVT &IntermediateVT,
1618                                                     unsigned &NumIntermediates,
1619                                                     MVT &RegisterVT) const {
1620   ElementCount EltCnt = VT.getVectorElementCount();
1621 
1622   // If there is a wider vector type with the same element type as this one,
1623   // or a promoted vector type that has the same number of elements, each of
1624   // them wider, then we should convert to that legal vector type.
1625   // This handles things like <2 x float> -> <4 x float> and
1626   // <4 x i1> -> <4 x i32>.
1627   LegalizeTypeAction TA = getTypeAction(Context, VT);
1628   if (!EltCnt.isScalar() &&
1629       (TA == TypeWidenVector || TA == TypePromoteInteger)) {
1630     EVT RegisterEVT = getTypeToTransformTo(Context, VT);
1631     if (isTypeLegal(RegisterEVT)) {
1632       IntermediateVT = RegisterEVT;
1633       RegisterVT = RegisterEVT.getSimpleVT();
1634       NumIntermediates = 1;
1635       return 1;
1636     }
1637   }
1638 
1639   // Figure out the right, legal destination reg to copy into.
1640   EVT EltTy = VT.getVectorElementType();
1641 
1642   unsigned NumVectorRegs = 1;
1643 
1644   // Scalable vectors cannot be scalarized, so handle the legalization of
1645   // these types as is done elsewhere in SelectionDAG.
1646   if (EltCnt.isScalable()) {
1647     LegalizeKind LK;
1648     EVT PartVT = VT;
1649     do {
1650       // Iterate until we've found a legal (part) type to hold VT.
1651       LK = getTypeConversion(Context, PartVT);
1652       PartVT = LK.second;
1653     } while (LK.first != TypeLegal);
1654 
1655     if (!PartVT.isVector()) {
1656       report_fatal_error(
1657           "Don't know how to legalize this scalable vector type");
1658     }
1659 
1660     NumIntermediates =
1661         divideCeil(VT.getVectorElementCount().getKnownMinValue(),
1662                    PartVT.getVectorElementCount().getKnownMinValue());
1663     IntermediateVT = PartVT;
1664     RegisterVT = getRegisterType(Context, IntermediateVT);
1665     return NumIntermediates;
1666   }
1667 
1668   // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally
1669   // we could break down into LHS/RHS like LegalizeDAG does.
1670   if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
1671     NumVectorRegs = EltCnt.getKnownMinValue();
1672     EltCnt = ElementCount::getFixed(1);
1673   }
1674 
1675   // Divide the input until we get to a supported size.  This will always
1676   // end with a scalar if the target doesn't support vectors.
1677   while (EltCnt.getKnownMinValue() > 1 &&
1678          !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
1679     EltCnt = EltCnt.divideCoefficientBy(2);
1680     NumVectorRegs <<= 1;
1681   }
1682 
1683   NumIntermediates = NumVectorRegs;
1684 
1685   EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt);
1686   if (!isTypeLegal(NewVT))
1687     NewVT = EltTy;
1688   IntermediateVT = NewVT;
1689 
1690   MVT DestVT = getRegisterType(Context, NewVT);
1691   RegisterVT = DestVT;
1692 
1693   if (EVT(DestVT).bitsLT(NewVT)) {  // Value is expanded, e.g. i64 -> i16.
1694     TypeSize NewVTSize = NewVT.getSizeInBits();
1695     // Convert sizes such as i33 to i64.
1696     if (!llvm::has_single_bit<uint32_t>(NewVTSize.getKnownMinValue()))
1697       NewVTSize = NewVTSize.coefficientNextPowerOf2();
1698     return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1699   }
1700 
1701   // Otherwise, promotion or legal types use the same number of registers as
1702   // the vector decimated to the appropriate level.
1703   return NumVectorRegs;
1704 }
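// A hedged usage sketch (TLI and Ctx are stand-in names; the results depend
// on which types the target marks legal): on a target where v4f32 is legal
// but v8f32 is not and the action is to split,
//
//   EVT IntermediateVT;
//   MVT RegisterVT;
//   unsigned NumIntermediates;
//   unsigned NumRegs = TLI.getVectorTypeBreakdown(
//       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
//
// halves the element count once and returns NumRegs == 2 with
// IntermediateVT == RegisterVT == v4f32, matching the v8f32 example above.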
1705 
1706 bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
1707                                                 uint64_t NumCases,
1708                                                 uint64_t Range,
1709                                                 ProfileSummaryInfo *PSI,
1710                                                 BlockFrequencyInfo *BFI) const {
1711   // FIXME: This function checks the maximum table size and density, but the
1712   // minimum size is not checked. It would be nice if the minimum size check
1713   // were also folded into this function. Currently, the minimum size check is
1714   // performed in findJumpTable() in SelectionDAGBuilder and
1715   // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
1716   const bool OptForSize =
1717       llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
1718   const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
1719   const unsigned MaxJumpTableSize = getMaximumJumpTableSize();
1720 
1721   // Check whether the number of cases is small enough and
1722   // the range is dense enough for a jump table.
1723   return (OptForSize || Range <= MaxJumpTableSize) &&
1724          (NumCases * 100 >= Range * MinDensity);
1725 }
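// Worked example with assumed numbers: given NumCases == 40 spread over
// Range == 100 and a minimum density of 10%, the check reads
// 40 * 100 >= 100 * 10, so the switch qualifies as long as the range also
// fits under the maximum jump table size (or we are optimizing for size).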
1726 
1727 MVT TargetLoweringBase::getPreferredSwitchConditionType(LLVMContext &Context,
1728                                                         EVT ConditionVT) const {
1729   return getRegisterType(Context, ConditionVT);
1730 }
1731 
1732 /// Get the EVTs and ArgFlags collections that represent the legalized return
1733 /// type of the given function.  This does not require a DAG or a return value,
1734 /// and is suitable for use before any DAGs for the function are constructed.
1735 /// TODO: Move this out of TargetLowering.cpp.
1736 void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
1737                          AttributeList attr,
1738                          SmallVectorImpl<ISD::OutputArg> &Outs,
1739                          const TargetLowering &TLI, const DataLayout &DL) {
1740   SmallVector<EVT, 4> ValueVTs;
1741   ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
1742   unsigned NumValues = ValueVTs.size();
1743   if (NumValues == 0) return;
1744 
1745   for (unsigned j = 0, f = NumValues; j != f; ++j) {
1746     EVT VT = ValueVTs[j];
1747     ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1748 
1749     if (attr.hasRetAttr(Attribute::SExt))
1750       ExtendKind = ISD::SIGN_EXTEND;
1751     else if (attr.hasRetAttr(Attribute::ZExt))
1752       ExtendKind = ISD::ZERO_EXTEND;
1753 
1754     if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1755       VT = TLI.getTypeForExtReturn(ReturnType->getContext(), VT, ExtendKind);
1756 
1757     unsigned NumParts =
1758         TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
1759     MVT PartVT =
1760         TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);
1761 
1762     // 'inreg' on a function refers to the return value.
1763     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1764     if (attr.hasRetAttr(Attribute::InReg))
1765       Flags.setInReg();
1766 
1767     // Propagate extension type if any
1768     if (attr.hasRetAttr(Attribute::SExt))
1769       Flags.setSExt();
1770     else if (attr.hasRetAttr(Attribute::ZExt))
1771       Flags.setZExt();
1772 
1773     for (unsigned i = 0; i < NumParts; ++i)
1774       Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0));
1775   }
1776 }
1777 
1778 Align TargetLoweringBase::getByValTypeAlignment(Type *Ty,
1779                                                 const DataLayout &DL) const {
1780   return DL.getABITypeAlign(Ty);
1781 }
1782 
1783 bool TargetLoweringBase::allowsMemoryAccessForAlignment(
1784     LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
1785     Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
1786   // Check if the specified alignment is sufficient based on the data layout.
1787   // TODO: While using the data layout works in practice, a better solution
1788   // would be to implement this check directly (make this a virtual function).
1789   // For example, the ABI alignment may change based on software platform while
1790   // this function should only be affected by hardware implementation.
1791   Type *Ty = VT.getTypeForEVT(Context);
1792   if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) {
1793     // Assume that an access that meets the ABI-specified alignment is fast.
1794     if (Fast != nullptr)
1795       *Fast = 1;
1796     return true;
1797   }
1798 
1799   // This is a misaligned access.
1800   return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
1801 }
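// For instance, an i32 access with Align(4) in a typical data layout meets
// the ABI alignment, so the fast path above answers true (and fast) without
// consulting the target; the same access with Align(1) instead defers to
// allowsMisalignedMemoryAccesses, whose answer is target-specific.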
1802 
1803 bool TargetLoweringBase::allowsMemoryAccessForAlignment(
1804     LLVMContext &Context, const DataLayout &DL, EVT VT,
1805     const MachineMemOperand &MMO, unsigned *Fast) const {
1806   return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
1807                                         MMO.getAlign(), MMO.getFlags(), Fast);
1808 }
1809 
1810 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1811                                             const DataLayout &DL, EVT VT,
1812                                             unsigned AddrSpace, Align Alignment,
1813                                             MachineMemOperand::Flags Flags,
1814                                             unsigned *Fast) const {
1815   return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
1816                                         Flags, Fast);
1817 }
1818 
1819 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1820                                             const DataLayout &DL, EVT VT,
1821                                             const MachineMemOperand &MMO,
1822                                             unsigned *Fast) const {
1823   return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
1824                             MMO.getFlags(), Fast);
1825 }
1826 
1827 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1828                                             const DataLayout &DL, LLT Ty,
1829                                             const MachineMemOperand &MMO,
1830                                             unsigned *Fast) const {
1831   EVT VT = getApproximateEVTForLLT(Ty, Context);
1832   return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
1833                             MMO.getFlags(), Fast);
1834 }
1835 
1836 //===----------------------------------------------------------------------===//
1837 //  TargetTransformInfo Helpers
1838 //===----------------------------------------------------------------------===//
1839 
1840 int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
1841   enum InstructionOpcodes {
1842 #define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
1843 #define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
1844 #include "llvm/IR/Instruction.def"
1845   };
1846   switch (static_cast<InstructionOpcodes>(Opcode)) {
1847   case Ret:            return 0;
1848   case Br:             return 0;
1849   case Switch:         return 0;
1850   case IndirectBr:     return 0;
1851   case Invoke:         return 0;
1852   case CallBr:         return 0;
1853   case Resume:         return 0;
1854   case Unreachable:    return 0;
1855   case CleanupRet:     return 0;
1856   case CatchRet:       return 0;
1857   case CatchPad:       return 0;
1858   case CatchSwitch:    return 0;
1859   case CleanupPad:     return 0;
1860   case FNeg:           return ISD::FNEG;
1861   case Add:            return ISD::ADD;
1862   case FAdd:           return ISD::FADD;
1863   case Sub:            return ISD::SUB;
1864   case FSub:           return ISD::FSUB;
1865   case Mul:            return ISD::MUL;
1866   case FMul:           return ISD::FMUL;
1867   case UDiv:           return ISD::UDIV;
1868   case SDiv:           return ISD::SDIV;
1869   case FDiv:           return ISD::FDIV;
1870   case URem:           return ISD::UREM;
1871   case SRem:           return ISD::SREM;
1872   case FRem:           return ISD::FREM;
1873   case Shl:            return ISD::SHL;
1874   case LShr:           return ISD::SRL;
1875   case AShr:           return ISD::SRA;
1876   case And:            return ISD::AND;
1877   case Or:             return ISD::OR;
1878   case Xor:            return ISD::XOR;
1879   case Alloca:         return 0;
1880   case Load:           return ISD::LOAD;
1881   case Store:          return ISD::STORE;
1882   case GetElementPtr:  return 0;
1883   case Fence:          return 0;
1884   case AtomicCmpXchg:  return 0;
1885   case AtomicRMW:      return 0;
1886   case Trunc:          return ISD::TRUNCATE;
1887   case ZExt:           return ISD::ZERO_EXTEND;
1888   case SExt:           return ISD::SIGN_EXTEND;
1889   case FPToUI:         return ISD::FP_TO_UINT;
1890   case FPToSI:         return ISD::FP_TO_SINT;
1891   case UIToFP:         return ISD::UINT_TO_FP;
1892   case SIToFP:         return ISD::SINT_TO_FP;
1893   case FPTrunc:        return ISD::FP_ROUND;
1894   case FPExt:          return ISD::FP_EXTEND;
1895   case PtrToInt:       return ISD::BITCAST;
1896   case IntToPtr:       return ISD::BITCAST;
1897   case BitCast:        return ISD::BITCAST;
1898   case AddrSpaceCast:  return ISD::ADDRSPACECAST;
1899   case ICmp:           return ISD::SETCC;
1900   case FCmp:           return ISD::SETCC;
1901   case PHI:            return 0;
1902   case Call:           return 0;
1903   case Select:         return ISD::SELECT;
1904   case UserOp1:        return 0;
1905   case UserOp2:        return 0;
1906   case VAArg:          return 0;
1907   case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
1908   case InsertElement:  return ISD::INSERT_VECTOR_ELT;
1909   case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
1910   case ExtractValue:   return ISD::MERGE_VALUES;
1911   case InsertValue:    return ISD::MERGE_VALUES;
1912   case LandingPad:     return 0;
1913   case Freeze:         return ISD::FREEZE;
1914   }
1915 
1916   llvm_unreachable("Unknown instruction type encountered!");
1917 }
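// Example mappings: InstructionOpcodeToISD(Instruction::Add) yields ISD::ADD
// and Instruction::Load yields ISD::LOAD, while control-flow opcodes such as
// Instruction::Br map to 0 because they have no single corresponding ISD
// node.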
1918 
1919 int TargetLoweringBase::IntrinsicIDToISD(Intrinsic::ID ID) const {
1920   switch (ID) {
1921   case Intrinsic::exp:
1922     return ISD::FEXP;
1923   case Intrinsic::exp2:
1924     return ISD::FEXP2;
1925   case Intrinsic::log:
1926     return ISD::FLOG;
1927   default:
1928     return ISD::DELETED_NODE;
1929   }
1930 }
1931 
1932 Value *
1933 TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
1934                                                        bool UseTLS) const {
1935   // compiler-rt provides a variable with a magic name.  Targets that do not
1936   // link with compiler-rt may also provide such a variable.
1937   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1938   const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
1939   auto UnsafeStackPtr =
1940       dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
1941 
1942   const DataLayout &DL = M->getDataLayout();
1943   PointerType *StackPtrTy = DL.getAllocaPtrType(M->getContext());
1944 
1945   if (!UnsafeStackPtr) {
1946     auto TLSModel = UseTLS ?
1947         GlobalValue::InitialExecTLSModel :
1948         GlobalValue::NotThreadLocal;
1949     // The global variable is not defined yet; define it ourselves.
1950     // We use the initial-exec TLS model because we do not support the
1951     // variable living anywhere other than in the main executable.
1952     UnsafeStackPtr = new GlobalVariable(
1953         *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
1954         UnsafeStackPtrVar, nullptr, TLSModel);
1955   } else {
1956     // The variable exists, check its type and attributes.
1957     //
1958     // FIXME: Move to IR verifier.
1959     if (UnsafeStackPtr->getValueType() != StackPtrTy)
1960       report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
1961     if (UseTLS != UnsafeStackPtr->isThreadLocal())
1962       report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
1963                          (UseTLS ? "" : "not ") + "be thread-local");
1964   }
1965   return UnsafeStackPtr;
1966 }
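// With UseTLS == true and no pre-existing definition, the global created
// above corresponds roughly to the following IR (modulo target specifics):
//
//   @__safestack_unsafe_stack_ptr = external thread_local(initialexec)
//                                   global ptr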
1967 
1968 Value *
1969 TargetLoweringBase::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
1970   if (!TM.getTargetTriple().isAndroid())
1971     return getDefaultSafeStackPointerLocation(IRB, true);
1972 
1973   // Android provides a libc function to retrieve the address of the current
1974   // thread's unsafe stack pointer.
1975   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1976   auto *PtrTy = PointerType::getUnqual(M->getContext());
1977   FunctionCallee Fn =
1978       M->getOrInsertFunction("__safestack_pointer_address", PtrTy);
1979   return IRB.CreateCall(Fn);
1980 }
1981 
1982 //===----------------------------------------------------------------------===//
1983 //  Loop Strength Reduction hooks
1984 //===----------------------------------------------------------------------===//
1985 
1986 /// isLegalAddressingMode - Return true if the addressing mode represented
1987 /// by AM is legal for this target, for a load/store of the specified type.
1988 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
1989                                                const AddrMode &AM, Type *Ty,
1990                                                unsigned AS, Instruction *I) const {
1991   // The default implementation supports a conservative RISC-style addressing
1992   // mode: r+r and r+i.
1993 
1994   // Scalable offsets not supported
1995   if (AM.ScalableOffset)
1996     return false;
1997 
1998   // Allow a sign-extended 16-bit immediate field.
1999   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
2000     return false;
2001 
2002   // No global is ever allowed as a base.
2003   if (AM.BaseGV)
2004     return false;
2005 
2006   // Only support r+r.
2007   switch (AM.Scale) {
2008   case 0:  // "r+i" or just "i", depending on HasBaseReg.
2009     break;
2010   case 1:
2011     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
2012       return false;
2013     // Otherwise we have r+r or r+i.
2014     break;
2015   case 2:
2016     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
2017       return false;
2018     // Allow 2*r as r+r.
2019     break;
2020   default: // Don't allow n * r
2021     return false;
2022   }
2023 
2024   return true;
2025 }
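// Under this default, an AddrMode with HasBaseReg and Scale == 1 (plain r+r)
// is accepted, as is a base register plus a small immediate such as
// BaseOffs == 42 (r+i), and a bare Scale == 2 (2*r treated as r+r).
// Rejected examples: any global base (AM.BaseGV), Scale == 2 combined with a
// base register or offset, any Scale > 2, and offsets outside the 16-bit
// window checked above.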
2026 
2027 //===----------------------------------------------------------------------===//
2028 //  Stack Protector
2029 //===----------------------------------------------------------------------===//
2030 
2031 // For OpenBSD return its special guard variable. Otherwise return nullptr,
2032 // so that SelectionDAG handles SSP.
2033 Value *TargetLoweringBase::getIRStackGuard(IRBuilderBase &IRB) const {
2034   if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
2035     Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
2036     const DataLayout &DL = M.getDataLayout();
2037     PointerType *PtrTy =
2038         PointerType::get(M.getContext(), DL.getDefaultGlobalsAddressSpace());
2039     GlobalVariable *G = M.getOrInsertGlobal("__guard_local", PtrTy);
2040     G->setVisibility(GlobalValue::HiddenVisibility);
2041     return G;
2042   }
2043   return nullptr;
2044 }
2045 
2046 // Currently only support "standard" __stack_chk_guard.
2047 // TODO: add LOAD_STACK_GUARD support.
2048 void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
2049   if (!M.getNamedValue("__stack_chk_guard")) {
2050     auto *GV = new GlobalVariable(M, PointerType::getUnqual(M.getContext()),
2051                                   false, GlobalVariable::ExternalLinkage,
2052                                   nullptr, "__stack_chk_guard");
2053 
2054     // FreeBSD has "__stack_chk_guard" defined externally in libc.so.
2055     if (M.getDirectAccessExternalData() &&
2056         !TM.getTargetTriple().isWindowsGNUEnvironment() &&
2057         !(TM.getTargetTriple().isPPC64() &&
2058           TM.getTargetTriple().isOSFreeBSD()) &&
2059         (!TM.getTargetTriple().isOSDarwin() ||
2060          TM.getRelocationModel() == Reloc::Static))
2061       GV->setDSOLocal(true);
2062   }
2063 }
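// On triples where the dso_local conditions above hold, the declaration
// created here is roughly:
//
//   @__stack_chk_guard = external dso_local global ptr
//
// though the exact IR depends on the triple and relocation model.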
2064 
2065 // Currently only support "standard" __stack_chk_guard.
2066 // TODO: add LOAD_STACK_GUARD support.
2067 Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
2068   if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
2069     return M.getNamedValue("__guard_local");
2070   }
2071   return M.getNamedValue("__stack_chk_guard");
2072 }
2073 
2074 Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
2075   return nullptr;
2076 }
2077 
2078 unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
2079   return MinimumJumpTableEntries;
2080 }
2081 
2082 void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
2083   MinimumJumpTableEntries = Val;
2084 }
2085 
2086 unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
2087   return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
2088 }
2089 
2090 unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
2091   return MaximumJumpTableSize;
2092 }
2093 
2094 void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
2095   MaximumJumpTableSize = Val;
2096 }
2097 
2098 bool TargetLoweringBase::isJumpTableRelative() const {
2099   return getTargetMachine().isPositionIndependent();
2100 }
2101 
2102 Align TargetLoweringBase::getPrefLoopAlignment(MachineLoop *ML) const {
2103   if (TM.Options.LoopAlignment)
2104     return Align(TM.Options.LoopAlignment);
2105   return PrefLoopAlignment;
2106 }
2107 
2108 unsigned TargetLoweringBase::getMaxPermittedBytesForAlignment(
2109     MachineBasicBlock *MBB) const {
2110   return MaxBytesForAlignment;
2111 }
2112 
2113 //===----------------------------------------------------------------------===//
2114 //  Reciprocal Estimates
2115 //===----------------------------------------------------------------------===//
2116 
2117 /// Get the reciprocal estimate attribute string for a function that will
2118 /// override the target defaults.
2119 static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
2120   const Function &F = MF.getFunction();
2121   return F.getFnAttribute("reciprocal-estimates").getValueAsString();
2122 }
2123 
2124 /// Construct a string for the given reciprocal operation of the given type.
2125 /// This string should match the corresponding option to the front-end's
2126 /// "-mrecip" flag assuming those strings have been passed through in an
2127 /// attribute string. For example, "vec-divf" for a division of a vXf32.
2128 static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
2129   std::string Name = VT.isVector() ? "vec-" : "";
2130 
2131   Name += IsSqrt ? "sqrt" : "div";
2132 
2133   // TODO: Handle other float types?
2134   if (VT.getScalarType() == MVT::f64) {
2135     Name += "d";
2136   } else if (VT.getScalarType() == MVT::f16) {
2137     Name += "h";
2138   } else {
2139     assert(VT.getScalarType() == MVT::f32 &&
2140            "Unexpected FP type for reciprocal estimate");
2141     Name += "f";
2142   }
2143 
2144   return Name;
2145 }
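// Example outputs: getReciprocalOpName(/*IsSqrt=*/false, MVT::v4f32) produces
// "vec-divf", and getReciprocalOpName(/*IsSqrt=*/true, MVT::f64) produces
// "sqrtd".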
2146 
2147 /// Return the character position and value (a single numeric character) of a
2148 /// customized refinement operation in the input string if it exists. Return
2149 /// false if there is no customized refinement step count.
2150 static bool parseRefinementStep(StringRef In, size_t &Position,
2151                                 uint8_t &Value) {
2152   const char RefStepToken = ':';
2153   Position = In.find(RefStepToken);
2154   if (Position == StringRef::npos)
2155     return false;
2156 
2157   StringRef RefStepString = In.substr(Position + 1);
2158   // Allow exactly one numeric character for the additional refinement
2159   // step parameter.
2160   if (RefStepString.size() == 1) {
2161     char RefStepChar = RefStepString[0];
2162     if (isDigit(RefStepChar)) {
2163       Value = RefStepChar - '0';
2164       return true;
2165     }
2166   }
2167   report_fatal_error("Invalid refinement step for -recip.");
2168 }
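// For example, parseRefinementStep("vec-divf:3", Pos, Val) returns true with
// Pos == 8 (the position of ':') and Val == 3; an input with no ':' returns
// false, and a multi-digit step such as "divf:99" hits the fatal error
// because only a single numeric character is accepted.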
2169 
2170 /// For the input attribute string, return one of the ReciprocalEstimate enum
2171 /// status values (enabled, disabled, or not specified) for this operation on
2172 /// the specified data type.
2173 static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
2174   if (Override.empty())
2175     return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2176 
2177   SmallVector<StringRef, 4> OverrideVector;
2178   Override.split(OverrideVector, ',');
2179   unsigned NumArgs = OverrideVector.size();
2180 
2181   // Check if "all", "none", or "default" was specified.
2182   if (NumArgs == 1) {
2183     // Look for an optional setting of the number of refinement steps needed
2184     // for this type of reciprocal operation.
2185     size_t RefPos;
2186     uint8_t RefSteps;
2187     if (parseRefinementStep(Override, RefPos, RefSteps)) {
2188       // Split the string for further processing.
2189       Override = Override.substr(0, RefPos);
2190     }
2191 
2192     // All reciprocal types are enabled.
2193     if (Override == "all")
2194       return TargetLoweringBase::ReciprocalEstimate::Enabled;
2195 
2196     // All reciprocal types are disabled.
2197     if (Override == "none")
2198       return TargetLoweringBase::ReciprocalEstimate::Disabled;
2199 
2200     // Target defaults for enablement are used.
2201     if (Override == "default")
2202       return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2203   }
2204 
2205   // The attribute string may omit the size suffix ('f'/'d').
2206   std::string VTName = getReciprocalOpName(IsSqrt, VT);
2207   std::string VTNameNoSize = VTName;
2208   VTNameNoSize.pop_back();
2209   static const char DisabledPrefix = '!';
2210 
2211   for (StringRef RecipType : OverrideVector) {
2212     size_t RefPos;
2213     uint8_t RefSteps;
2214     if (parseRefinementStep(RecipType, RefPos, RefSteps))
2215       RecipType = RecipType.substr(0, RefPos);
2216 
2217     // Ignore the disablement token for string matching.
2218     bool IsDisabled = RecipType[0] == DisabledPrefix;
2219     if (IsDisabled)
2220       RecipType = RecipType.substr(1);
2221 
2222     if (RecipType == VTName || RecipType == VTNameNoSize)
2223       return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
2224                         : TargetLoweringBase::ReciprocalEstimate::Enabled;
2225   }
2226 
2227   return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2228 }
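// Sample attribute strings: "all" enables every estimate and "none" disables
// them, while a list such as "!sqrtf,vec-divf" disables the scalar f32
// square-root estimate (the leading '!') and enables the v4f32 division
// estimate; any operation/type pair not mentioned remains Unspecified.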
2229 
2230 /// For the input attribute string, return the customized refinement step count
2231 /// for this operation on the specified data type. If the step count does not
2232 /// exist, return the ReciprocalEstimate enum value for unspecified.
2233 static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
2234   if (Override.empty())
2235     return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2236 
2237   SmallVector<StringRef, 4> OverrideVector;
2238   Override.split(OverrideVector, ',');
2239   unsigned NumArgs = OverrideVector.size();
2240 
2241   // Check if "all", "default", or "none" was specified.
2242   if (NumArgs == 1) {
2243     // Look for an optional setting of the number of refinement steps needed
2244     // for this type of reciprocal operation.
2245     size_t RefPos;
2246     uint8_t RefSteps;
2247     if (!parseRefinementStep(Override, RefPos, RefSteps))
2248       return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2249 
2250     // Split the string for further processing.
2251     Override = Override.substr(0, RefPos);
2252     assert(Override != "none" &&
2253            "Disabled reciprocals, but specified refinement steps?");
2254 
2255     // If this is a general override, return the specified number of steps.
2256     if (Override == "all" || Override == "default")
2257       return RefSteps;
2258   }
2259 
2260   // The attribute string may omit the size suffix ('f'/'d').
2261   std::string VTName = getReciprocalOpName(IsSqrt, VT);
2262   std::string VTNameNoSize = VTName;
2263   VTNameNoSize.pop_back();
2264 
2265   for (StringRef RecipType : OverrideVector) {
2266     size_t RefPos;
2267     uint8_t RefSteps;
2268     if (!parseRefinementStep(RecipType, RefPos, RefSteps))
2269       continue;
2270 
2271     RecipType = RecipType.substr(0, RefPos);
2272     if (RecipType == VTName || RecipType == VTNameNoSize)
2273       return RefSteps;
2274   }
2275 
2276   return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2277 }
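// For example, with Override == "sqrtf:2" a query for the scalar f32
// square-root estimate returns 2 refinement steps, while a query for f32
// division finds no matching entry and returns Unspecified.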
2278 
2279 int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
2280                                                     MachineFunction &MF) const {
2281   return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
2282 }
2283 
2284 int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
2285                                                    MachineFunction &MF) const {
2286   return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
2287 }
2288 
2289 int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
2290                                                MachineFunction &MF) const {
2291   return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
2292 }
2293 
2294 int TargetLoweringBase::getDivRefinementSteps(EVT VT,
2295                                               MachineFunction &MF) const {
2296   return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
2297 }
2298 
2299 bool TargetLoweringBase::isLoadBitCastBeneficial(
2300     EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG,
2301     const MachineMemOperand &MMO) const {
2302   // Single-element vectors are scalarized, so we should generally avoid having
2303   // any memory operations on such types, as they would get scalarized too.
2304   if (LoadVT.isFixedLengthVector() && BitcastVT.isFixedLengthVector() &&
2305       BitcastVT.getVectorNumElements() == 1)
2306     return false;
2307 
2308   // Don't do this if we could do an indexed load on the original type but
2309   // not on the new one.
2310   if (!LoadVT.isSimple() || !BitcastVT.isSimple())
2311     return true;
2312 
2313   MVT LoadMVT = LoadVT.getSimpleVT();
2314 
2315   // Don't bother doing this if it's just going to be promoted again later, as
2316   // doing so might interfere with other combines.
2317   if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
2318       getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
2319     return false;
2320 
2321   unsigned Fast = 0;
2322   return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
2323                             MMO, &Fast) &&
2324          Fast;
2325 }
2326 
2327 void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
2328   MF.getRegInfo().freezeReservedRegs();
2329 }
2330 
2331 MachineMemOperand::Flags TargetLoweringBase::getLoadMemOperandFlags(
2332     const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC,
2333     const TargetLibraryInfo *LibInfo) const {
2334   MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
2335   if (LI.isVolatile())
2336     Flags |= MachineMemOperand::MOVolatile;
2337 
2338   if (LI.hasMetadata(LLVMContext::MD_nontemporal))
2339     Flags |= MachineMemOperand::MONonTemporal;
2340 
2341   if (LI.hasMetadata(LLVMContext::MD_invariant_load))
2342     Flags |= MachineMemOperand::MOInvariant;
2343 
2344   if (isDereferenceableAndAlignedPointer(LI.getPointerOperand(), LI.getType(),
2345                                          LI.getAlign(), DL, &LI, AC,
2346                                          /*DT=*/nullptr, LibInfo))
2347     Flags |= MachineMemOperand::MODereferenceable;
2348 
2349   Flags |= getTargetMMOFlags(LI);
2350   return Flags;
2351 }
2352 
2353 MachineMemOperand::Flags
2354 TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI,
2355                                             const DataLayout &DL) const {
2356   MachineMemOperand::Flags Flags = MachineMemOperand::MOStore;
2357 
2358   if (SI.isVolatile())
2359     Flags |= MachineMemOperand::MOVolatile;
2360 
2361   if (SI.hasMetadata(LLVMContext::MD_nontemporal))
2362     Flags |= MachineMemOperand::MONonTemporal;
2363 
2364   // FIXME: Not preserving dereferenceable
2365   Flags |= getTargetMMOFlags(SI);
2366   return Flags;
2367 }
2368 
2369 MachineMemOperand::Flags
2370 TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
2371                                              const DataLayout &DL) const {
2372   auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
2373 
2374   if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
2375     if (RMW->isVolatile())
2376       Flags |= MachineMemOperand::MOVolatile;
2377   } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) {
2378     if (CmpX->isVolatile())
2379       Flags |= MachineMemOperand::MOVolatile;
2380   } else
2381     llvm_unreachable("not an atomic instruction");
2382 
2383   // FIXME: Not preserving dereferenceable
2384   Flags |= getTargetMMOFlags(AI);
2385   return Flags;
2386 }
2387 
2388 Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
2389                                                   Instruction *Inst,
2390                                                   AtomicOrdering Ord) const {
2391   if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
2392     return Builder.CreateFence(Ord);
2393   else
2394     return nullptr;
2395 }
2396 
2397 Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
2398                                                    Instruction *Inst,
2399                                                    AtomicOrdering Ord) const {
2400   if (isAcquireOrStronger(Ord))
2401     return Builder.CreateFence(Ord);
2402   else
2403     return nullptr;
2404 }
2405 
2406 //===----------------------------------------------------------------------===//
2407 //  GlobalISel Hooks
2408 //===----------------------------------------------------------------------===//
2409 
2410 bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
2411                                         const TargetTransformInfo *TTI) const {
2412   auto &MF = *MI.getMF();
2413   auto &MRI = MF.getRegInfo();
2414   // Assuming a spill and reload of a value has a cost of 1 instruction each,
2415   // this helper function computes the maximum number of uses we should consider
2416   // for remat. E.g. on arm64 global addresses take 2 insts to materialize. We
2417   // break even in terms of code size when the original MI has 2 users vs
2418   // choosing to potentially spill. Any more than 2 users and we have a net code
2419   // size increase. This doesn't take into account register pressure though.
2420   auto maxUses = [](unsigned RematCost) {
2421     // A cost of 1 means remats are basically free.
2422     if (RematCost == 1)
2423       return std::numeric_limits<unsigned>::max();
2424     if (RematCost == 2)
2425       return 2U;
2426 
2427     // Remat is too expensive, only sink if there's one user.
2428     if (RematCost > 2)
2429       return 1U;
2430     llvm_unreachable("Unexpected remat cost");
2431   };
2432 
2433   switch (MI.getOpcode()) {
2434   default:
2435     return false;
2436   // Constant-like instructions should be close to their users.
2437   // We don't want long live-ranges for them.
2438   case TargetOpcode::G_CONSTANT:
2439   case TargetOpcode::G_FCONSTANT:
2440   case TargetOpcode::G_FRAME_INDEX:
2441   case TargetOpcode::G_INTTOPTR:
2442     return true;
2443   case TargetOpcode::G_GLOBAL_VALUE: {
2444     unsigned RematCost = TTI->getGISelRematGlobalCost();
2445     Register Reg = MI.getOperand(0).getReg();
2446     unsigned MaxUses = maxUses(RematCost);
2447     if (MaxUses == UINT_MAX)
2448       return true; // Remats are "free" so always localize.
2449     return MRI.hasAtMostUserInstrs(Reg, MaxUses);
2450   }
2451   }
2452 }
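// Concretely: with TTI->getGISelRematGlobalCost() == 2 (the arm64-style case
// described above), a G_GLOBAL_VALUE is localized only while it has at most
// two using instructions; a remat cost of 1 means always localize, and any
// higher cost localizes only single-use values.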
2453