//==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API of helper functions used throughout the
/// GlobalISel pipeline.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
#define LLVM_CODEGEN_GLOBALISEL_UTILS_H

#include "GISelWorkList.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include <cstdint>

namespace llvm {

class AnalysisUsage;
class LostDebugLocObserver;
class MachineBasicBlock;
class BlockFrequencyInfo;
class GISelKnownBits;
class MachineFunction;
class MachineInstr;
class MachineIRBuilder;
class MachineOperand;
class MachineOptimizationRemarkEmitter;
class MachineOptimizationRemarkMissed;
struct MachinePointerInfo;
class MachineRegisterInfo;
class MCInstrDesc;
class ProfileSummaryInfo;
class RegisterBankInfo;
class TargetInstrInfo;
class TargetLowering;
class TargetPassConfig;
class TargetRegisterInfo;
class TargetRegisterClass;
class ConstantFP;
class APFloat;

// Convenience macros for dealing with vector reduction opcodes.
#define GISEL_VECREDUCE_CASES_ALL \
  case TargetOpcode::G_VECREDUCE_SEQ_FADD: \
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL: \
  case TargetOpcode::G_VECREDUCE_FADD: \
  case TargetOpcode::G_VECREDUCE_FMUL: \
  case TargetOpcode::G_VECREDUCE_FMAX: \
  case TargetOpcode::G_VECREDUCE_FMIN: \
  case TargetOpcode::G_VECREDUCE_FMAXIMUM: \
  case TargetOpcode::G_VECREDUCE_FMINIMUM: \
  case TargetOpcode::G_VECREDUCE_ADD: \
  case TargetOpcode::G_VECREDUCE_MUL: \
  case TargetOpcode::G_VECREDUCE_AND: \
  case TargetOpcode::G_VECREDUCE_OR: \
  case TargetOpcode::G_VECREDUCE_XOR: \
  case TargetOpcode::G_VECREDUCE_SMAX: \
  case TargetOpcode::G_VECREDUCE_SMIN: \
  case TargetOpcode::G_VECREDUCE_UMAX: \
  case TargetOpcode::G_VECREDUCE_UMIN:

#define GISEL_VECREDUCE_CASES_NONSEQ \
  case TargetOpcode::G_VECREDUCE_FADD: \
  case TargetOpcode::G_VECREDUCE_FMUL: \
  case TargetOpcode::G_VECREDUCE_FMAX: \
  case TargetOpcode::G_VECREDUCE_FMIN: \
  case TargetOpcode::G_VECREDUCE_FMAXIMUM: \
  case TargetOpcode::G_VECREDUCE_FMINIMUM: \
  case TargetOpcode::G_VECREDUCE_ADD: \
  case TargetOpcode::G_VECREDUCE_MUL: \
  case TargetOpcode::G_VECREDUCE_AND: \
  case TargetOpcode::G_VECREDUCE_OR: \
  case TargetOpcode::G_VECREDUCE_XOR: \
  case TargetOpcode::G_VECREDUCE_SMAX: \
  case TargetOpcode::G_VECREDUCE_SMIN: \
  case TargetOpcode::G_VECREDUCE_UMAX: \
  case TargetOpcode::G_VECREDUCE_UMIN:

/// Try to constrain Reg to the specified register class. If this fails,
/// create a new virtual register in the correct class.
///
/// \return The virtual register constrained to the right register class.
Register constrainRegToClass(MachineRegisterInfo &MRI,
                             const TargetInstrInfo &TII,
                             const RegisterBankInfo &RBI, Register Reg,
                             const TargetRegisterClass &RegClass);

/// Constrain the register operand \p RegMO, so that it is now constrained to
/// the TargetRegisterClass passed as an argument (\p RegClass).
/// If this fails, create a new virtual register in the correct class and insert
/// a COPY before \p InsertPt if it is a use or after if it is a definition.
/// In both cases, the function also updates the register of \p RegMO. The debug
/// location of \p InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt,
                                  const TargetRegisterClass &RegClass,
                                  MachineOperand &RegMO);

/// Try to constrain Reg so that it is usable by argument OpIdx of the provided
/// MCInstrDesc \p II. If this fails, create a new virtual register in the
/// correct class and insert a COPY before \p InsertPt if it is a use or after
/// if it is a definition. In both cases, the function also updates the register
/// of \p RegMO.
/// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
/// with RegClass obtained from the MCInstrDesc. The debug location of \p
/// InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt, const MCInstrDesc &II,
                                  MachineOperand &RegMO, unsigned OpIdx);

/// Mutate the newly-selected instruction \p I to constrain its (possibly
/// generic) virtual register operands to the instruction's register class.
/// This could involve inserting COPYs before (for uses) or after (for defs).
/// This requires the number of operands to match the instruction description.
/// \returns whether operand regclass constraining succeeded.
///
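/// A minimal usage sketch from a hypothetical target's instruction selector
/// (the opcode \c MyTarget::ADDWrr and the surrounding variables are assumed,
/// not part of this API):
/// \code
///   MachineInstrBuilder MIB =
///       MIRBuilder.buildInstr(MyTarget::ADDWrr, {DstReg}, {Src0, Src1});
///   if (!constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI))
///     return false; // Selection must fail if constraining failed.
/// \endcode
///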
// FIXME: Not all instructions have the same number of operands. We should
// probably expose a constrain helper per operand and let the target selector
// constrain individual registers, like fast-isel.
bool constrainSelectedInstRegOperands(MachineInstr &I,
                                      const TargetInstrInfo &TII,
                                      const TargetRegisterInfo &TRI,
                                      const RegisterBankInfo &RBI);

/// Check if DstReg can be replaced with SrcReg depending on the register
/// constraints.
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI);

/// Check whether an instruction \p MI is dead: it only defines dead virtual
/// registers, and doesn't have other side effects.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);

/// Report an ISel error as a missed optimization remark to the LLVMContext's
/// diagnostic stream. Set the FailedISel MachineFunction property.
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        const char *PassName, StringRef Msg,
                        const MachineInstr &MI);

/// Report an ISel warning as a missed optimization remark to the LLVMContext's
/// diagnostic stream.
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
std::optional<APInt> getIConstantVRegVal(Register VReg,
                                         const MachineRegisterInfo &MRI);

/// If \p VReg is defined by a G_CONSTANT whose value fits in an int64_t,
/// returns that value.
std::optional<int64_t> getIConstantVRegSExtVal(Register VReg,
                                               const MachineRegisterInfo &MRI);

/// Simple struct used to hold a constant integer value and a virtual
/// register.
struct ValueAndVReg {
  APInt Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_CONSTANT, returns its APInt value and def register.
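///
/// A minimal sketch of a common use in a combine (names such as \c ShiftAmtReg
/// and \c MRI are assumed locals, not part of this API):
/// \code
///   if (auto Cst = getIConstantVRegValWithLookThrough(ShiftAmtReg, MRI)) {
///     const APInt &Amt = Cst->Value; // Constant value, copies looked through.
///     Register CstDef = Cst->VReg;   // Def register of the root G_CONSTANT.
///     // ... fold using Amt / CstDef ...
///   }
/// \endcode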
std::optional<ValueAndVReg>
getIConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_CONSTANT or G_FCONSTANT, returns its value as APInt and def register.
std::optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI,
    bool LookThroughInstrs = true, bool LookThroughAnyExt = false);

struct FPValueAndVReg {
  APFloat Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_FCONSTANT, returns its APFloat value and def register.
std::optional<FPValueAndVReg>
getFConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

const ConstantFP* getConstantFPVRegVal(Register VReg,
                                       const MachineRegisterInfo &MRI);

/// See if Reg is defined by a single def instruction that is
/// Opcode. Also try to do trivial folding if it's a COPY with
/// same types. Returns null otherwise.
MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
                           const MachineRegisterInfo &MRI);

/// Simple struct used to hold a Register value and the instruction which
/// defines it.
struct DefinitionAndSourceRegister {
  MachineInstr *MI;
  Register Reg;
};

/// Find the def instruction for \p Reg and the underlying source register,
/// folding away any copies.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
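///
/// A possible usage sketch (\c Reg and \c MRI are assumed to be in scope):
/// \code
///   if (auto DefSrc = getDefSrcRegIgnoringCopies(Reg, MRI)) {
///     MachineInstr *Def = DefSrc->MI; // Defining instruction, copies skipped.
///     Register Src = DefSrc->Reg;     // Underlying source register.
///     // ... inspect Def / Src ...
///   }
/// \endcode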
std::optional<DefinitionAndSourceRegister>
getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Find the def instruction for \p Reg, folding away any trivial copies. May
/// return nullptr if \p Reg is not a generic virtual register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
MachineInstr *getDefIgnoringCopies(Register Reg,
                                   const MachineRegisterInfo &MRI);

/// Find the source register for \p Reg, folding away any trivial copies. It
/// will be an output register of the instruction that getDefIgnoringCopies
/// returns. May return an invalid register if \p Reg is not a generic virtual
/// register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Helper function to split a wide generic register into bitwise blocks with
/// the given Type (which implies the number of blocks needed). The generic
/// registers created are appended to \p VRegs, starting at bit 0 of \p Reg.
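///
/// For example, a sketch splitting a 64-bit value into two 32-bit pieces
/// (\c SrcReg, \c MIRBuilder and \c MRI are assumed to be in scope):
/// \code
///   SmallVector<Register, 2> Parts;
///   extractParts(SrcReg, LLT::scalar(32), 2, Parts, MIRBuilder, MRI);
///   // Parts[0] covers bits [0, 31] of SrcReg, Parts[1] covers bits [32, 63].
/// \endcode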
void extractParts(Register Reg, LLT Ty, int NumParts,
                  SmallVectorImpl<Register> &VRegs,
                  MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI);

/// Version which handles irregular splits.
bool extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
                  SmallVectorImpl<Register> &VRegs,
                  SmallVectorImpl<Register> &LeftoverVRegs,
                  MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI);

/// Version which handles irregular sub-vector splits.
void extractVectorParts(Register Reg, unsigned NumElts,
                        SmallVectorImpl<Register> &VRegs,
                        MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI);

// Templated variant of getOpcodeDef returning a MachineInstr derived T.
/// See if Reg is defined by a single def instruction of type T.
/// Also try to do trivial folding if it's a COPY with
/// same types. Returns null otherwise.
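///
/// For instance, a sketch using one of the instruction wrappers from
/// GenericMachineInstrs.h (\c VReg and \c MRI are assumed locals):
/// \code
///   if (auto *BV = getOpcodeDef<GBuildVector>(VReg, MRI)) {
///     // VReg is defined (possibly through copies) by a G_BUILD_VECTOR;
///     // operand 0 of *BV is the def, the remaining operands are the elements.
///   }
/// \endcode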
template <class T>
T *getOpcodeDef(Register Reg, const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return dyn_cast_or_null<T>(DefMI);
}

/// Returns an APFloat from Val converted to the appropriate size.
APFloat getAPFloatFromSize(double Val, unsigned Size);

/// Modify analysis usage so it preserves passes required for the SelectionDAG
/// fallback.
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);

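/// Tries to constant fold a binary operation with sources \p Op1 and \p Op2.
/// Returns std::nullopt on failure. A minimal sketch (the register and builder
/// names are assumed, not part of this API):
/// \code
///   if (auto Folded = ConstantFoldBinOp(TargetOpcode::G_ADD, LHS, RHS, MRI))
///     MIRBuilder.buildConstant(DstReg, *Folded);
/// \endcode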
std::optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                       const Register Op2,
                                       const MachineRegisterInfo &MRI);
std::optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);

/// Tries to constant fold a vector binop with sources \p Op1 and \p Op2.
/// Returns an empty vector on failure.
SmallVector<APInt> ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);

std::optional<APInt> ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
                                        const Register Op0,
                                        const MachineRegisterInfo &MRI);

std::optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                       uint64_t Imm,
                                       const MachineRegisterInfo &MRI);

std::optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                              Register Src,
                                              const MachineRegisterInfo &MRI);

/// Tries to constant fold a counting-zero operation (G_CTLZ or G_CTTZ) on \p
/// Src. If \p Src is a vector then it tries to do an element-wise constant
/// fold.
std::optional<SmallVector<unsigned>>
ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI,
                       std::function<unsigned(APInt)> CB);

std::optional<SmallVector<APInt>>
ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
                 const MachineRegisterInfo &MRI);

/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit is
/// set.
bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
                            GISelKnownBits *KnownBits = nullptr);

/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is true,
/// this returns if \p Val can be assumed to never be a signaling NaN.
bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                     bool SNaN = false);

/// Returns true if \p Val can be assumed to never be a signaling NaN.
inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
  return isKnownNeverNaN(Val, MRI, true);
}

Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);

/// Return a virtual register corresponding to the incoming argument register \p
/// PhysReg. This register is expected to have class \p RC, and optional type \p
/// RegTy. This assumes all references to the register will use the same type.
///
/// If there is an existing live-in argument register, it will be returned.
/// This will also ensure there is a valid copy from \p PhysReg into the
/// returned virtual register.
Register getFunctionLiveInPhysReg(MachineFunction &MF,
                                  const TargetInstrInfo &TII,
                                  MCRegister PhysReg,
                                  const TargetRegisterClass &RC,
                                  const DebugLoc &DL, LLT RegTy = LLT());

/// Return the least common multiple type of \p OrigTy and \p TargetTy, by
/// changing the number of vector elements or scalar bitwidth. The intent is a
/// G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be constructed from
/// \p OrigTy elements, and unmerged into \p TargetTy. It is an error to call
/// this function where one argument is a fixed vector and the other is a
/// scalable vector, since it is illegal to build a G_{MERGE|UNMERGE}_VALUES
/// between fixed and scalable vectors.
LLVM_READNONE
LLT getLCMType(LLT OrigTy, LLT TargetTy);

/// Return the smallest type that covers both \p OrigTy and \p TargetTy and is
/// a multiple of \p TargetTy.
LLVM_READNONE
LLT getCoverTy(LLT OrigTy, LLT TargetTy);

/// Return a type where the total size is the greatest common divisor of \p
/// OrigTy and \p TargetTy. This will try to either change the number of vector
/// elements, or bitwidth of scalars. The intent is the result type can be used
/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
/// with intermediate casts) can re-form \p TargetTy.
///
/// If these are vectors with different element types, this will try to produce
/// a vector with a compatible total size, but the element type of \p OrigTy. If
/// this can't be satisfied, this will produce a scalar smaller than the
/// original vector elements. It is an error to call this function where
/// one argument is a fixed vector and the other is a scalable vector, since it
/// is illegal to build a G_{MERGE|UNMERGE}_VALUES between fixed and scalable
/// vectors.
///
/// In the worst case, this returns LLT::scalar(1)
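///
/// A small illustration of the size arithmetic described above (the expected
/// result is inferred from this description, using fixed-width vector types):
/// \code
///   // Total sizes: <4 x s32> = 128 bits, <2 x s32> = 64 bits, GCD = 64 bits.
///   LLT Gcd = getGCDType(LLT::fixed_vector(4, 32), LLT::fixed_vector(2, 32));
///   // Gcd is expected to be <2 x s32>: 64 bits using OrigTy's element type.
/// \endcode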
LLVM_READNONE
LLT getGCDType(LLT OrigTy, LLT TargetTy);

/// Represents a value which can be a Register or a constant.
///
/// This is useful in situations where an instruction may have an interesting
/// register operand or interesting constant operand. For a concrete example,
/// \see getVectorSplat.
class RegOrConstant {
  int64_t Cst;
  Register Reg;
  bool IsReg;

public:
  explicit RegOrConstant(Register Reg) : Reg(Reg), IsReg(true) {}
  explicit RegOrConstant(int64_t Cst) : Cst(Cst), IsReg(false) {}
  bool isReg() const { return IsReg; }
  bool isCst() const { return !IsReg; }
  Register getReg() const {
    assert(isReg() && "Expected a register!");
    return Reg;
  }
  int64_t getCst() const {
    assert(isCst() && "Expected a constant!");
    return Cst;
  }
};

/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
/// If \p MI is not a splat, returns std::nullopt.
std::optional<int> getSplatIndex(MachineInstr &MI);

/// \returns the scalar integral splat value of \p Reg if possible.
std::optional<APInt> getIConstantSplatVal(const Register Reg,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar integral splat value defined by \p MI if possible.
std::optional<APInt> getIConstantSplatVal(const MachineInstr &MI,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value of \p Reg if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
                                                const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value defined by \p MI if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
                                                const MachineRegisterInfo &MRI);

/// Returns a floating point scalar constant of a build vector splat if it
/// exists. When \p AllowUndef == true, some elements can be undef but not all.
std::optional<FPValueAndVReg> getFConstantSplat(Register VReg,
                                                const MachineRegisterInfo &MRI,
                                                bool AllowUndef = true);

/// Return true if the specified register is defined by G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const Register Reg,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
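///
/// A possible combine-style use (\c SrcReg and \c MRI are assumed locals):
/// \code
///   if (const MachineInstr *Def = getDefIgnoringCopies(SrcReg, MRI))
///     if (isBuildVectorAllZeros(*Def, MRI, /*AllowUndef=*/true)) {
///       // SrcReg is a vector whose elements are all zero or undef.
///     }
/// \endcode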
bool isBuildVectorAllZeros(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI,
                           bool AllowUndef = false);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
bool isBuildVectorAllOnes(const MachineInstr &MI,
                          const MachineRegisterInfo &MRI,
                          bool AllowUndef = false);

/// Return true if the specified instruction is known to be a constant, or a
/// vector of constants.
///
/// If \p AllowFP is true, this will consider G_FCONSTANT in addition to
/// G_CONSTANT. If \p AllowOpaqueConstants is true, constant-like instructions
/// such as G_GLOBAL_VALUE will also be considered.
bool isConstantOrConstantVector(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowFP = true,
                                bool AllowOpaqueConstants = true);

/// Return true if the value is a constant 0 integer or a splatted vector of a
/// constant 0 integer (with no undefs if \p AllowUndefs is false). This will
/// handle G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC as truncation is not an issue
/// for null values.
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI,
                       bool AllowUndefs = false);

/// Return true if the value is a constant -1 integer or a splatted vector of a
/// constant -1 integer (with no undefs if \p AllowUndefs is false).
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowUndefs = false);

/// \returns a value when \p MI is a vector splat. The splat can be either a
/// Register or a constant.
///
/// Examples:
///
/// \code
/// %reg = COPY $physreg
/// %reg_splat = G_BUILD_VECTOR %reg, %reg, ..., %reg
/// \endcode
///
/// If called on the G_BUILD_VECTOR above, this will return a RegOrConstant
/// containing %reg.
///
/// \code
/// %cst = G_CONSTANT iN 4
/// %constant_splat = G_BUILD_VECTOR %cst, %cst, ..., %cst
/// \endcode
///
/// In the above case, this will return a RegOrConstant containing 4.
std::optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a build vector of
/// constant integers. Treats undef values as constants.
bool isConstantOrConstantVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a splat vector of
/// constant integers.
/// \returns the scalar constant or std::nullopt.
std::optional<APInt>
isConstantOrConstantSplatVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Attempt to match a unary predicate against a scalar/splat constant or every
/// element of a constant G_BUILD_VECTOR. If \p ConstVal is null, the source
/// value was undef.
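///
/// A minimal sketch rejecting undef elements and requiring every element to be
/// a non-zero integer constant (the lambda receives null for undef elements):
/// \code
///   bool AllNonZero = matchUnaryPredicate(MRI, Reg, [](const Constant *C) {
///     auto *CI = dyn_cast_or_null<ConstantInt>(C);
///     return CI && !CI->isZero();
///   });
/// \endcode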
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg,
                         std::function<bool(const Constant *ConstVal)> Match,
                         bool AllowUndefs = false);

/// Returns true if given the TargetLowering's boolean contents information,
/// the value \p Val contains a true value.
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                    bool IsFP);
/// \returns true if given the TargetLowering's boolean contents information,
/// the value \p Val contains a false value.
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                     bool IsFP);

/// Returns an integer representing true, as defined by the
/// TargetBooleanContents.
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);

/// Returns true if the given block should be optimized for size.
bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI,
                      BlockFrequencyInfo *BFI);

using SmallInstListTy = GISelWorkList<4>;
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver,
                      SmallInstListTy &DeadInstChain);
void eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs, MachineRegisterInfo &MRI,
                 LostDebugLocObserver *LocObserver = nullptr);
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                LostDebugLocObserver *LocObserver = nullptr);

/// Assuming the instruction \p MI is going to be deleted, attempt to salvage
/// debug users of \p MI by writing the effect of \p MI in a DIExpression.
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI);

/// Returns whether opcode \p Opc is a pre-isel generic floating-point opcode,
/// having only floating-point operands.
bool isPreISelGenericFloatingPointOpcode(unsigned Opc);

/// Returns true if \p Reg can create undef or poison from non-undef &
/// non-poison operands. \p ConsiderFlagsAndMetadata controls whether poison
/// producing flags and metadata on the instruction are considered. This can be
/// used to see if the instruction could still introduce undef or poison even
/// without poison generating flags and metadata which might be on the
/// instruction.
bool canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI,
                            bool ConsiderFlagsAndMetadata = true);

/// Returns true if \p Reg can create poison from non-poison operands.
bool canCreatePoison(Register Reg, const MachineRegisterInfo &MRI,
                     bool ConsiderFlagsAndMetadata = true);

/// Returns true if \p Reg cannot be poison or undef.
bool isGuaranteedNotToBeUndefOrPoison(Register Reg,
                                      const MachineRegisterInfo &MRI,
                                      unsigned Depth = 0);

/// Returns true if \p Reg cannot be poison, but may be undef.
bool isGuaranteedNotToBePoison(Register Reg, const MachineRegisterInfo &MRI,
                               unsigned Depth = 0);

/// Returns true if \p Reg cannot be undef, but may be poison.
bool isGuaranteedNotToBeUndef(Register Reg, const MachineRegisterInfo &MRI,
                              unsigned Depth = 0);

/// Get the type back from LLT. It won't be 100 percent accurate but returns an
/// estimate of the type.
Type *getTypeForLLT(LLT Ty, LLVMContext &C);

} // End namespace llvm.
#endif