xref: /freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/Analysis.h (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
1 //===- CodeGen/Analysis.h - CodeGen LLVM IR Analysis Utilities --*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file declares several CodeGen-specific LLVM IR analysis utilities.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_CODEGEN_ANALYSIS_H
14 #define LLVM_CODEGEN_ANALYSIS_H
15 
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/CodeGen/ISDOpcodes.h"
19 #include "llvm/IR/Instructions.h"
20 
21 namespace llvm {
22 template <typename T> class SmallVectorImpl;
23 class GlobalValue;
24 class LLT;
25 class MachineBasicBlock;
26 class MachineFunction;
27 class TargetLoweringBase;
28 class TargetLowering;
29 class TargetMachine;
30 struct EVT;
31 
32 /// Compute the linearized index of a member in a nested
33 /// aggregate/struct/array.
34 ///
35 /// Given an LLVM IR aggregate type and a sequence of insertvalue or
36 /// extractvalue indices that identify a member, return the linearized index of
37 /// the start of the member, i.e. the number of elements in memory before the
38 /// sought one. This is disconnected from the number of bytes.
39 ///
40 /// \param Ty is the type indexed by \p Indices.
41 /// \param Indices is an optional pointer in the indices list to the current
42 /// index.
43 /// \param IndicesEnd is the end of the indices list.
44 /// \param CurIndex is the current index in the recursion.
45 ///
46 /// \returns \p CurIndex plus the linear index in \p Ty given by the indices list.
47 unsigned ComputeLinearIndex(Type *Ty,
48                             const unsigned *Indices,
49                             const unsigned *IndicesEnd,
50                             unsigned CurIndex = 0);
51 
52 inline unsigned ComputeLinearIndex(Type *Ty,
53                                    ArrayRef<unsigned> Indices,
54                                    unsigned CurIndex = 0) {
55   return ComputeLinearIndex(Ty, Indices.begin(), Indices.end(), CurIndex);
56 }
57 
58 /// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
59 /// EVTs that represent all the individual underlying
60 /// non-aggregate types that comprise it.
61 ///
62 /// If Offsets is non-null, it points to a vector to be filled in
63 /// with the in-memory offsets of each of the individual values.
64 ///
65 void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
66                      SmallVectorImpl<EVT> &ValueVTs,
67                      SmallVectorImpl<EVT> *MemVTs,
68                      SmallVectorImpl<TypeSize> *Offsets = nullptr,
69                      TypeSize StartingOffset = TypeSize::getZero());
70 void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
71                      SmallVectorImpl<EVT> &ValueVTs,
72                      SmallVectorImpl<EVT> *MemVTs,
73                      SmallVectorImpl<uint64_t> *FixedOffsets,
74                      uint64_t StartingOffset);
75 
76 /// Variants of ComputeValueVTs that don't produce memory VTs.
77 inline void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
78                             Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
79                             SmallVectorImpl<TypeSize> *Offsets = nullptr,
80                             TypeSize StartingOffset = TypeSize::getZero()) {
81   ComputeValueVTs(TLI, DL, Ty, ValueVTs, nullptr, Offsets, StartingOffset);
82 }
ComputeValueVTs(const TargetLowering & TLI,const DataLayout & DL,Type * Ty,SmallVectorImpl<EVT> & ValueVTs,SmallVectorImpl<uint64_t> * FixedOffsets,uint64_t StartingOffset)83 inline void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
84                             Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
85                             SmallVectorImpl<uint64_t> *FixedOffsets,
86                             uint64_t StartingOffset) {
87   ComputeValueVTs(TLI, DL, Ty, ValueVTs, nullptr, FixedOffsets, StartingOffset);
88 }
89 
90 /// computeValueLLTs - Given an LLVM IR type, compute a sequence of
91 /// LLTs that represent all the individual underlying
92 /// non-aggregate types that comprise it.
93 ///
94 /// If Offsets is non-null, it points to a vector to be filled in
95 /// with the in-memory offsets of each of the individual values.
96 ///
97 void computeValueLLTs(const DataLayout &DL, Type &Ty,
98                       SmallVectorImpl<LLT> &ValueTys,
99                       SmallVectorImpl<uint64_t> *Offsets = nullptr,
100                       uint64_t StartingOffset = 0);
101 
102 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
103 GlobalValue *ExtractTypeInfo(Value *V);
104 
105 /// getFCmpCondCode - Return the ISD condition code corresponding to
106 /// the given LLVM IR floating-point condition code.  This includes
107 /// consideration of global floating-point math flags.
108 ///
109 ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);
110 
111 /// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats,
112 /// return the equivalent code if we're allowed to assume that NaNs won't occur.
113 ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC);
114 
115 /// getICmpCondCode - Return the ISD condition code corresponding to
116 /// the given LLVM IR integer condition code.
117 ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);
118 
119 /// getICmpCondCode - Return the LLVM IR integer condition code
120 /// corresponding to the given ISD integer condition code.
121 ICmpInst::Predicate getICmpCondCode(ISD::CondCode Pred);
122 
123 /// Test if the given instruction is in a position to be optimized
124 /// with a tail-call. This roughly means that it's in a block with
125 /// a return and there's nothing that needs to be scheduled
126 /// between it and the return.
127 ///
128 /// This function only tests target-independent requirements.
129 bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM,
130                           bool ReturnsFirstArg = false);
131 
132 /// Test whether, given that the input instruction is in the tail call
133 /// position, an attribute mismatch between the caller and the callee will
134 /// inhibit tail call optimizations.
135 /// \p AllowDifferingSizes is an output parameter which, if forming a tail call
136 /// is permitted, determines whether it's permitted only if the size of the
137 /// caller's and callee's return types match exactly.
138 bool attributesPermitTailCall(const Function *F, const Instruction *I,
139                               const ReturnInst *Ret,
140                               const TargetLoweringBase &TLI,
141                               bool *AllowDifferingSizes = nullptr);
142 
143 /// Test whether, given that the input instruction is in the tail call
144 /// position, the return type or any attributes of the function will inhibit
145 /// tail call optimization.
146 bool returnTypeIsEligibleForTailCall(const Function *F, const Instruction *I,
147                                      const ReturnInst *Ret,
148                                      const TargetLoweringBase &TLI,
149                                      bool ReturnsFirstArg = false);
150 
151 /// Returns true if the parent of \p CI returns CI's first argument after
152 /// calling \p CI.
153 bool funcReturnsFirstArgOfCall(const CallInst &CI);
154 
155 DenseMap<const MachineBasicBlock *, int>
156 getEHScopeMembership(const MachineFunction &MF);
157 
158 } // End llvm namespace
159 
160 #endif
161