//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
        EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset contributed by one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the linear offset of the
      // requested element and recurse into it with the remaining indices.
      CurIndex += EltLinearOffset* *Indices;
      return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset*NumElts;
    return CurIndex;
  }
  // Base case: Ty is neither a struct nor an array, so it occupies one
  // linear slot.
  return CurIndex + 1;
}
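
// A minimal usage sketch (illustrative only; the type and index list below are
// hypothetical, not taken from a real caller):
//
//   // For T = { i32, { float, i32 }, [2 x i8] }, the flattened members are
//   // i32 (0), float (1), i32 (2), i8 (3), i8 (4).
//   unsigned Idx[] = {1, 1}; // second struct member, then its second field
//   unsigned Linear = ComputeLinearIndex(T, Idx, Idx + 2, 0); // Linear == 2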

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
                         StartingOffset);
}
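
// A small illustration of the expected output (a sketch; the concrete EVTs and
// offsets depend on the target's TargetLowering and DataLayout, so the values
// shown assume a typical layout where i32 and float are both 4 bytes):
//
//   // For Ty = { i32, [2 x float] }:
//   SmallVector<EVT, 4> VTs;
//   SmallVector<uint64_t, 4> Offs;
//   ComputeValueVTs(TLI, DL, Ty, VTs, &Offs);
//   // VTs  == { i32, f32, f32 }
//   // Offs == { 0, 4, 8 }   (byte offsets)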

void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}
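
// Like ComputeValueVTs, but for GlobalISel's LLTs. Note that the offsets are
// reported in bits, not bytes. A sketch under the same layout assumptions as
// above (4-byte i32/float):
//
//   // For Ty = { i32, [2 x float] }:
//   SmallVector<LLT, 4> Tys;
//   SmallVector<uint64_t, 4> Offs;
//   computeValueLLTs(DL, Ty, Tys, &Offs);
//   // Tys  == { s32, s32, s32 }  (float also maps to a 32-bit scalar LLT)
//   // Offs == { 0, 32, 64 }      (bit offsets)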

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}
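
// Typical inputs are the type-info operands of landingpad clauses, e.g. (a
// hypothetical snippet using the usual Itanium C++ ABI RTTI symbol):
//
//   %lp = landingpad { i8*, i32 }
//           catch i8* bitcast (i8** @_ZTIi to i8*)
//
// ExtractTypeInfo strips the bitcast and returns @_ZTIi; a "catch i8* null"
// (catch-all) clause yields nullptr.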

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code.  This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    default: return CC;
  }
}
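
// How lowering code typically combines the two helpers (a sketch; FC is a
// hypothetical FCmpInst being visited, not a name used in this file):
//
//   ISD::CondCode CC = getFCmpCondCode(FC->getPredicate());
//   if (FC->hasNoNaNs())                // fast-math: NaNs can be ignored
//     CC = getFCmpCodeWithoutNaN(CC);   // e.g. SETOLT/SETULT both become SETLT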

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (auto CS = ImmutableCallSite(I)) {
      const Value *ReturnedOp = CS.getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
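
// An example of what this looks through (an illustrative IR fragment, not from
// a real test):
//
//   define i32* @caller() {
//     %r0 = tail call i8* @callee()
//     %r1 = getelementptr i8, i8* %r0, i32 0   ; all-zero GEP: looked through
//     %r2 = bitcast i8* %r1 to i32*            ; pointer bitcast: looked through
//     ret i32* %r2
//   }
//
// Starting from %r2, getNoopInput walks back to %r0, so the callers below can
// match the ret up with the tail call.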

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate.
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and set up the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}
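
// A worked example of the leaf iteration above (illustrative only; Ty is a
// hypothetical type, reusing the example from the firstRealType comment):
//
//   // For Ty = {[0 x i64], {{}, i32, {}}, i32}:
//   SmallVector<CompositeType *, 4> SubTypes;
//   SmallVector<unsigned, 4> Path;
//   firstRealType(Ty, SubTypes, Path);   // Path == [1, 1] -> the inner i32
//   nextRealType(SubTypes, Path);        // Path == [2]    -> the outer i32
//   nextRealType(SubTypes, Path);        // returns false: no more leaves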


/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      // A lifetime end intrinsic should not stop tail call optimization.
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
        if (II->getIntrinsicID() == Intrinsic::lifetime_end)
          continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(&*BBI))
        return false;
    }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, I, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}
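
// What "tail position" means in practice (an illustrative IR sketch, not taken
// from a real test):
//
//   define i32 @good() {
//     %r = tail call i32 @callee()
//     ret i32 %r                     ; nothing scheduled between call and ret
//   }
//
//   define i32 @bad(i32* %p) {
//     %r = tail call i32 @callee()
//     store i32 0, i32* %p           ; interposing side effect: not eligible
//     ret i32 %r
//   }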

bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getAttributes(), AttributeList::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeList::ReturnIndex);

  // NoAlias and NonNull are completely benign as far as calling convention
  // goes; they shouldn't affect whether the call is a tail call.
  CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs.removeAttribute(Attribute::NoAlias);
  CallerAttrs.removeAttribute(Attribute::NonNull);
  CalleeAttrs.removeAttribute(Attribute::NonNull);

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}
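
// A sketch of the kind of mismatch this rejects (illustrative IR, not from a
// real test):
//
//   define zeroext i8 @caller() {
//     %r = tail call i8 @callee()   ; call result is not marked zeroext
//     ret i8 %r
//   }
//
// The caller promises a zero-extended return but the callee makes no such
// promise, so the callee's return value cannot simply be forwarded and the
// tail call is rejected. When both sides do carry zext/sext,
// AllowDifferingSizes is cleared instead.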

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // Intrinsics like llvm.memcpy have no return value, but the expanded
  // libcall may or may not have a return value. On most platforms, it
  // will be expanded as memcpy in libc, which returns the first
  // argument. On other platforms like arm-none-eabi, memcpy may be
  // expanded as a library call without a return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        RetVal == Call->getArgOperand(0))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing is actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, so
      // the rest are essentially undef. The type doesn't really matter, but we
      // need *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty  = !nextRealType(CallSubTypes, CallPath);
  } while(nextRealType(RetSubTypes, RetPath));

  return true;
}
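
// An example of the covariance allowed above (illustrative IR; whether the
// truncate is actually free also depends on TLI.allowTruncateForTailCall for
// the target in question):
//
//   define i32 @caller() {
//     %r64 = tail call i64 @callee()
//     %r32 = trunc i64 %r64 to i32   ; the ret only needs the low 32 bits
//     ret i32 %r32
//   }
//
// The call provides more bits than the ret consumes, which is fine; the
// reverse (an extension between call and ret) is rejected.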

static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur; don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    for (const MachineBasicBlock *Succ : Visiting->successors())
      Worklist.push_back(Succ);
  }
}

DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}
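
// A usage sketch (illustrative only; MF is assumed to be a MachineFunction
// that uses funclet-based EH, e.g. when targeting Windows):
//
//   auto Membership = getEHScopeMembership(MF);
//   for (const MachineBasicBlock &MBB : MF) {
//     auto It = Membership.find(&MBB);
//     if (It != Membership.end())
//       dbgs() << "bb." << MBB.getNumber() << " -> scope " << It->second
//              << "\n";
//   }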