//===- FunctionComparator.cpp - Function Comparator -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the FunctionComparator and GlobalNumberState classes
// which are used by the MergeFunctions pass for comparing functions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/FunctionComparator.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "functioncomparator"

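// The cmp* helpers below implement three-way comparisons: they return -1 when
// the left operand orders before the right one, 1 when it orders after, and 0
// when the two are considered equivalent.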
int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
  if (L < R)
    return -1;
  if (L > R)
    return 1;
  return 0;
}

int FunctionComparator::cmpAligns(Align L, Align R) const {
  if (L.value() < R.value())
    return -1;
  if (L.value() > R.value())
    return 1;
  return 0;
}

int FunctionComparator::cmpOrderings(AtomicOrdering L, AtomicOrdering R) const {
  if ((int)L < (int)R)
    return -1;
  if ((int)L > (int)R)
    return 1;
  return 0;
}

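// APInts are ordered first by bit width and then by unsigned value.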
int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const {
  if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
    return Res;
  if (L.ugt(R))
    return 1;
  if (R.ugt(L))
    return -1;
  return 0;
}

int FunctionComparator::cmpAPFloats(const APFloat &L, const APFloat &R) const {
  // Floats are ordered first by semantics (e.g. float, double, half, etc.),
  // then by value interpreted as a bitstring (aka APInt).
  const fltSemantics &SL = L.getSemantics(), &SR = R.getSemantics();
  if (int Res = cmpNumbers(APFloat::semanticsPrecision(SL),
                           APFloat::semanticsPrecision(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsMaxExponent(SL),
                           APFloat::semanticsMaxExponent(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsMinExponent(SL),
                           APFloat::semanticsMinExponent(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsSizeInBits(SL),
                           APFloat::semanticsSizeInBits(SR)))
    return Res;
  return cmpAPInts(L.bitcastToAPInt(), R.bitcastToAPInt());
}

int FunctionComparator::cmpMem(StringRef L, StringRef R) const {
  // To avoid a potentially heavy comparison, compare sizes first.
  if (int Res = cmpNumbers(L.size(), R.size()))
    return Res;

  // Compare the strings lexicographically only when necessary, i.e. when they
  // are equal in size.
  return std::clamp(L.compare(R), -1, 1);
}

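// Attribute lists are ordered by the number of attribute sets and then per
// index: attributes are walked pairwise, type attributes are compared by kind
// and then via cmpTypes, all other attributes via Attribute's own ordering; a
// longer attribute set orders after a shorter one.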
int FunctionComparator::cmpAttrs(const AttributeList L,
                                 const AttributeList R) const {
  if (int Res = cmpNumbers(L.getNumAttrSets(), R.getNumAttrSets()))
    return Res;

  for (unsigned i : L.indexes()) {
    AttributeSet LAS = L.getAttributes(i);
    AttributeSet RAS = R.getAttributes(i);
    AttributeSet::iterator LI = LAS.begin(), LE = LAS.end();
    AttributeSet::iterator RI = RAS.begin(), RE = RAS.end();
    for (; LI != LE && RI != RE; ++LI, ++RI) {
      Attribute LA = *LI;
      Attribute RA = *RI;
      if (LA.isTypeAttribute() && RA.isTypeAttribute()) {
        if (LA.getKindAsEnum() != RA.getKindAsEnum())
          return cmpNumbers(LA.getKindAsEnum(), RA.getKindAsEnum());

        Type *TyL = LA.getValueAsType();
        Type *TyR = RA.getValueAsType();
        if (TyL && TyR) {
          if (int Res = cmpTypes(TyL, TyR))
            return Res;
          continue;
        }

        // Two pointers, at least one null, so the comparison result is
        // independent of the value of a real pointer.
        if (int Res = cmpNumbers((uint64_t)TyL, (uint64_t)TyR))
          return Res;
        continue;
      }
      if (LA < RA)
        return -1;
      if (RA < LA)
        return 1;
    }
    if (LI != LE)
      return 1;
    if (RI != RE)
      return -1;
  }
  return 0;
}

int FunctionComparator::cmpMetadata(const Metadata *L,
                                    const Metadata *R) const {
  // TODO: the following routine coerces the metadata contents into constants
  // or MDStrings before comparison. Any other kind of metadata is ignored, so
  // such nodes are considered equal even when they may not be. A structural
  // comparison of the metadata nodes would be needed to be fully correct here.

  auto *MDStringL = dyn_cast<MDString>(L);
  auto *MDStringR = dyn_cast<MDString>(R);
  if (MDStringL && MDStringR) {
    if (MDStringL == MDStringR)
      return 0;
    return MDStringL->getString().compare(MDStringR->getString());
  }
  if (MDStringR)
    return -1;
  if (MDStringL)
    return 1;

  auto *CL = dyn_cast<ConstantAsMetadata>(L);
  auto *CR = dyn_cast<ConstantAsMetadata>(R);
  if (CL == CR)
    return 0;
  if (!CL)
    return -1;
  if (!CR)
    return 1;
  return cmpConstants(CL->getValue(), CR->getValue());
}

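// MDNodes are ordered with null nodes first, then by operand count, and then
// operand by operand via cmpMetadata.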
int FunctionComparator::cmpMDNode(const MDNode *L, const MDNode *R) const {
  if (L == R)
    return 0;
  if (!L)
    return -1;
  if (!R)
    return 1;
  // TODO: Note that as this is metadata, it is possible to drop and/or merge
  // this data when considering functions to merge. Thus this comparison would
  // return 0 (i.e. equivalent), but merging would become more complicated
  // because the ranges would need to be unioned. It is not likely that
  // functions differ ONLY in this metadata if they are actually the same
  // function semantically.
  if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
    return Res;
  for (size_t I = 0; I < L->getNumOperands(); ++I)
    if (int Res = cmpMetadata(L->getOperand(I), R->getOperand(I)))
      return Res;
  return 0;
}

int FunctionComparator::cmpInstMetadata(Instruction const *L,
                                        Instruction const *R) const {
  // These metadata affect other optimization passes by making assertions or
  // stating constraints. Values that carry different expectations should be
  // considered different.
  SmallVector<std::pair<unsigned, MDNode *>> MDL, MDR;
  L->getAllMetadataOtherThanDebugLoc(MDL);
  R->getAllMetadataOtherThanDebugLoc(MDR);
  if (MDL.size() > MDR.size())
    return 1;
  else if (MDL.size() < MDR.size())
    return -1;
  for (size_t I = 0, N = MDL.size(); I < N; ++I) {
    auto const [KeyL, ML] = MDL[I];
    auto const [KeyR, MR] = MDR[I];
    if (int Res = cmpNumbers(KeyL, KeyR))
      return Res;
    if (int Res = cmpMDNode(ML, MR))
      return Res;
  }
  return 0;
}

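// Only the operand-bundle schema is compared here (tag names and input
// counts); the bundle operands themselves are compared together with the other
// call operands by the caller.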
int FunctionComparator::cmpOperandBundlesSchema(const CallBase &LCS,
                                                const CallBase &RCS) const {
  assert(LCS.getOpcode() == RCS.getOpcode() && "Can't compare otherwise!");

  if (int Res =
          cmpNumbers(LCS.getNumOperandBundles(), RCS.getNumOperandBundles()))
    return Res;

  for (unsigned I = 0, E = LCS.getNumOperandBundles(); I != E; ++I) {
    auto OBL = LCS.getOperandBundleAt(I);
    auto OBR = RCS.getOperandBundleAt(I);

    if (int Res = OBL.getTagName().compare(OBR.getTagName()))
      return Res;

    if (int Res = cmpNumbers(OBL.Inputs.size(), OBR.Inputs.size()))
      return Res;
  }

  return 0;
}

/// Constants comparison:
/// 1. Check whether type of L constant could be losslessly bitcasted to R
/// type.
/// 2. Compare constant contents.
/// For more details see declaration comments.
int FunctionComparator::cmpConstants(const Constant *L,
                                     const Constant *R) const {
  Type *TyL = L->getType();
  Type *TyR = R->getType();

  // Check whether the types are bitcastable. This part is essentially a
  // refactored Type::canLosslesslyBitCastTo, but instead of returning only
  // true/false, it also encodes in the result which type is "less" for us.
  int TypesRes = cmpTypes(TyL, TyR);
  if (TypesRes != 0) {
    // Types are different, but check whether we can bitcast them.
    if (!TyL->isFirstClassType()) {
      if (TyR->isFirstClassType())
        return -1;
      // Neither TyL nor TyR is a first class type. Return the result of
      // comparing the types.
      return TypesRes;
    }
    if (!TyR->isFirstClassType()) {
      if (TyL->isFirstClassType())
        return 1;
      return TypesRes;
    }

    // Vector -> Vector conversions are always lossless if the two vector types
    // have the same size, otherwise not.
    unsigned TyLWidth = 0;
    unsigned TyRWidth = 0;

    if (auto *VecTyL = dyn_cast<VectorType>(TyL))
      TyLWidth = VecTyL->getPrimitiveSizeInBits().getFixedValue();
    if (auto *VecTyR = dyn_cast<VectorType>(TyR))
      TyRWidth = VecTyR->getPrimitiveSizeInBits().getFixedValue();

    if (TyLWidth != TyRWidth)
      return cmpNumbers(TyLWidth, TyRWidth);

    // A zero bit-width means neither TyL nor TyR is a vector.
    if (!TyLWidth) {
      PointerType *PTyL = dyn_cast<PointerType>(TyL);
      PointerType *PTyR = dyn_cast<PointerType>(TyR);
      if (PTyL && PTyR) {
        unsigned AddrSpaceL = PTyL->getAddressSpace();
        unsigned AddrSpaceR = PTyR->getAddressSpace();
        if (int Res = cmpNumbers(AddrSpaceL, AddrSpaceR))
          return Res;
      }
      if (PTyL)
        return 1;
      if (PTyR)
        return -1;

      // TyL and TyR are neither vectors nor pointers. We don't know how to
      // bitcast them.
      return TypesRes;
    }
  }

  // OK, types are bitcastable, now check constant contents.

  if (L->isNullValue() && R->isNullValue())
    return TypesRes;
  if (L->isNullValue() && !R->isNullValue())
    return 1;
  if (!L->isNullValue() && R->isNullValue())
    return -1;

  auto GlobalValueL = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(L));
  auto GlobalValueR = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(R));
  if (GlobalValueL && GlobalValueR) {
    return cmpGlobalValues(GlobalValueL, GlobalValueR);
  }

  if (int Res = cmpNumbers(L->getValueID(), R->getValueID()))
    return Res;

  if (const auto *SeqL = dyn_cast<ConstantDataSequential>(L)) {
    const auto *SeqR = cast<ConstantDataSequential>(R);
    // This handles ConstantDataArray and ConstantDataVector. Note that we
    // compare the two raw data arrays, which might differ depending on the host
    // endianness. This isn't a problem though, because the endianness of a
    // module will affect the order of the constants, but this order is the same
    // for a given input module and host platform.
    return cmpMem(SeqL->getRawDataValues(), SeqR->getRawDataValues());
  }

  switch (L->getValueID()) {
  case Value::UndefValueVal:
  case Value::PoisonValueVal:
  case Value::ConstantTokenNoneVal:
    return TypesRes;
  case Value::ConstantIntVal: {
    const APInt &LInt = cast<ConstantInt>(L)->getValue();
    const APInt &RInt = cast<ConstantInt>(R)->getValue();
    return cmpAPInts(LInt, RInt);
  }
  case Value::ConstantFPVal: {
    const APFloat &LAPF = cast<ConstantFP>(L)->getValueAPF();
    const APFloat &RAPF = cast<ConstantFP>(R)->getValueAPF();
    return cmpAPFloats(LAPF, RAPF);
  }
  case Value::ConstantArrayVal: {
    const ConstantArray *LA = cast<ConstantArray>(L);
    const ConstantArray *RA = cast<ConstantArray>(R);
    uint64_t NumElementsL = cast<ArrayType>(TyL)->getNumElements();
    uint64_t NumElementsR = cast<ArrayType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LA->getOperand(i)),
                                 cast<Constant>(RA->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantStructVal: {
    const ConstantStruct *LS = cast<ConstantStruct>(L);
    const ConstantStruct *RS = cast<ConstantStruct>(R);
    unsigned NumElementsL = cast<StructType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<StructType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (unsigned i = 0; i != NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LS->getOperand(i)),
                                 cast<Constant>(RS->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantVectorVal: {
    const ConstantVector *LV = cast<ConstantVector>(L);
    const ConstantVector *RV = cast<ConstantVector>(R);
    unsigned NumElementsL = cast<FixedVectorType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<FixedVectorType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LV->getOperand(i)),
                                 cast<Constant>(RV->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantExprVal: {
    const ConstantExpr *LE = cast<ConstantExpr>(L);
    const ConstantExpr *RE = cast<ConstantExpr>(R);
    if (int Res = cmpNumbers(LE->getOpcode(), RE->getOpcode()))
      return Res;
    unsigned NumOperandsL = LE->getNumOperands();
    unsigned NumOperandsR = RE->getNumOperands();
    if (int Res = cmpNumbers(NumOperandsL, NumOperandsR))
      return Res;
    for (unsigned i = 0; i < NumOperandsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LE->getOperand(i)),
                                 cast<Constant>(RE->getOperand(i))))
        return Res;
    }
    if (LE->isCompare())
      if (int Res = cmpNumbers(LE->getPredicate(), RE->getPredicate()))
        return Res;
    if (auto *GEPL = dyn_cast<GEPOperator>(LE)) {
      auto *GEPR = cast<GEPOperator>(RE);
      if (int Res = cmpTypes(GEPL->getSourceElementType(),
                             GEPR->getSourceElementType()))
        return Res;
      if (int Res = cmpNumbers(GEPL->isInBounds(), GEPR->isInBounds()))
        return Res;
      if (int Res = cmpNumbers(GEPL->getInRangeIndex().value_or(unsigned(-1)),
                               GEPR->getInRangeIndex().value_or(unsigned(-1))))
        return Res;
    }
    if (auto *OBOL = dyn_cast<OverflowingBinaryOperator>(LE)) {
      auto *OBOR = cast<OverflowingBinaryOperator>(RE);
      if (int Res =
              cmpNumbers(OBOL->hasNoUnsignedWrap(), OBOR->hasNoUnsignedWrap()))
        return Res;
      if (int Res =
              cmpNumbers(OBOL->hasNoSignedWrap(), OBOR->hasNoSignedWrap()))
        return Res;
    }
    return 0;
  }
  case Value::BlockAddressVal: {
    const BlockAddress *LBA = cast<BlockAddress>(L);
    const BlockAddress *RBA = cast<BlockAddress>(R);
    if (int Res = cmpValues(LBA->getFunction(), RBA->getFunction()))
      return Res;
    if (LBA->getFunction() == RBA->getFunction()) {
      // They are BBs in the same function. Order by which comes first in the
      // BB order of the function. This order is deterministic.
      Function *F = LBA->getFunction();
      BasicBlock *LBB = LBA->getBasicBlock();
      BasicBlock *RBB = RBA->getBasicBlock();
      if (LBB == RBB)
        return 0;
      for (BasicBlock &BB : *F) {
        if (&BB == LBB) {
          assert(&BB != RBB);
          return -1;
        }
        if (&BB == RBB)
          return 1;
      }
      llvm_unreachable("Basic Block Address does not point to a basic block in "
                       "its function.");
      return -1;
    } else {
      // cmpValues said the functions are the same. So because they aren't
      // literally the same pointer, they must respectively be the left and
      // right functions.
      assert(LBA->getFunction() == FnL && RBA->getFunction() == FnR);
      // cmpValues will tell us if these are equivalent BasicBlocks, in the
      // context of their respective functions.
      return cmpValues(LBA->getBasicBlock(), RBA->getBasicBlock());
    }
  }
  case Value::DSOLocalEquivalentVal: {
    // dso_local_equivalent is functionally equivalent to whatever it points to.
    // This means the behavior of the IR should be the exact same as if the
    // function was referenced directly rather than through a
    // dso_local_equivalent.
    const auto *LEquiv = cast<DSOLocalEquivalent>(L);
    const auto *REquiv = cast<DSOLocalEquivalent>(R);
    return cmpGlobalValues(LEquiv->getGlobalValue(), REquiv->getGlobalValue());
  }
  default: // Unknown constant, abort.
    LLVM_DEBUG(dbgs() << "Looking at valueID " << L->getValueID() << "\n");
    llvm_unreachable("Constant ValueID not recognized.");
    return -1;
  }
}

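// Global values are not compared structurally; each one is mapped to a serial
// number by the shared GlobalNumberState, so references compare equal exactly
// when they name the same global.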
int FunctionComparator::cmpGlobalValues(GlobalValue *L, GlobalValue *R) const {
  uint64_t LNumber = GlobalNumbers->getNumber(L);
  uint64_t RNumber = GlobalNumbers->getNumber(R);
  return cmpNumbers(LNumber, RNumber);
}

/// cmpTypes - compares two types and defines a total ordering among the set
/// of types.
/// See method declaration comments for more details.
int FunctionComparator::cmpTypes(Type *TyL, Type *TyR) const {
  PointerType *PTyL = dyn_cast<PointerType>(TyL);
  PointerType *PTyR = dyn_cast<PointerType>(TyR);

  const DataLayout &DL = FnL->getParent()->getDataLayout();
  if (PTyL && PTyL->getAddressSpace() == 0)
    TyL = DL.getIntPtrType(TyL);
  if (PTyR && PTyR->getAddressSpace() == 0)
    TyR = DL.getIntPtrType(TyR);

  if (TyL == TyR)
    return 0;

  if (int Res = cmpNumbers(TyL->getTypeID(), TyR->getTypeID()))
    return Res;

  switch (TyL->getTypeID()) {
  default:
    llvm_unreachable("Unknown type!");
  case Type::IntegerTyID:
    return cmpNumbers(cast<IntegerType>(TyL)->getBitWidth(),
                      cast<IntegerType>(TyR)->getBitWidth());
  // TyL == TyR would have returned 0 (equal) earlier, because types are
  // uniqued.
  case Type::VoidTyID:
  case Type::FloatTyID:
  case Type::DoubleTyID:
  case Type::X86_FP80TyID:
  case Type::FP128TyID:
  case Type::PPC_FP128TyID:
  case Type::LabelTyID:
  case Type::MetadataTyID:
  case Type::TokenTyID:
    return 0;

  case Type::PointerTyID:
    assert(PTyL && PTyR && "Both types must be pointers here.");
    return cmpNumbers(PTyL->getAddressSpace(), PTyR->getAddressSpace());

  case Type::StructTyID: {
    StructType *STyL = cast<StructType>(TyL);
    StructType *STyR = cast<StructType>(TyR);
    if (STyL->getNumElements() != STyR->getNumElements())
      return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());

    if (STyL->isPacked() != STyR->isPacked())
      return cmpNumbers(STyL->isPacked(), STyR->isPacked());

    for (unsigned i = 0, e = STyL->getNumElements(); i != e; ++i) {
      if (int Res = cmpTypes(STyL->getElementType(i), STyR->getElementType(i)))
        return Res;
    }
    return 0;
  }

  case Type::FunctionTyID: {
    FunctionType *FTyL = cast<FunctionType>(TyL);
    FunctionType *FTyR = cast<FunctionType>(TyR);
    if (FTyL->getNumParams() != FTyR->getNumParams())
      return cmpNumbers(FTyL->getNumParams(), FTyR->getNumParams());

    if (FTyL->isVarArg() != FTyR->isVarArg())
      return cmpNumbers(FTyL->isVarArg(), FTyR->isVarArg());

    if (int Res = cmpTypes(FTyL->getReturnType(), FTyR->getReturnType()))
      return Res;

    for (unsigned i = 0, e = FTyL->getNumParams(); i != e; ++i) {
      if (int Res = cmpTypes(FTyL->getParamType(i), FTyR->getParamType(i)))
        return Res;
    }
    return 0;
  }

  case Type::ArrayTyID: {
    auto *STyL = cast<ArrayType>(TyL);
    auto *STyR = cast<ArrayType>(TyR);
    if (STyL->getNumElements() != STyR->getNumElements())
      return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());
    return cmpTypes(STyL->getElementType(), STyR->getElementType());
  }
  case Type::FixedVectorTyID:
  case Type::ScalableVectorTyID: {
    auto *STyL = cast<VectorType>(TyL);
    auto *STyR = cast<VectorType>(TyR);
    if (STyL->getElementCount().isScalable() !=
        STyR->getElementCount().isScalable())
      return cmpNumbers(STyL->getElementCount().isScalable(),
                        STyR->getElementCount().isScalable());
    if (STyL->getElementCount() != STyR->getElementCount())
      return cmpNumbers(STyL->getElementCount().getKnownMinValue(),
                        STyR->getElementCount().getKnownMinValue());
    return cmpTypes(STyL->getElementType(), STyR->getElementType());
  }
  }
}

// Determine whether the two operations are the same except that pointer-to-A
// and pointer-to-B are equivalent. This should be kept in sync with
// Instruction::isSameOperationAs.
// Read method declaration comments for more details.
int FunctionComparator::cmpOperations(const Instruction *L,
                                      const Instruction *R,
                                      bool &needToCmpOperands) const {
  needToCmpOperands = true;
  if (int Res = cmpValues(L, R))
    return Res;

  // Differences from Instruction::isSameOperationAs:
  //  * replace type comparison with calls to cmpTypes.
  //  * we test for I->getRawSubclassOptionalData (nuw/nsw/tail) at the top.
  //  * because of the above, we don't test for the tail bit on calls later on.
  if (int Res = cmpNumbers(L->getOpcode(), R->getOpcode()))
    return Res;

  if (const GetElementPtrInst *GEPL = dyn_cast<GetElementPtrInst>(L)) {
    needToCmpOperands = false;
    const GetElementPtrInst *GEPR = cast<GetElementPtrInst>(R);
    if (int Res =
            cmpValues(GEPL->getPointerOperand(), GEPR->getPointerOperand()))
      return Res;
    return cmpGEPs(GEPL, GEPR);
  }

  if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
    return Res;

  if (int Res = cmpTypes(L->getType(), R->getType()))
    return Res;

  if (int Res = cmpNumbers(L->getRawSubclassOptionalData(),
                           R->getRawSubclassOptionalData()))
    return Res;

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same type.
  for (unsigned i = 0, e = L->getNumOperands(); i != e; ++i) {
    if (int Res =
            cmpTypes(L->getOperand(i)->getType(), R->getOperand(i)->getType()))
      return Res;
  }

  // Check special state that is a part of some instructions.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(L)) {
    if (int Res = cmpTypes(AI->getAllocatedType(),
                           cast<AllocaInst>(R)->getAllocatedType()))
      return Res;
    return cmpAligns(AI->getAlign(), cast<AllocaInst>(R)->getAlign());
  }
  if (const LoadInst *LI = dyn_cast<LoadInst>(L)) {
    if (int Res = cmpNumbers(LI->isVolatile(), cast<LoadInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpAligns(LI->getAlign(), cast<LoadInst>(R)->getAlign()))
      return Res;
    if (int Res =
            cmpOrderings(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
      return Res;
    if (int Res = cmpNumbers(LI->getSyncScopeID(),
                             cast<LoadInst>(R)->getSyncScopeID()))
      return Res;
    return cmpInstMetadata(L, R);
  }
  if (const StoreInst *SI = dyn_cast<StoreInst>(L)) {
    if (int Res =
            cmpNumbers(SI->isVolatile(), cast<StoreInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpAligns(SI->getAlign(), cast<StoreInst>(R)->getAlign()))
      return Res;
    if (int Res =
            cmpOrderings(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(SI->getSyncScopeID(),
                      cast<StoreInst>(R)->getSyncScopeID());
  }
  if (const CmpInst *CI = dyn_cast<CmpInst>(L))
    return cmpNumbers(CI->getPredicate(), cast<CmpInst>(R)->getPredicate());
  if (auto *CBL = dyn_cast<CallBase>(L)) {
    auto *CBR = cast<CallBase>(R);
    if (int Res = cmpNumbers(CBL->getCallingConv(), CBR->getCallingConv()))
      return Res;
    if (int Res = cmpAttrs(CBL->getAttributes(), CBR->getAttributes()))
      return Res;
    if (int Res = cmpOperandBundlesSchema(*CBL, *CBR))
      return Res;
    if (const CallInst *CI = dyn_cast<CallInst>(L))
      if (int Res = cmpNumbers(CI->getTailCallKind(),
                               cast<CallInst>(R)->getTailCallKind()))
        return Res;
    return cmpMDNode(L->getMetadata(LLVMContext::MD_range),
                     R->getMetadata(LLVMContext::MD_range));
  }
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(L)) {
    ArrayRef<unsigned> LIndices = IVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<InsertValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
    return 0;
  }
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(L)) {
    ArrayRef<unsigned> LIndices = EVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<ExtractValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
  }
  if (const FenceInst *FI = dyn_cast<FenceInst>(L)) {
    if (int Res =
            cmpOrderings(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(FI->getSyncScopeID(),
                      cast<FenceInst>(R)->getSyncScopeID());
  }
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {
    if (int Res = cmpNumbers(CXI->isVolatile(),
                             cast<AtomicCmpXchgInst>(R)->isVolatile()))
      return Res;
    if (int Res =
            cmpNumbers(CXI->isWeak(), cast<AtomicCmpXchgInst>(R)->isWeak()))
      return Res;
    if (int Res =
            cmpOrderings(CXI->getSuccessOrdering(),
                         cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
      return Res;
    if (int Res =
            cmpOrderings(CXI->getFailureOrdering(),
                         cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
      return Res;
    return cmpNumbers(CXI->getSyncScopeID(),
                      cast<AtomicCmpXchgInst>(R)->getSyncScopeID());
  }
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(L)) {
    if (int Res = cmpNumbers(RMWI->getOperation(),
                             cast<AtomicRMWInst>(R)->getOperation()))
      return Res;
    if (int Res = cmpNumbers(RMWI->isVolatile(),
                             cast<AtomicRMWInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpOrderings(RMWI->getOrdering(),
                               cast<AtomicRMWInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(RMWI->getSyncScopeID(),
                      cast<AtomicRMWInst>(R)->getSyncScopeID());
  }
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(L)) {
    ArrayRef<int> LMask = SVI->getShuffleMask();
    ArrayRef<int> RMask = cast<ShuffleVectorInst>(R)->getShuffleMask();
    if (int Res = cmpNumbers(LMask.size(), RMask.size()))
      return Res;
    for (size_t i = 0, e = LMask.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LMask[i], RMask[i]))
        return Res;
    }
  }
  if (const PHINode *PNL = dyn_cast<PHINode>(L)) {
    const PHINode *PNR = cast<PHINode>(R);
    // Ensure that in addition to the incoming values being identical
    // (checked by the caller of this function), the incoming blocks
    // are also identical.
    for (unsigned i = 0, e = PNL->getNumIncomingValues(); i != e; ++i) {
      if (int Res =
              cmpValues(PNL->getIncomingBlock(i), PNR->getIncomingBlock(i)))
        return Res;
    }
  }
  return 0;
}

// Determine whether two GEP operations perform the same underlying arithmetic.
// Read method declaration comments for more details.
int FunctionComparator::cmpGEPs(const GEPOperator *GEPL,
                                const GEPOperator *GEPR) const {
  unsigned int ASL = GEPL->getPointerAddressSpace();
  unsigned int ASR = GEPR->getPointerAddressSpace();

  if (int Res = cmpNumbers(ASL, ASR))
    return Res;

  // Using the DataLayout, we can try to reduce each GEP down to a constant
  // number of bytes added to the base address, and compare those.
  const DataLayout &DL = FnL->getParent()->getDataLayout();
  unsigned OffsetBitWidth = DL.getIndexSizeInBits(ASL);
  APInt OffsetL(OffsetBitWidth, 0), OffsetR(OffsetBitWidth, 0);
  if (GEPL->accumulateConstantOffset(DL, OffsetL) &&
      GEPR->accumulateConstantOffset(DL, OffsetR))
    return cmpAPInts(OffsetL, OffsetR);
  if (int Res =
          cmpTypes(GEPL->getSourceElementType(), GEPR->getSourceElementType()))
    return Res;

  if (int Res = cmpNumbers(GEPL->getNumOperands(), GEPR->getNumOperands()))
    return Res;

  for (unsigned i = 0, e = GEPL->getNumOperands(); i != e; ++i) {
    if (int Res = cmpValues(GEPL->getOperand(i), GEPR->getOperand(i)))
      return Res;
  }

  return 0;
}

int FunctionComparator::cmpInlineAsm(const InlineAsm *L,
                                     const InlineAsm *R) const {
  // InlineAsm values are uniqued. If they are the same pointer, obviously they
  // are the same; otherwise compare the fields.
  if (L == R)
    return 0;
  if (int Res = cmpTypes(L->getFunctionType(), R->getFunctionType()))
    return Res;
  if (int Res = cmpMem(L->getAsmString(), R->getAsmString()))
    return Res;
  if (int Res = cmpMem(L->getConstraintString(), R->getConstraintString()))
    return Res;
  if (int Res = cmpNumbers(L->hasSideEffects(), R->hasSideEffects()))
    return Res;
  if (int Res = cmpNumbers(L->isAlignStack(), R->isAlignStack()))
    return Res;
  if (int Res = cmpNumbers(L->getDialect(), R->getDialect()))
    return Res;
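  // All compared fields match, yet L != R. Since InlineAsm values are uniqued,
  // the remaining difference must be in the function types, which cmpTypes can
  // still rank as equivalent.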
  assert(L->getFunctionType() != R->getFunctionType());
  return 0;
}

/// Compare two values used by the two functions under pair-wise comparison. If
/// this is the first time the values are seen, they're added to the mapping so
/// that we will detect mismatches on next use.
/// See comments in declaration for more details.
int FunctionComparator::cmpValues(const Value *L, const Value *R) const {
  // Catch self-reference case.
  if (L == FnL) {
    if (R == FnR)
      return 0;
    return -1;
  }
  if (R == FnR) {
    if (L == FnL)
      return 0;
    return 1;
  }

  const Constant *ConstL = dyn_cast<Constant>(L);
  const Constant *ConstR = dyn_cast<Constant>(R);
  if (ConstL && ConstR) {
    if (L == R)
      return 0;
    return cmpConstants(ConstL, ConstR);
  }

  if (ConstL)
    return 1;
  if (ConstR)
    return -1;

  const MetadataAsValue *MetadataValueL = dyn_cast<MetadataAsValue>(L);
  const MetadataAsValue *MetadataValueR = dyn_cast<MetadataAsValue>(R);
  if (MetadataValueL && MetadataValueR) {
    if (MetadataValueL == MetadataValueR)
      return 0;

    return cmpMetadata(MetadataValueL->getMetadata(),
                       MetadataValueR->getMetadata());
  }

  if (MetadataValueL)
    return 1;
  if (MetadataValueR)
    return -1;

  const InlineAsm *InlineAsmL = dyn_cast<InlineAsm>(L);
  const InlineAsm *InlineAsmR = dyn_cast<InlineAsm>(R);

  if (InlineAsmL && InlineAsmR)
    return cmpInlineAsm(InlineAsmL, InlineAsmR);
  if (InlineAsmL)
    return 1;
  if (InlineAsmR)
    return -1;

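  // Neither value is a constant, metadata, or inline asm. Fall back to
  // comparing the order in which the two values were first encountered in
  // their respective functions: the serial number is assigned when a value is
  // first inserted into its map.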
  auto LeftSN = sn_mapL.insert(std::make_pair(L, sn_mapL.size())),
       RightSN = sn_mapR.insert(std::make_pair(R, sn_mapR.size()));

  return cmpNumbers(LeftSN.first->second, RightSN.first->second);
}

// Test whether two basic blocks have equivalent behaviour.
int FunctionComparator::cmpBasicBlocks(const BasicBlock *BBL,
                                       const BasicBlock *BBR) const {
  BasicBlock::const_iterator InstL = BBL->begin(), InstLE = BBL->end();
  BasicBlock::const_iterator InstR = BBR->begin(), InstRE = BBR->end();

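  // Both blocks are non-empty (a well-formed block always ends in a
  // terminator), so comparing at least one instruction in the do/while below
  // is safe.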
  do {
    bool needToCmpOperands = true;
    if (int Res = cmpOperations(&*InstL, &*InstR, needToCmpOperands))
      return Res;
    if (needToCmpOperands) {
      assert(InstL->getNumOperands() == InstR->getNumOperands());

      for (unsigned i = 0, e = InstL->getNumOperands(); i != e; ++i) {
        Value *OpL = InstL->getOperand(i);
        Value *OpR = InstR->getOperand(i);
        if (int Res = cmpValues(OpL, OpR))
          return Res;
        // cmpValues should ensure this is true.
        assert(cmpTypes(OpL->getType(), OpR->getType()) == 0);
      }
    }

    ++InstL;
    ++InstR;
  } while (InstL != InstLE && InstR != InstRE);

  if (InstL != InstLE && InstR == InstRE)
    return 1;
  if (InstL == InstLE && InstR != InstRE)
    return -1;
  return 0;
}

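// Compare everything that can be checked without looking at the function
// bodies: attributes, GC, section, variadicity, calling convention, and the
// function type itself. The arguments are also enumerated here so that later
// cmpValues() queries see them already paired up.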
int FunctionComparator::compareSignature() const {
  if (int Res = cmpAttrs(FnL->getAttributes(), FnR->getAttributes()))
    return Res;

  if (int Res = cmpNumbers(FnL->hasGC(), FnR->hasGC()))
    return Res;

  if (FnL->hasGC()) {
    if (int Res = cmpMem(FnL->getGC(), FnR->getGC()))
      return Res;
  }

  if (int Res = cmpNumbers(FnL->hasSection(), FnR->hasSection()))
    return Res;

  if (FnL->hasSection()) {
    if (int Res = cmpMem(FnL->getSection(), FnR->getSection()))
      return Res;
  }

  if (int Res = cmpNumbers(FnL->isVarArg(), FnR->isVarArg()))
    return Res;

  // TODO: if it's internal and only used in direct calls, we could handle this
  // case too.
  if (int Res = cmpNumbers(FnL->getCallingConv(), FnR->getCallingConv()))
    return Res;

  if (int Res = cmpTypes(FnL->getFunctionType(), FnR->getFunctionType()))
    return Res;

  assert(FnL->arg_size() == FnR->arg_size() &&
         "Identically typed functions have different numbers of args!");

  // Visit the arguments so that they get enumerated in the order they're
  // passed in.
  for (Function::const_arg_iterator ArgLI = FnL->arg_begin(),
                                    ArgRI = FnR->arg_begin(),
                                    ArgLE = FnL->arg_end();
       ArgLI != ArgLE; ++ArgLI, ++ArgRI) {
    if (cmpValues(&*ArgLI, &*ArgRI) != 0)
      llvm_unreachable("Arguments repeat!");
  }
  return 0;
}

// Test whether the two functions have equivalent behaviour.
int FunctionComparator::compare() {
  beginCompare();

  if (int Res = compareSignature())
    return Res;

  // We do a CFG-ordered walk since the actual ordering of the blocks in the
  // linked list is immaterial. Our walk starts at the entry block for both
  // functions, then takes each block from each terminator in order. As an
  // artifact, this also means that unreachable blocks are ignored.
  SmallVector<const BasicBlock *, 8> FnLBBs, FnRBBs;
  SmallPtrSet<const BasicBlock *, 32> VisitedBBs; // in terms of FnL.

  FnLBBs.push_back(&FnL->getEntryBlock());
  FnRBBs.push_back(&FnR->getEntryBlock());

  VisitedBBs.insert(FnLBBs[0]);
  while (!FnLBBs.empty()) {
    const BasicBlock *BBL = FnLBBs.pop_back_val();
    const BasicBlock *BBR = FnRBBs.pop_back_val();

    if (int Res = cmpValues(BBL, BBR))
      return Res;

    if (int Res = cmpBasicBlocks(BBL, BBR))
      return Res;

    const Instruction *TermL = BBL->getTerminator();
    const Instruction *TermR = BBR->getTerminator();

    assert(TermL->getNumSuccessors() == TermR->getNumSuccessors());
    for (unsigned i = 0, e = TermL->getNumSuccessors(); i != e; ++i) {
      if (!VisitedBBs.insert(TermL->getSuccessor(i)).second)
        continue;

      FnLBBs.push_back(TermL->getSuccessor(i));
      FnRBBs.push_back(TermR->getSuccessor(i));
    }
  }
  return 0;
}