xref: /freebsd/contrib/llvm-project/llvm/lib/ProfileData/InstrProf.cpp (revision f126890ac5386406dadf7c4cfa9566cbb56537c5)
1 //===- InstrProf.cpp - Instrumented profiling format support --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains support for clang's instrumentation based PGO and
10 // coverage.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ProfileData/InstrProf.h"
15 #include "llvm/ADT/ArrayRef.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/ADT/SmallString.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/StringExtras.h"
20 #include "llvm/ADT/StringRef.h"
21 #include "llvm/Config/config.h"
22 #include "llvm/IR/Constant.h"
23 #include "llvm/IR/Constants.h"
24 #include "llvm/IR/Function.h"
25 #include "llvm/IR/GlobalValue.h"
26 #include "llvm/IR/GlobalVariable.h"
27 #include "llvm/IR/Instruction.h"
28 #include "llvm/IR/LLVMContext.h"
29 #include "llvm/IR/MDBuilder.h"
30 #include "llvm/IR/Metadata.h"
31 #include "llvm/IR/Module.h"
32 #include "llvm/IR/Type.h"
33 #include "llvm/ProfileData/InstrProfReader.h"
34 #include "llvm/Support/Casting.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Compiler.h"
37 #include "llvm/Support/Compression.h"
38 #include "llvm/Support/Endian.h"
39 #include "llvm/Support/Error.h"
40 #include "llvm/Support/ErrorHandling.h"
41 #include "llvm/Support/LEB128.h"
42 #include "llvm/Support/MathExtras.h"
43 #include "llvm/Support/Path.h"
44 #include "llvm/Support/SwapByteOrder.h"
45 #include "llvm/Support/VirtualFileSystem.h"
46 #include "llvm/TargetParser/Triple.h"
47 #include <algorithm>
48 #include <cassert>
49 #include <cstddef>
50 #include <cstdint>
51 #include <cstring>
52 #include <memory>
53 #include <string>
54 #include <system_error>
55 #include <type_traits>
56 #include <utility>
57 #include <vector>
58 
59 using namespace llvm;
60 
61 static cl::opt<bool> StaticFuncFullModulePrefix(
62     "static-func-full-module-prefix", cl::init(true), cl::Hidden,
63     cl::desc("Use full module build paths in the profile counter names for "
64              "static functions."));
65 
66 // This option is tailored to users that have a different top-level directory in
67 // the profile-gen and profile-use compilations. Users need to specify the number
68 // of levels to strip. A value larger than the number of directories in the
69 // source file path will strip all the directory names and leave only the basename.
70 //
71 // Note that the current ThinLTO module importing for indirect calls assumes
72 // the source directory name is not stripped. A non-zero option value here
73 // can potentially prevent some inter-module indirect-call promotions.
74 static cl::opt<unsigned> StaticFuncStripDirNamePrefix(
75     "static-func-strip-dirname-prefix", cl::init(0), cl::Hidden,
76     cl::desc("Strip specified level of directory name from source path in "
77              "the profile counter name for static functions."));
78 
79 static std::string getInstrProfErrString(instrprof_error Err,
80                                          const std::string &ErrMsg = "") {
81   std::string Msg;
82   raw_string_ostream OS(Msg);
83 
84   switch (Err) {
85   case instrprof_error::success:
86     OS << "success";
87     break;
88   case instrprof_error::eof:
89     OS << "end of file";
90     break;
91   case instrprof_error::unrecognized_format:
92     OS << "unrecognized instrumentation profile encoding format";
93     break;
94   case instrprof_error::bad_magic:
95     OS << "invalid instrumentation profile data (bad magic)";
96     break;
97   case instrprof_error::bad_header:
98     OS << "invalid instrumentation profile data (file header is corrupt)";
99     break;
100   case instrprof_error::unsupported_version:
101     OS << "unsupported instrumentation profile format version";
102     break;
103   case instrprof_error::unsupported_hash_type:
104     OS << "unsupported instrumentation profile hash type";
105     break;
106   case instrprof_error::too_large:
107     OS << "too much profile data";
108     break;
109   case instrprof_error::truncated:
110     OS << "truncated profile data";
111     break;
112   case instrprof_error::malformed:
113     OS << "malformed instrumentation profile data";
114     break;
115   case instrprof_error::missing_debug_info_for_correlation:
116     OS << "debug info for correlation is required";
117     break;
118   case instrprof_error::unexpected_debug_info_for_correlation:
119     OS << "debug info for correlation is not necessary";
120     break;
121   case instrprof_error::unable_to_correlate_profile:
122     OS << "unable to correlate profile";
123     break;
124   case instrprof_error::invalid_prof:
125     OS << "invalid profile created. Please file a bug "
126           "at: " BUG_REPORT_URL
127           " and include the profraw files that caused this error.";
128     break;
129   case instrprof_error::unknown_function:
130     OS << "no profile data available for function";
131     break;
132   case instrprof_error::hash_mismatch:
133     OS << "function control flow change detected (hash mismatch)";
134     break;
135   case instrprof_error::count_mismatch:
136     OS << "function basic block count change detected (counter mismatch)";
137     break;
138   case instrprof_error::counter_overflow:
139     OS << "counter overflow";
140     break;
141   case instrprof_error::value_site_count_mismatch:
142     OS << "function value site count change detected (counter mismatch)";
143     break;
144   case instrprof_error::compress_failed:
145     OS << "failed to compress data (zlib)";
146     break;
147   case instrprof_error::uncompress_failed:
148     OS << "failed to uncompress data (zlib)";
149     break;
150   case instrprof_error::empty_raw_profile:
151     OS << "empty raw profile file";
152     break;
153   case instrprof_error::zlib_unavailable:
154     OS << "profile uses zlib compression but the profile reader was built "
155           "without zlib support";
156     break;
157   case instrprof_error::raw_profile_version_mismatch:
158     OS << "raw profile version mismatch";
159     break;
160   }
161 
162   // If the optional error message is not empty, append it to the message.
163   if (!ErrMsg.empty())
164     OS << ": " << ErrMsg;
165 
166   return OS.str();
167 }
168 
169 namespace {
170 
171 // FIXME: This class is only here to support the transition to llvm::Error. It
172 // will be removed once this transition is complete. Clients should prefer to
173 // deal with the Error value directly, rather than converting to error_code.
174 class InstrProfErrorCategoryType : public std::error_category {
175   const char *name() const noexcept override { return "llvm.instrprof"; }
176 
177   std::string message(int IE) const override {
178     return getInstrProfErrString(static_cast<instrprof_error>(IE));
179   }
180 };
181 
182 } // end anonymous namespace
183 
184 const std::error_category &llvm::instrprof_category() {
185   static InstrProfErrorCategoryType ErrorCategory;
186   return ErrorCategory;
187 }
188 
189 namespace {
190 
191 const char *InstrProfSectNameCommon[] = {
192 #define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix)      \
193   SectNameCommon,
194 #include "llvm/ProfileData/InstrProfData.inc"
195 };
196 
197 const char *InstrProfSectNameCoff[] = {
198 #define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix)      \
199   SectNameCoff,
200 #include "llvm/ProfileData/InstrProfData.inc"
201 };
202 
203 const char *InstrProfSectNamePrefix[] = {
204 #define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix)      \
205   Prefix,
206 #include "llvm/ProfileData/InstrProfData.inc"
207 };
208 
209 } // namespace
210 
211 namespace llvm {
212 
213 cl::opt<bool> DoInstrProfNameCompression(
214     "enable-name-compression",
215     cl::desc("Enable name/filename string compression"), cl::init(true));
216 
217 std::string getInstrProfSectionName(InstrProfSectKind IPSK,
218                                     Triple::ObjectFormatType OF,
219                                     bool AddSegmentInfo) {
220   std::string SectName;
221 
222   if (OF == Triple::MachO && AddSegmentInfo)
223     SectName = InstrProfSectNamePrefix[IPSK];
224 
225   if (OF == Triple::COFF)
226     SectName += InstrProfSectNameCoff[IPSK];
227   else
228     SectName += InstrProfSectNameCommon[IPSK];
229 
230   if (OF == Triple::MachO && IPSK == IPSK_data && AddSegmentInfo)
231     SectName += ",regular,live_support";
232 
233   return SectName;
234 }
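
// For illustration (section names assumed, not spelled out in this file): on
// Mach-O with AddSegmentInfo, a section name gets the segment prefix from
// InstrProfData.inc prepended (e.g. a counter section of the assumed form
// "__DATA,__llvm_prf_cnts"), and the Mach-O data section additionally gets
// ",regular,live_support" appended; ELF uses the common names and COFF the
// COFF-specific names unchanged.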
235 
236 std::string InstrProfError::message() const {
237   return getInstrProfErrString(Err, Msg);
238 }
239 
240 char InstrProfError::ID = 0;
241 
242 std::string getPGOFuncName(StringRef RawFuncName,
243                            GlobalValue::LinkageTypes Linkage,
244                            StringRef FileName,
245                            uint64_t Version LLVM_ATTRIBUTE_UNUSED) {
246   return GlobalValue::getGlobalIdentifier(RawFuncName, Linkage, FileName);
247 }
248 
249 // Strip NumPrefix levels of directory names from PathNameStr. If the number of
250 // directory separators is less than NumPrefix, strip all the directories and
251 // leave only the base file name.
252 static StringRef stripDirPrefix(StringRef PathNameStr, uint32_t NumPrefix) {
253   uint32_t Count = NumPrefix;
254   uint32_t Pos = 0, LastPos = 0;
255   for (auto & CI : PathNameStr) {
256     ++Pos;
257     if (llvm::sys::path::is_separator(CI)) {
258       LastPos = Pos;
259       --Count;
260     }
261     if (Count == 0)
262       break;
263   }
264   return PathNameStr.substr(LastPos);
265 }
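
// For illustration: stripDirPrefix("a/b/c.c", 1) returns "b/c.c", and
// stripDirPrefix("a/b/c.c", 5) strips every directory and returns "c.c".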
266 
267 // Return the PGOFuncName. This function has some special handling when called
268 // during LTO optimization. The following only applies when called from LTO
269 // passes (when \c InLTO is true): LTO's internalization privatizes many global
270 // linkage symbols. This happens after value profile annotation, but those
271 // internal linkage functions should not have a source prefix.
272 // Additionally, in ThinLTO mode, exported internal functions are promoted
273 // and renamed. We need to ensure that the original internal PGO name is
274 // used when computing the GUID that is compared against the profiled GUIDs.
275 // To differentiate compiler-generated internal symbols from original ones,
276 // PGOFuncName metadata is created and attached to the original internal
277 // symbols in the value profile annotation step
278 // (PGOUseFunc::annotateIndirectCallSites). If a symbol does not have the
279 // metadata, its original linkage must be non-internal.
280 std::string getPGOFuncName(const Function &F, bool InLTO, uint64_t Version) {
281   if (!InLTO) {
282     StringRef FileName(F.getParent()->getSourceFileName());
283     uint32_t StripLevel = StaticFuncFullModulePrefix ? 0 : (uint32_t)-1;
284     if (StripLevel < StaticFuncStripDirNamePrefix)
285       StripLevel = StaticFuncStripDirNamePrefix;
286     if (StripLevel)
287       FileName = stripDirPrefix(FileName, StripLevel);
288     return getPGOFuncName(F.getName(), F.getLinkage(), FileName, Version);
289   }
290 
291   // In LTO mode (when InLTO is true), first check if there is metadata.
292   if (MDNode *MD = getPGOFuncNameMetadata(F)) {
293     StringRef S = cast<MDString>(MD->getOperand(0))->getString();
294     return S.str();
295   }
296 
297   // If there is no metadata, the function must be a global before the value
298   // profile annotation pass. Its current linkage may be internal if it is
299   // internalized in LTO mode.
300   return getPGOFuncName(F.getName(), GlobalValue::ExternalLinkage, "");
301 }
302 
303 StringRef getFuncNameWithoutPrefix(StringRef PGOFuncName, StringRef FileName) {
304   if (FileName.empty())
305     return PGOFuncName;
306   // Drop the file name including ':'. See also getPGOFuncName.
307   if (PGOFuncName.startswith(FileName))
308     PGOFuncName = PGOFuncName.drop_front(FileName.size() + 1);
309   return PGOFuncName;
310 }
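
// For illustration: a local-linkage function "bar" defined in "foo.c" gets the
// PGO name "foo.c:bar"; calling this with FileName "foo.c" returns just "bar".
// Names without the file-name prefix are returned unchanged.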
311 
312 // \p FuncName is the string used as the profile lookup key for the function. A
313 // symbol is created to hold the name. Return the legalized symbol name.
314 std::string getPGOFuncNameVarName(StringRef FuncName,
315                                   GlobalValue::LinkageTypes Linkage) {
316   std::string VarName = std::string(getInstrProfNameVarPrefix());
317   VarName += FuncName;
318 
319   if (!GlobalValue::isLocalLinkage(Linkage))
320     return VarName;
321 
322   // Now fix up illegal chars in local VarName that may upset the assembler.
323   const char *InvalidChars = "-:<>/\"'";
324   size_t found = VarName.find_first_of(InvalidChars);
325   while (found != std::string::npos) {
326     VarName[found] = '_';
327     found = VarName.find_first_of(InvalidChars, found + 1);
328   }
329   return VarName;
330 }
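
// For illustration (assuming the name variable prefix is "__profn_"): for a
// local-linkage PGO name "foo.c:bar" the symbol becomes "__profn_foo.c_bar",
// since ':' is one of the characters replaced with '_' above.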
331 
332 GlobalVariable *createPGOFuncNameVar(Module &M,
333                                      GlobalValue::LinkageTypes Linkage,
334                                      StringRef PGOFuncName) {
335   // We generally want to match the function's linkage, but available_externally
336   // and extern_weak both have the wrong semantics, and anything that doesn't
337   // need to link across compilation units doesn't need to be visible at all.
338   if (Linkage == GlobalValue::ExternalWeakLinkage)
339     Linkage = GlobalValue::LinkOnceAnyLinkage;
340   else if (Linkage == GlobalValue::AvailableExternallyLinkage)
341     Linkage = GlobalValue::LinkOnceODRLinkage;
342   else if (Linkage == GlobalValue::InternalLinkage ||
343            Linkage == GlobalValue::ExternalLinkage)
344     Linkage = GlobalValue::PrivateLinkage;
345 
346   auto *Value =
347       ConstantDataArray::getString(M.getContext(), PGOFuncName, false);
348   auto FuncNameVar =
349       new GlobalVariable(M, Value->getType(), true, Linkage, Value,
350                          getPGOFuncNameVarName(PGOFuncName, Linkage));
351 
352   // Hide the symbol so that we correctly get a copy for each executable.
353   if (!GlobalValue::isLocalLinkage(FuncNameVar->getLinkage()))
354     FuncNameVar->setVisibility(GlobalValue::HiddenVisibility);
355 
356   return FuncNameVar;
357 }
358 
359 GlobalVariable *createPGOFuncNameVar(Function &F, StringRef PGOFuncName) {
360   return createPGOFuncNameVar(*F.getParent(), F.getLinkage(), PGOFuncName);
361 }
362 
363 Error InstrProfSymtab::create(Module &M, bool InLTO) {
364   for (Function &F : M) {
365     // A function may not have a name (e.g. when asm("") is used to override
366     // the name). Ignore such functions.
367     if (!F.hasName())
368       continue;
369     const std::string &PGOFuncName = getPGOFuncName(F, InLTO);
370     if (Error E = addFuncName(PGOFuncName))
371       return E;
372     MD5FuncMap.emplace_back(Function::getGUID(PGOFuncName), &F);
373     // In ThinLTO, a local function may have been promoted to global and have
374     // the suffix ".llvm." added to the function name. We need to add the
375     // stripped function name to the symbol table so that we can find a match
376     // from the profile.
377     //
378     // There may be other suffixes similar to ".llvm." that need to be
379     // stripped before the matching, but the ".__uniq." suffix, which is used
380     // to differentiate internal linkage functions in different modules,
381     // should be kept. It is currently the only suffix with the pattern ".xxx"
382     // that is kept before matching.
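    //
    // For illustration: for the PGO name "foo.__uniq.123.llvm.456", the full
    // name is added above and the stripped name "foo.__uniq.123" is added
    // below.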
383     const std::string UniqSuffix = ".__uniq.";
384     auto pos = PGOFuncName.find(UniqSuffix);
385     // Search '.' after ".__uniq." if ".__uniq." exists, otherwise
386     // search '.' from the beginning.
387     if (pos != std::string::npos)
388       pos += UniqSuffix.length();
389     else
390       pos = 0;
391     pos = PGOFuncName.find('.', pos);
392     if (pos != std::string::npos && pos != 0) {
393       const std::string &OtherFuncName = PGOFuncName.substr(0, pos);
394       if (Error E = addFuncName(OtherFuncName))
395         return E;
396       MD5FuncMap.emplace_back(Function::getGUID(OtherFuncName), &F);
397     }
398   }
399   Sorted = false;
400   finalizeSymtab();
401   return Error::success();
402 }
403 
404 uint64_t InstrProfSymtab::getFunctionHashFromAddress(uint64_t Address) {
405   finalizeSymtab();
406   auto It = partition_point(AddrToMD5Map, [=](std::pair<uint64_t, uint64_t> A) {
407     return A.first < Address;
408   });
409   // Raw function pointers collected by the value profiler may be from
410   // external functions that are not instrumented. They won't have
411   // mapping data to be used by the deserializer. Force the value to
412   // be 0 in this case.
413   if (It != AddrToMD5Map.end() && It->first == Address)
414     return (uint64_t)It->second;
415   return 0;
416 }
417 
418 void InstrProfSymtab::dumpNames(raw_ostream &OS) const {
419   SmallVector<StringRef, 0> Sorted(NameTab.keys());
420   llvm::sort(Sorted);
421   for (StringRef S : Sorted)
422     OS << S << '\n';
423 }
424 
425 Error collectPGOFuncNameStrings(ArrayRef<std::string> NameStrs,
426                                 bool doCompression, std::string &Result) {
427   assert(!NameStrs.empty() && "No name data to emit");
428 
429   uint8_t Header[16], *P = Header;
430   std::string UncompressedNameStrings =
431       join(NameStrs.begin(), NameStrs.end(), getInstrProfNameSeparator());
432 
433   assert(StringRef(UncompressedNameStrings)
434                  .count(getInstrProfNameSeparator()) == (NameStrs.size() - 1) &&
435          "PGO name is invalid (contains separator token)");
436 
437   unsigned EncLen = encodeULEB128(UncompressedNameStrings.length(), P);
438   P += EncLen;
439 
440   auto WriteStringToResult = [&](size_t CompressedLen, StringRef InputStr) {
441     EncLen = encodeULEB128(CompressedLen, P);
442     P += EncLen;
443     char *HeaderStr = reinterpret_cast<char *>(&Header[0]);
444     unsigned HeaderLen = P - &Header[0];
445     Result.append(HeaderStr, HeaderLen);
446     Result += InputStr;
447     return Error::success();
448   };
449 
450   if (!doCompression) {
451     return WriteStringToResult(0, UncompressedNameStrings);
452   }
453 
454   SmallVector<uint8_t, 128> CompressedNameStrings;
455   compression::zlib::compress(arrayRefFromStringRef(UncompressedNameStrings),
456                               CompressedNameStrings,
457                               compression::zlib::BestSizeCompression);
458 
459   return WriteStringToResult(CompressedNameStrings.size(),
460                              toStringRef(CompressedNameStrings));
461 }
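
// A sketch of the layout produced above (as read back by readPGOFuncNameStrings
// below): two ULEB128 values -- the uncompressed payload length and the
// compressed length (0 when compression is disabled) -- followed by either the
// zlib-compressed bytes or the raw separator-joined name strings.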
462 
463 StringRef getPGOFuncNameVarInitializer(GlobalVariable *NameVar) {
464   auto *Arr = cast<ConstantDataArray>(NameVar->getInitializer());
465   StringRef NameStr =
466       Arr->isCString() ? Arr->getAsCString() : Arr->getAsString();
467   return NameStr;
468 }
469 
470 Error collectPGOFuncNameStrings(ArrayRef<GlobalVariable *> NameVars,
471                                 std::string &Result, bool doCompression) {
472   std::vector<std::string> NameStrs;
473   for (auto *NameVar : NameVars) {
474     NameStrs.push_back(std::string(getPGOFuncNameVarInitializer(NameVar)));
475   }
476   return collectPGOFuncNameStrings(
477       NameStrs, compression::zlib::isAvailable() && doCompression, Result);
478 }
479 
480 Error readPGOFuncNameStrings(StringRef NameStrings, InstrProfSymtab &Symtab) {
481   const uint8_t *P = NameStrings.bytes_begin();
482   const uint8_t *EndP = NameStrings.bytes_end();
483   while (P < EndP) {
484     uint32_t N;
485     uint64_t UncompressedSize = decodeULEB128(P, &N);
486     P += N;
487     uint64_t CompressedSize = decodeULEB128(P, &N);
488     P += N;
489     bool isCompressed = (CompressedSize != 0);
490     SmallVector<uint8_t, 128> UncompressedNameStrings;
491     StringRef NameStrings;
492     if (isCompressed) {
493       if (!llvm::compression::zlib::isAvailable())
494         return make_error<InstrProfError>(instrprof_error::zlib_unavailable);
495 
496       if (Error E = compression::zlib::decompress(ArrayRef(P, CompressedSize),
497                                                   UncompressedNameStrings,
498                                                   UncompressedSize)) {
499         consumeError(std::move(E));
500         return make_error<InstrProfError>(instrprof_error::uncompress_failed);
501       }
502       P += CompressedSize;
503       NameStrings = toStringRef(UncompressedNameStrings);
504     } else {
505       NameStrings =
506           StringRef(reinterpret_cast<const char *>(P), UncompressedSize);
507       P += UncompressedSize;
508     }
509     // Now parse the name strings.
510     SmallVector<StringRef, 0> Names;
511     NameStrings.split(Names, getInstrProfNameSeparator());
512     for (StringRef &Name : Names)
513       if (Error E = Symtab.addFuncName(Name))
514         return E;
515 
516     while (P < EndP && *P == 0)
517       P++;
518   }
519   return Error::success();
520 }
521 
522 void InstrProfRecord::accumulateCounts(CountSumOrPercent &Sum) const {
523   uint64_t FuncSum = 0;
524   Sum.NumEntries += Counts.size();
525   for (uint64_t Count : Counts)
526     FuncSum += Count;
527   Sum.CountSum += FuncSum;
528 
529   for (uint32_t VK = IPVK_First; VK <= IPVK_Last; ++VK) {
530     uint64_t KindSum = 0;
531     uint32_t NumValueSites = getNumValueSites(VK);
532     for (size_t I = 0; I < NumValueSites; ++I) {
533       uint32_t NV = getNumValueDataForSite(VK, I);
534       std::unique_ptr<InstrProfValueData[]> VD = getValueForSite(VK, I);
535       for (uint32_t V = 0; V < NV; V++)
536         KindSum += VD[V].Count;
537     }
538     Sum.ValueCounts[VK] += KindSum;
539   }
540 }
541 
542 void InstrProfValueSiteRecord::overlap(InstrProfValueSiteRecord &Input,
543                                        uint32_t ValueKind,
544                                        OverlapStats &Overlap,
545                                        OverlapStats &FuncLevelOverlap) {
546   this->sortByTargetValues();
547   Input.sortByTargetValues();
548   double Score = 0.0f, FuncLevelScore = 0.0f;
549   auto I = ValueData.begin();
550   auto IE = ValueData.end();
551   auto J = Input.ValueData.begin();
552   auto JE = Input.ValueData.end();
553   while (I != IE && J != JE) {
554     if (I->Value == J->Value) {
555       Score += OverlapStats::score(I->Count, J->Count,
556                                    Overlap.Base.ValueCounts[ValueKind],
557                                    Overlap.Test.ValueCounts[ValueKind]);
558       FuncLevelScore += OverlapStats::score(
559           I->Count, J->Count, FuncLevelOverlap.Base.ValueCounts[ValueKind],
560           FuncLevelOverlap.Test.ValueCounts[ValueKind]);
561       ++I;
562     } else if (I->Value < J->Value) {
563       ++I;
564       continue;
565     }
566     ++J;
567   }
568   Overlap.Overlap.ValueCounts[ValueKind] += Score;
569   FuncLevelOverlap.Overlap.ValueCounts[ValueKind] += FuncLevelScore;
570 }
571 
572 // Compute the overlap of value profile data of \p ValueKind with \p Other.
573 void InstrProfRecord::overlapValueProfData(uint32_t ValueKind,
574                                            InstrProfRecord &Other,
575                                            OverlapStats &Overlap,
576                                            OverlapStats &FuncLevelOverlap) {
577   uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
578   assert(ThisNumValueSites == Other.getNumValueSites(ValueKind));
579   if (!ThisNumValueSites)
580     return;
581 
582   std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
583       getOrCreateValueSitesForKind(ValueKind);
584   MutableArrayRef<InstrProfValueSiteRecord> OtherSiteRecords =
585       Other.getValueSitesForKind(ValueKind);
586   for (uint32_t I = 0; I < ThisNumValueSites; I++)
587     ThisSiteRecords[I].overlap(OtherSiteRecords[I], ValueKind, Overlap,
588                                FuncLevelOverlap);
589 }
590 
591 void InstrProfRecord::overlap(InstrProfRecord &Other, OverlapStats &Overlap,
592                               OverlapStats &FuncLevelOverlap,
593                               uint64_t ValueCutoff) {
594   // FuncLevel CountSum for Other should already be computed and nonzero.
595   assert(FuncLevelOverlap.Test.CountSum >= 1.0f);
596   accumulateCounts(FuncLevelOverlap.Base);
597   bool Mismatch = (Counts.size() != Other.Counts.size());
598 
599   // Check if the value profiles mismatch.
600   if (!Mismatch) {
601     for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind) {
602       uint32_t ThisNumValueSites = getNumValueSites(Kind);
603       uint32_t OtherNumValueSites = Other.getNumValueSites(Kind);
604       if (ThisNumValueSites != OtherNumValueSites) {
605         Mismatch = true;
606         break;
607       }
608     }
609   }
610   if (Mismatch) {
611     Overlap.addOneMismatch(FuncLevelOverlap.Test);
612     return;
613   }
614 
615   // Compute overlap for value counts.
616   for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
617     overlapValueProfData(Kind, Other, Overlap, FuncLevelOverlap);
618 
619   double Score = 0.0;
620   uint64_t MaxCount = 0;
621   // Compute overlap for edge counts.
622   for (size_t I = 0, E = Other.Counts.size(); I < E; ++I) {
623     Score += OverlapStats::score(Counts[I], Other.Counts[I],
624                                  Overlap.Base.CountSum, Overlap.Test.CountSum);
625     MaxCount = std::max(Other.Counts[I], MaxCount);
626   }
627   Overlap.Overlap.CountSum += Score;
628   Overlap.Overlap.NumEntries += 1;
629 
630   if (MaxCount >= ValueCutoff) {
631     double FuncScore = 0.0;
632     for (size_t I = 0, E = Other.Counts.size(); I < E; ++I)
633       FuncScore += OverlapStats::score(Counts[I], Other.Counts[I],
634                                        FuncLevelOverlap.Base.CountSum,
635                                        FuncLevelOverlap.Test.CountSum);
636     FuncLevelOverlap.Overlap.CountSum = FuncScore;
637     FuncLevelOverlap.Overlap.NumEntries = Other.Counts.size();
638     FuncLevelOverlap.Valid = true;
639   }
640 }
641 
642 void InstrProfValueSiteRecord::merge(InstrProfValueSiteRecord &Input,
643                                      uint64_t Weight,
644                                      function_ref<void(instrprof_error)> Warn) {
645   this->sortByTargetValues();
646   Input.sortByTargetValues();
647   auto I = ValueData.begin();
648   auto IE = ValueData.end();
649   for (const InstrProfValueData &J : Input.ValueData) {
650     while (I != IE && I->Value < J.Value)
651       ++I;
652     if (I != IE && I->Value == J.Value) {
653       bool Overflowed;
654       I->Count = SaturatingMultiplyAdd(J.Count, Weight, I->Count, &Overflowed);
655       if (Overflowed)
656         Warn(instrprof_error::counter_overflow);
657       ++I;
658       continue;
659     }
660     ValueData.insert(I, J);
661   }
662 }
663 
664 void InstrProfValueSiteRecord::scale(uint64_t N, uint64_t D,
665                                      function_ref<void(instrprof_error)> Warn) {
666   for (InstrProfValueData &I : ValueData) {
667     bool Overflowed;
668     I.Count = SaturatingMultiply(I.Count, N, &Overflowed) / D;
669     if (Overflowed)
670       Warn(instrprof_error::counter_overflow);
671   }
672 }
673 
674 // Merge Value Profile data from Src record to this record for ValueKind.
675 // Scale merged value counts by \p Weight.
676 void InstrProfRecord::mergeValueProfData(
677     uint32_t ValueKind, InstrProfRecord &Src, uint64_t Weight,
678     function_ref<void(instrprof_error)> Warn) {
679   uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
680   uint32_t OtherNumValueSites = Src.getNumValueSites(ValueKind);
681   if (ThisNumValueSites != OtherNumValueSites) {
682     Warn(instrprof_error::value_site_count_mismatch);
683     return;
684   }
685   if (!ThisNumValueSites)
686     return;
687   std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
688       getOrCreateValueSitesForKind(ValueKind);
689   MutableArrayRef<InstrProfValueSiteRecord> OtherSiteRecords =
690       Src.getValueSitesForKind(ValueKind);
691   for (uint32_t I = 0; I < ThisNumValueSites; I++)
692     ThisSiteRecords[I].merge(OtherSiteRecords[I], Weight, Warn);
693 }
694 
695 void InstrProfRecord::merge(InstrProfRecord &Other, uint64_t Weight,
696                             function_ref<void(instrprof_error)> Warn) {
697   // If the number of counters doesn't match, we either have bad data
698   // or a hash collision.
699   if (Counts.size() != Other.Counts.size()) {
700     Warn(instrprof_error::count_mismatch);
701     return;
702   }
703 
704   // Special handling of the first count as the PseudoCount.
705   CountPseudoKind OtherKind = Other.getCountPseudoKind();
706   CountPseudoKind ThisKind = getCountPseudoKind();
707   if (OtherKind != NotPseudo || ThisKind != NotPseudo) {
708     // We don't allow the merge of a profile with pseudo counts and
709     // a normal profile (i.e. without pseudo counts).
710     // Profile supplementation should be done after the profile merge.
711     if (OtherKind == NotPseudo || ThisKind == NotPseudo) {
712       Warn(instrprof_error::count_mismatch);
713       return;
714     }
715     if (OtherKind == PseudoHot || ThisKind == PseudoHot)
716       setPseudoCount(PseudoHot);
717     else
718       setPseudoCount(PseudoWarm);
719     return;
720   }
721 
722   for (size_t I = 0, E = Other.Counts.size(); I < E; ++I) {
723     bool Overflowed;
724     uint64_t Value =
725         SaturatingMultiplyAdd(Other.Counts[I], Weight, Counts[I], &Overflowed);
726     if (Value > getInstrMaxCountValue()) {
727       Value = getInstrMaxCountValue();
728       Overflowed = true;
729     }
730     Counts[I] = Value;
731     if (Overflowed)
732       Warn(instrprof_error::counter_overflow);
733   }
734 
735   for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
736     mergeValueProfData(Kind, Other, Weight, Warn);
737 }
738 
739 void InstrProfRecord::scaleValueProfData(
740     uint32_t ValueKind, uint64_t N, uint64_t D,
741     function_ref<void(instrprof_error)> Warn) {
742   for (auto &R : getValueSitesForKind(ValueKind))
743     R.scale(N, D, Warn);
744 }
745 
746 void InstrProfRecord::scale(uint64_t N, uint64_t D,
747                             function_ref<void(instrprof_error)> Warn) {
748   assert(D != 0 && "D cannot be 0");
749   for (auto &Count : this->Counts) {
750     bool Overflowed;
751     Count = SaturatingMultiply(Count, N, &Overflowed) / D;
752     if (Count > getInstrMaxCountValue()) {
753       Count = getInstrMaxCountValue();
754       Overflowed = true;
755     }
756     if (Overflowed)
757       Warn(instrprof_error::counter_overflow);
758   }
759   for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
760     scaleValueProfData(Kind, N, D, Warn);
761 }
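
// For illustration: scale(N, D) multiplies each count by N/D with saturation;
// e.g. scale(1, 2) halves every edge count and every value profile count.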
762 
763 // Map an indirect call target address to the MD5 hash of the target's name.
764 uint64_t InstrProfRecord::remapValue(uint64_t Value, uint32_t ValueKind,
765                                      InstrProfSymtab *SymTab) {
766   if (!SymTab)
767     return Value;
768 
769   if (ValueKind == IPVK_IndirectCallTarget)
770     return SymTab->getFunctionHashFromAddress(Value);
771 
772   return Value;
773 }
774 
775 void InstrProfRecord::addValueData(uint32_t ValueKind, uint32_t Site,
776                                    InstrProfValueData *VData, uint32_t N,
777                                    InstrProfSymtab *ValueMap) {
778   for (uint32_t I = 0; I < N; I++) {
779     VData[I].Value = remapValue(VData[I].Value, ValueKind, ValueMap);
780   }
781   std::vector<InstrProfValueSiteRecord> &ValueSites =
782       getOrCreateValueSitesForKind(ValueKind);
783   if (N == 0)
784     ValueSites.emplace_back();
785   else
786     ValueSites.emplace_back(VData, VData + N);
787 }
788 
789 std::vector<BPFunctionNode> TemporalProfTraceTy::createBPFunctionNodes(
790     ArrayRef<TemporalProfTraceTy> Traces) {
791   using IDT = BPFunctionNode::IDT;
792   using UtilityNodeT = BPFunctionNode::UtilityNodeT;
793   // Collect all function IDs ordered by their smallest timestamp. This will be
794   // used as the initial FunctionNode order.
795   SetVector<IDT> FunctionIds;
796   size_t LargestTraceSize = 0;
797   for (auto &Trace : Traces)
798     LargestTraceSize =
799         std::max(LargestTraceSize, Trace.FunctionNameRefs.size());
800   for (size_t Timestamp = 0; Timestamp < LargestTraceSize; Timestamp++)
801     for (auto &Trace : Traces)
802       if (Timestamp < Trace.FunctionNameRefs.size())
803         FunctionIds.insert(Trace.FunctionNameRefs[Timestamp]);
804 
805   int N = std::ceil(std::log2(LargestTraceSize));
806 
807   // TODO: We need to use the Trace.Weight field to give more weight to more
808   // important utilities.
809   DenseMap<IDT, SmallVector<UtilityNodeT, 4>> FuncGroups;
810   for (size_t TraceIdx = 0; TraceIdx < Traces.size(); TraceIdx++) {
811     auto &Trace = Traces[TraceIdx].FunctionNameRefs;
812     for (size_t Timestamp = 0; Timestamp < Trace.size(); Timestamp++) {
813       for (int I = std::floor(std::log2(Timestamp + 1)); I < N; I++) {
814         auto &FunctionId = Trace[Timestamp];
815         UtilityNodeT GroupId = TraceIdx * N + I;
816         FuncGroups[FunctionId].push_back(GroupId);
817       }
818     }
819   }
820 
821   std::vector<BPFunctionNode> Nodes;
822   for (auto &Id : FunctionIds) {
823     auto &UNs = FuncGroups[Id];
824     llvm::sort(UNs);
825     UNs.erase(std::unique(UNs.begin(), UNs.end()), UNs.end());
826     Nodes.emplace_back(Id, UNs);
827   }
828   return Nodes;
829 }
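
// A note on the grouping above: a function observed at timestamp T in trace
// TraceIdx is assigned the utility nodes TraceIdx * N + I for
// I in [floor(log2(T + 1)), N), so functions appearing early in a trace share
// more utility nodes, which biases balanced partitioning toward keeping them
// close together.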
830 
831 #define INSTR_PROF_COMMON_API_IMPL
832 #include "llvm/ProfileData/InstrProfData.inc"
833 
834 /*!
835  * ValueProfRecordClosure interface implementation for the InstrProfRecord
836  * class. These C wrappers are used as adaptors so that C++ code can be
837  * invoked as callbacks.
838  */
839 uint32_t getNumValueKindsInstrProf(const void *Record) {
840   return reinterpret_cast<const InstrProfRecord *>(Record)->getNumValueKinds();
841 }
842 
843 uint32_t getNumValueSitesInstrProf(const void *Record, uint32_t VKind) {
844   return reinterpret_cast<const InstrProfRecord *>(Record)
845       ->getNumValueSites(VKind);
846 }
847 
848 uint32_t getNumValueDataInstrProf(const void *Record, uint32_t VKind) {
849   return reinterpret_cast<const InstrProfRecord *>(Record)
850       ->getNumValueData(VKind);
851 }
852 
853 uint32_t getNumValueDataForSiteInstrProf(const void *R, uint32_t VK,
854                                          uint32_t S) {
855   return reinterpret_cast<const InstrProfRecord *>(R)
856       ->getNumValueDataForSite(VK, S);
857 }
858 
859 void getValueForSiteInstrProf(const void *R, InstrProfValueData *Dst,
860                               uint32_t K, uint32_t S) {
861   reinterpret_cast<const InstrProfRecord *>(R)->getValueForSite(Dst, K, S);
862 }
863 
864 ValueProfData *allocValueProfDataInstrProf(size_t TotalSizeInBytes) {
865   ValueProfData *VD =
866       (ValueProfData *)(new (::operator new(TotalSizeInBytes)) ValueProfData());
867   memset(VD, 0, TotalSizeInBytes);
868   return VD;
869 }
870 
871 static ValueProfRecordClosure InstrProfRecordClosure = {
872     nullptr,
873     getNumValueKindsInstrProf,
874     getNumValueSitesInstrProf,
875     getNumValueDataInstrProf,
876     getNumValueDataForSiteInstrProf,
877     nullptr,
878     getValueForSiteInstrProf,
879     allocValueProfDataInstrProf};
880 
881 // Wrapper implementation using the closure mechanism.
882 uint32_t ValueProfData::getSize(const InstrProfRecord &Record) {
883   auto Closure = InstrProfRecordClosure;
884   Closure.Record = &Record;
885   return getValueProfDataSize(&Closure);
886 }
887 
888 // Wrapper implementation using the closure mechanism.
889 std::unique_ptr<ValueProfData>
890 ValueProfData::serializeFrom(const InstrProfRecord &Record) {
891   InstrProfRecordClosure.Record = &Record;
892 
893   std::unique_ptr<ValueProfData> VPD(
894       serializeValueProfDataFrom(&InstrProfRecordClosure, nullptr));
895   return VPD;
896 }
897 
898 void ValueProfRecord::deserializeTo(InstrProfRecord &Record,
899                                     InstrProfSymtab *SymTab) {
900   Record.reserveSites(Kind, NumValueSites);
901 
902   InstrProfValueData *ValueData = getValueProfRecordValueData(this);
903   for (uint64_t VSite = 0; VSite < NumValueSites; ++VSite) {
904     uint8_t ValueDataCount = this->SiteCountArray[VSite];
905     Record.addValueData(Kind, VSite, ValueData, ValueDataCount, SymTab);
906     ValueData += ValueDataCount;
907   }
908 }
909 
910 // For writing/serializing, Old is the host endianness, and New is the
911 // byte order intended on disk. For reading/deserialization, Old
912 // is the on-disk source endianness, and New is the host endianness.
913 void ValueProfRecord::swapBytes(support::endianness Old,
914                                 support::endianness New) {
915   using namespace support;
916 
917   if (Old == New)
918     return;
919 
920   if (getHostEndianness() != Old) {
921     sys::swapByteOrder<uint32_t>(NumValueSites);
922     sys::swapByteOrder<uint32_t>(Kind);
923   }
924   uint32_t ND = getValueProfRecordNumValueData(this);
925   InstrProfValueData *VD = getValueProfRecordValueData(this);
926 
927   // No need to swap byte array: SiteCountArray.
928   for (uint32_t I = 0; I < ND; I++) {
929     sys::swapByteOrder<uint64_t>(VD[I].Value);
930     sys::swapByteOrder<uint64_t>(VD[I].Count);
931   }
932   if (getHostEndianness() == Old) {
933     sys::swapByteOrder<uint32_t>(NumValueSites);
934     sys::swapByteOrder<uint32_t>(Kind);
935   }
936 }
937 
938 void ValueProfData::deserializeTo(InstrProfRecord &Record,
939                                   InstrProfSymtab *SymTab) {
940   if (NumValueKinds == 0)
941     return;
942 
943   ValueProfRecord *VR = getFirstValueProfRecord(this);
944   for (uint32_t K = 0; K < NumValueKinds; K++) {
945     VR->deserializeTo(Record, SymTab);
946     VR = getValueProfRecordNext(VR);
947   }
948 }
949 
950 template <class T>
951 static T swapToHostOrder(const unsigned char *&D, support::endianness Orig) {
952   using namespace support;
953 
954   if (Orig == little)
955     return endian::readNext<T, little, unaligned>(D);
956   else
957     return endian::readNext<T, big, unaligned>(D);
958 }
959 
960 static std::unique_ptr<ValueProfData> allocValueProfData(uint32_t TotalSize) {
961   return std::unique_ptr<ValueProfData>(new (::operator new(TotalSize))
962                                             ValueProfData());
963 }
964 
965 Error ValueProfData::checkIntegrity() {
966   if (NumValueKinds > IPVK_Last + 1)
967     return make_error<InstrProfError>(
968         instrprof_error::malformed, "number of value profile kinds is invalid");
969   // Total size needs to be a multiple of the quadword size.
970   if (TotalSize % sizeof(uint64_t))
971     return make_error<InstrProfError>(
972         instrprof_error::malformed, "total size is not a multiple of quadword");
973 
974   ValueProfRecord *VR = getFirstValueProfRecord(this);
975   for (uint32_t K = 0; K < this->NumValueKinds; K++) {
976     if (VR->Kind > IPVK_Last)
977       return make_error<InstrProfError>(instrprof_error::malformed,
978                                         "value kind is invalid");
979     VR = getValueProfRecordNext(VR);
980     if ((char *)VR - (char *)this > (ptrdiff_t)TotalSize)
981       return make_error<InstrProfError>(
982           instrprof_error::malformed,
983           "value profile address is greater than total size");
984   }
985   return Error::success();
986 }
987 
988 Expected<std::unique_ptr<ValueProfData>>
989 ValueProfData::getValueProfData(const unsigned char *D,
990                                 const unsigned char *const BufferEnd,
991                                 support::endianness Endianness) {
992   using namespace support;
993 
994   if (D + sizeof(ValueProfData) > BufferEnd)
995     return make_error<InstrProfError>(instrprof_error::truncated);
996 
997   const unsigned char *Header = D;
998   uint32_t TotalSize = swapToHostOrder<uint32_t>(Header, Endianness);
999   if (D + TotalSize > BufferEnd)
1000     return make_error<InstrProfError>(instrprof_error::too_large);
1001 
1002   std::unique_ptr<ValueProfData> VPD = allocValueProfData(TotalSize);
1003   memcpy(VPD.get(), D, TotalSize);
1004   // Byte swap.
1005   VPD->swapBytesToHost(Endianness);
1006 
1007   Error E = VPD->checkIntegrity();
1008   if (E)
1009     return std::move(E);
1010 
1011   return std::move(VPD);
1012 }
1013 
1014 void ValueProfData::swapBytesToHost(support::endianness Endianness) {
1015   using namespace support;
1016 
1017   if (Endianness == getHostEndianness())
1018     return;
1019 
1020   sys::swapByteOrder<uint32_t>(TotalSize);
1021   sys::swapByteOrder<uint32_t>(NumValueKinds);
1022 
1023   ValueProfRecord *VR = getFirstValueProfRecord(this);
1024   for (uint32_t K = 0; K < NumValueKinds; K++) {
1025     VR->swapBytes(Endianness, getHostEndianness());
1026     VR = getValueProfRecordNext(VR);
1027   }
1028 }
1029 
1030 void ValueProfData::swapBytesFromHost(support::endianness Endianness) {
1031   using namespace support;
1032 
1033   if (Endianness == getHostEndianness())
1034     return;
1035 
1036   ValueProfRecord *VR = getFirstValueProfRecord(this);
1037   for (uint32_t K = 0; K < NumValueKinds; K++) {
1038     ValueProfRecord *NVR = getValueProfRecordNext(VR);
1039     VR->swapBytes(getHostEndianness(), Endianness);
1040     VR = NVR;
1041   }
1042   sys::swapByteOrder<uint32_t>(TotalSize);
1043   sys::swapByteOrder<uint32_t>(NumValueKinds);
1044 }
1045 
1046 void annotateValueSite(Module &M, Instruction &Inst,
1047                        const InstrProfRecord &InstrProfR,
1048                        InstrProfValueKind ValueKind, uint32_t SiteIdx,
1049                        uint32_t MaxMDCount) {
1050   uint32_t NV = InstrProfR.getNumValueDataForSite(ValueKind, SiteIdx);
1051   if (!NV)
1052     return;
1053 
1054   uint64_t Sum = 0;
1055   std::unique_ptr<InstrProfValueData[]> VD =
1056       InstrProfR.getValueForSite(ValueKind, SiteIdx, &Sum);
1057 
1058   ArrayRef<InstrProfValueData> VDs(VD.get(), NV);
1059   annotateValueSite(M, Inst, VDs, Sum, ValueKind, MaxMDCount);
1060 }
1061 
1062 void annotateValueSite(Module &M, Instruction &Inst,
1063                        ArrayRef<InstrProfValueData> VDs,
1064                        uint64_t Sum, InstrProfValueKind ValueKind,
1065                        uint32_t MaxMDCount) {
1066   LLVMContext &Ctx = M.getContext();
1067   MDBuilder MDHelper(Ctx);
1068   SmallVector<Metadata *, 3> Vals;
1069   // Tag
1070   Vals.push_back(MDHelper.createString("VP"));
1071   // Value Kind
1072   Vals.push_back(MDHelper.createConstant(
1073       ConstantInt::get(Type::getInt32Ty(Ctx), ValueKind)));
1074   // Total Count
1075   Vals.push_back(
1076       MDHelper.createConstant(ConstantInt::get(Type::getInt64Ty(Ctx), Sum)));
1077 
1078   // Value Profile Data
1079   uint32_t MDCount = MaxMDCount;
1080   for (auto &VD : VDs) {
1081     Vals.push_back(MDHelper.createConstant(
1082         ConstantInt::get(Type::getInt64Ty(Ctx), VD.Value)));
1083     Vals.push_back(MDHelper.createConstant(
1084         ConstantInt::get(Type::getInt64Ty(Ctx), VD.Count)));
1085     if (--MDCount == 0)
1086       break;
1087   }
1088   Inst.setMetadata(LLVMContext::MD_prof, MDNode::get(Ctx, Vals));
1089 }
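
// For illustration (shape only; the constants depend on the record): for
// indirect call targets the annotation produced above looks like
//   !prof !{!"VP", i32 0, i64 TotalCount, i64 Hash1, i64 Count1, ...}
// with at most MaxMDCount value/count pairs; getValueProfDataFromInst below
// parses this form back out.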
1090 
1091 bool getValueProfDataFromInst(const Instruction &Inst,
1092                               InstrProfValueKind ValueKind,
1093                               uint32_t MaxNumValueData,
1094                               InstrProfValueData ValueData[],
1095                               uint32_t &ActualNumValueData, uint64_t &TotalC,
1096                               bool GetNoICPValue) {
1097   MDNode *MD = Inst.getMetadata(LLVMContext::MD_prof);
1098   if (!MD)
1099     return false;
1100 
1101   unsigned NOps = MD->getNumOperands();
1102 
1103   if (NOps < 5)
1104     return false;
1105 
1106   // Operand 0 is a string tag "VP":
1107   MDString *Tag = cast<MDString>(MD->getOperand(0));
1108   if (!Tag)
1109     return false;
1110 
1111   if (!Tag->getString().equals("VP"))
1112     return false;
1113 
1114   // Now check kind:
1115   ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
1116   if (!KindInt)
1117     return false;
1118   if (KindInt->getZExtValue() != ValueKind)
1119     return false;
1120 
1121   // Get total count
1122   ConstantInt *TotalCInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
1123   if (!TotalCInt)
1124     return false;
1125   TotalC = TotalCInt->getZExtValue();
1126 
1127   ActualNumValueData = 0;
1128 
1129   for (unsigned I = 3; I < NOps; I += 2) {
1130     if (ActualNumValueData >= MaxNumValueData)
1131       break;
1132     ConstantInt *Value = mdconst::dyn_extract<ConstantInt>(MD->getOperand(I));
1133     ConstantInt *Count =
1134         mdconst::dyn_extract<ConstantInt>(MD->getOperand(I + 1));
1135     if (!Value || !Count)
1136       return false;
1137     uint64_t CntValue = Count->getZExtValue();
1138     if (!GetNoICPValue && (CntValue == NOMORE_ICP_MAGICNUM))
1139       continue;
1140     ValueData[ActualNumValueData].Value = Value->getZExtValue();
1141     ValueData[ActualNumValueData].Count = CntValue;
1142     ActualNumValueData++;
1143   }
1144   return true;
1145 }
1146 
1147 MDNode *getPGOFuncNameMetadata(const Function &F) {
1148   return F.getMetadata(getPGOFuncNameMetadataName());
1149 }
1150 
1151 void createPGOFuncNameMetadata(Function &F, StringRef PGOFuncName) {
1152   // Only for internal linkage functions.
1153   if (PGOFuncName == F.getName())
1154     return;
1155   // Don't create duplicated meta-data.
1156   if (getPGOFuncNameMetadata(F))
1157     return;
1158   LLVMContext &C = F.getContext();
1159   MDNode *N = MDNode::get(C, MDString::get(C, PGOFuncName));
1160   F.setMetadata(getPGOFuncNameMetadataName(), N);
1161 }
1162 
1163 bool needsComdatForCounter(const Function &F, const Module &M) {
1164   if (F.hasComdat())
1165     return true;
1166 
1167   if (!Triple(M.getTargetTriple()).supportsCOMDAT())
1168     return false;
1169 
1170   // See createPGOFuncNameVar for more details. To avoid link errors, profile
1171   // counters for functions with available_externally linkage need to be changed
1172   // to linkonce linkage. On ELF-based systems, this leads to weak symbols being
1173   // created. Without using comdat, duplicate entries won't be removed by the
1174   // linker, leading to increased data segment size and raw profile size. Even
1175   // worse, since the referenced counter from the profile per-function data
1176   // object will be resolved to the common strong definition, the profile counts
1177   // for available_externally functions will end up being duplicated in the raw
1178   // profile data. This can result in a distorted profile as the counts of those
1179   // duplicates will be accumulated by the profile merger.
1180   GlobalValue::LinkageTypes Linkage = F.getLinkage();
1181   if (Linkage != GlobalValue::ExternalWeakLinkage &&
1182       Linkage != GlobalValue::AvailableExternallyLinkage)
1183     return false;
1184 
1185   return true;
1186 }
1187 
1188 // Check if INSTR_PROF_RAW_VERSION_VAR is defined.
1189 bool isIRPGOFlagSet(const Module *M) {
1190   auto IRInstrVar =
1191       M->getNamedGlobal(INSTR_PROF_QUOTE(INSTR_PROF_RAW_VERSION_VAR));
1192   if (!IRInstrVar || IRInstrVar->hasLocalLinkage())
1193     return false;
1194 
1195   // For CSPGO+LTO, this variable might be marked as non-prevailing and we only
1196   // have the decl.
1197   if (IRInstrVar->isDeclaration())
1198     return true;
1199 
1200   // Check if the flag is set.
1201   if (!IRInstrVar->hasInitializer())
1202     return false;
1203 
1204   auto *InitVal = dyn_cast_or_null<ConstantInt>(IRInstrVar->getInitializer());
1205   if (!InitVal)
1206     return false;
1207   return (InitVal->getZExtValue() & VARIANT_MASK_IR_PROF) != 0;
1208 }
1209 
1210 // Check if we can safely rename this Comdat function.
1211 bool canRenameComdatFunc(const Function &F, bool CheckAddressTaken) {
1212   if (F.getName().empty())
1213     return false;
1214   if (!needsComdatForCounter(F, *(F.getParent())))
1215     return false;
1216   // It is unsafe to rename an address-taken function (its address can be used
1217   // in function comparison).
1218   if (CheckAddressTaken && F.hasAddressTaken())
1219     return false;
1220   // Only safe to do if this function may be discarded if it is not used
1221   // in the compilation unit.
1222   if (!GlobalValue::isDiscardableIfUnused(F.getLinkage()))
1223     return false;
1224 
1225   // For AvailableExternallyLinkage functions.
1226   if (!F.hasComdat()) {
1227     assert(F.getLinkage() == GlobalValue::AvailableExternallyLinkage);
1228     return true;
1229   }
1230   return true;
1231 }
1232 
1233 // Create the variable for the profile file name.
1234 void createProfileFileNameVar(Module &M, StringRef InstrProfileOutput) {
1235   if (InstrProfileOutput.empty())
1236     return;
1237   Constant *ProfileNameConst =
1238       ConstantDataArray::getString(M.getContext(), InstrProfileOutput, true);
1239   GlobalVariable *ProfileNameVar = new GlobalVariable(
1240       M, ProfileNameConst->getType(), true, GlobalValue::WeakAnyLinkage,
1241       ProfileNameConst, INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_NAME_VAR));
1242   ProfileNameVar->setVisibility(GlobalValue::HiddenVisibility);
1243   Triple TT(M.getTargetTriple());
1244   if (TT.supportsCOMDAT()) {
1245     ProfileNameVar->setLinkage(GlobalValue::ExternalLinkage);
1246     ProfileNameVar->setComdat(M.getOrInsertComdat(
1247         StringRef(INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_NAME_VAR))));
1248   }
1249 }
1250 
1251 Error OverlapStats::accumulateCounts(const std::string &BaseFilename,
1252                                      const std::string &TestFilename,
1253                                      bool IsCS) {
1254   auto getProfileSum = [IsCS](const std::string &Filename,
1255                               CountSumOrPercent &Sum) -> Error {
1256     // This function is only used from llvm-profdata, which doesn't use any kind
1257     // of VFS. Just create a default RealFileSystem to read profiles.
1258     auto FS = vfs::getRealFileSystem();
1259     auto ReaderOrErr = InstrProfReader::create(Filename, *FS);
1260     if (Error E = ReaderOrErr.takeError()) {
1261       return E;
1262     }
1263     auto Reader = std::move(ReaderOrErr.get());
1264     Reader->accumulateCounts(Sum, IsCS);
1265     return Error::success();
1266   };
1267   auto Ret = getProfileSum(BaseFilename, Base);
1268   if (Ret)
1269     return Ret;
1270   Ret = getProfileSum(TestFilename, Test);
1271   if (Ret)
1272     return Ret;
1273   this->BaseFilename = &BaseFilename;
1274   this->TestFilename = &TestFilename;
1275   Valid = true;
1276   return Error::success();
1277 }
1278 
1279 void OverlapStats::addOneMismatch(const CountSumOrPercent &MismatchFunc) {
1280   Mismatch.NumEntries += 1;
1281   Mismatch.CountSum += MismatchFunc.CountSum / Test.CountSum;
1282   for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) {
1283     if (Test.ValueCounts[I] >= 1.0f)
1284       Mismatch.ValueCounts[I] +=
1285           MismatchFunc.ValueCounts[I] / Test.ValueCounts[I];
1286   }
1287 }
1288 
1289 void OverlapStats::addOneUnique(const CountSumOrPercent &UniqueFunc) {
1290   Unique.NumEntries += 1;
1291   Unique.CountSum += UniqueFunc.CountSum / Test.CountSum;
1292   for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) {
1293     if (Test.ValueCounts[I] >= 1.0f)
1294       Unique.ValueCounts[I] += UniqueFunc.ValueCounts[I] / Test.ValueCounts[I];
1295   }
1296 }
1297 
1298 void OverlapStats::dump(raw_fd_ostream &OS) const {
1299   if (!Valid)
1300     return;
1301 
1302   const char *EntryName =
1303       (Level == ProgramLevel ? "functions" : "edge counters");
1304   if (Level == ProgramLevel) {
1305     OS << "Profile overlap information for base_profile: " << *BaseFilename
1306        << " and test_profile: " << *TestFilename << "\nProgram level:\n";
1307   } else {
1308     OS << "Function level:\n"
1309        << "  Function: " << FuncName << " (Hash=" << FuncHash << ")\n";
1310   }
1311 
1312   OS << "  # of " << EntryName << " overlap: " << Overlap.NumEntries << "\n";
1313   if (Mismatch.NumEntries)
1314     OS << "  # of " << EntryName << " mismatch: " << Mismatch.NumEntries
1315        << "\n";
1316   if (Unique.NumEntries)
1317     OS << "  # of " << EntryName
1318        << " only in test_profile: " << Unique.NumEntries << "\n";
1319 
1320   OS << "  Edge profile overlap: " << format("%.3f%%", Overlap.CountSum * 100)
1321      << "\n";
1322   if (Mismatch.NumEntries)
1323     OS << "  Mismatched count percentage (Edge): "
1324        << format("%.3f%%", Mismatch.CountSum * 100) << "\n";
1325   if (Unique.NumEntries)
1326     OS << "  Percentage of Edge profile only in test_profile: "
1327        << format("%.3f%%", Unique.CountSum * 100) << "\n";
1328   OS << "  Edge profile base count sum: " << format("%.0f", Base.CountSum)
1329      << "\n"
1330      << "  Edge profile test count sum: " << format("%.0f", Test.CountSum)
1331      << "\n";
1332 
1333   for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) {
1334     if (Base.ValueCounts[I] < 1.0f && Test.ValueCounts[I] < 1.0f)
1335       continue;
1336     char ProfileKindName[20];
1337     switch (I) {
1338     case IPVK_IndirectCallTarget:
1339       strncpy(ProfileKindName, "IndirectCall", 19);
1340       break;
1341     case IPVK_MemOPSize:
1342       strncpy(ProfileKindName, "MemOP", 19);
1343       break;
1344     default:
1345       snprintf(ProfileKindName, 19, "VP[%d]", I);
1346       break;
1347     }
1348     OS << "  " << ProfileKindName
1349        << " profile overlap: " << format("%.3f%%", Overlap.ValueCounts[I] * 100)
1350        << "\n";
1351     if (Mismatch.NumEntries)
1352       OS << "  Mismatched count percentage (" << ProfileKindName
1353          << "): " << format("%.3f%%", Mismatch.ValueCounts[I] * 100) << "\n";
1354     if (Unique.NumEntries)
1355       OS << "  Percentage of " << ProfileKindName
1356          << " profile only in test_profile: "
1357          << format("%.3f%%", Unique.ValueCounts[I] * 100) << "\n";
1358     OS << "  " << ProfileKindName
1359        << " profile base count sum: " << format("%.0f", Base.ValueCounts[I])
1360        << "\n"
1361        << "  " << ProfileKindName
1362        << " profile test count sum: " << format("%.0f", Test.ValueCounts[I])
1363        << "\n";
1364   }
1365 }
1366 
1367 namespace IndexedInstrProf {
1368 // A C++14 compatible version of the offsetof macro.
1369 template <typename T1, typename T2>
1370 inline size_t constexpr offsetOf(T1 T2::*Member) {
1371   constexpr T2 Object{};
1372   return size_t(&(Object.*Member)) - size_t(&Object);
1373 }
1374 
1375 static inline uint64_t read(const unsigned char *Buffer, size_t Offset) {
1376   return *reinterpret_cast<const uint64_t *>(Buffer + Offset);
1377 }
1378 
1379 uint64_t Header::formatVersion() const {
1380   using namespace support;
1381   return endian::byte_swap<uint64_t, little>(Version);
1382 }
1383 
1384 Expected<Header> Header::readFromBuffer(const unsigned char *Buffer) {
1385   using namespace support;
1386   static_assert(std::is_standard_layout_v<Header>,
1387                 "The header should be standard layout type since we use offset "
1388                 "of fields to read.");
1389   Header H;
1390 
1391   H.Magic = read(Buffer, offsetOf(&Header::Magic));
1392   // Check the magic number.
1393   uint64_t Magic = endian::byte_swap<uint64_t, little>(H.Magic);
1394   if (Magic != IndexedInstrProf::Magic)
1395     return make_error<InstrProfError>(instrprof_error::bad_magic);
1396 
1397   // Read the version.
1398   H.Version = read(Buffer, offsetOf(&Header::Version));
1399   if (GET_VERSION(H.formatVersion()) >
1400       IndexedInstrProf::ProfVersion::CurrentVersion)
1401     return make_error<InstrProfError>(instrprof_error::unsupported_version);
1402 
1403   switch (GET_VERSION(H.formatVersion())) {
1404     // When a new field is added in the header add a case statement here to
1405     // populate it.
1406     static_assert(
1407         IndexedInstrProf::ProfVersion::CurrentVersion == Version10,
1408         "Please update the reading code below if a new field has been added, "
1409         "if not, add a case statement to fall through to the latest version.");
1410   case 10ull:
1411     H.TemporalProfTracesOffset =
1412         read(Buffer, offsetOf(&Header::TemporalProfTracesOffset));
1413     [[fallthrough]];
1414   case 9ull:
1415     H.BinaryIdOffset = read(Buffer, offsetOf(&Header::BinaryIdOffset));
1416     [[fallthrough]];
1417   case 8ull:
1418     H.MemProfOffset = read(Buffer, offsetOf(&Header::MemProfOffset));
1419     [[fallthrough]];
1420   default: // Version7 (when the backwards compatible header was introduced).
1421     H.HashType = read(Buffer, offsetOf(&Header::HashType));
1422     H.HashOffset = read(Buffer, offsetOf(&Header::HashOffset));
1423   }
1424 
1425   return H;
1426 }
1427 
1428 size_t Header::size() const {
1429   switch (GET_VERSION(formatVersion())) {
1430     // When a new field is added to the header add a case statement here to
1431     // compute the size as offset of the new field + size of the new field. This
1432     // relies on the field being added to the end of the list.
1433     static_assert(IndexedInstrProf::ProfVersion::CurrentVersion == Version10,
1434                   "Please update the size computation below if a new field has "
1435                   "been added to the header, if not, add a case statement to "
1436                   "fall through to the latest version.");
1437   case 10ull:
1438     return offsetOf(&Header::TemporalProfTracesOffset) +
1439            sizeof(Header::TemporalProfTracesOffset);
1440   case 9ull:
1441     return offsetOf(&Header::BinaryIdOffset) + sizeof(Header::BinaryIdOffset);
1442   case 8ull:
1443     return offsetOf(&Header::MemProfOffset) + sizeof(Header::MemProfOffset);
1444   default: // Version7 (when the backwards compatible header was introduced).
1445     return offsetOf(&Header::HashOffset) + sizeof(Header::HashOffset);
1446   }
1447 }
1448 
1449 } // namespace IndexedInstrProf
1450 
1451 } // end namespace llvm
1452