//===- InstrProfWriter.cpp - Instrumented profiling writer ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing profiling data for clang's
// instrumentation based PGO and coverage.
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/InstrProfWriter.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/ProfileData/ProfileCommon.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

// A struct describing how the data stream should be patched. For indexed
// profiling, only the uint64_t data type is needed.
struct PatchItem {
  uint64_t Pos; // Where to patch.
  uint64_t *D;  // Pointer to an array of source data.
  int N;        // Number of elements in \c D array.
};
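
// The writer uses PatchItem for a reserve-then-patch scheme: values that are
// only known after later sections have been emitted are first written as
// zero placeholders, their stream offsets are remembered, and the real
// values are patched in at the end. A minimal sketch of the pattern
// (illustrative only, not a function in this file):
//
//   uint64_t FieldOffset = OS.tell(); // Remember where the field lives.
//   OS.write(0);                      // Reserve 8 bytes with a placeholder.
//   ...                               // Emit a section; learn the value.
//   uint64_t Value = ...;
//   PatchItem Items[] = {{FieldOffset, &Value, 1}};
//   OS.patch(Items, 1);               // Overwrite the placeholder in place.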

namespace llvm {

// A wrapper class that abstracts the writer stream and supports back
// patching of previously written bytes.
class ProfOStream {
public:
  ProfOStream(raw_fd_ostream &FD)
      : IsFDOStream(true), OS(FD), LE(FD, support::little) {}
  ProfOStream(raw_string_ostream &STR)
      : IsFDOStream(false), OS(STR), LE(STR, support::little) {}

  uint64_t tell() { return OS.tell(); }
  void write(uint64_t V) { LE.write<uint64_t>(V); }
  void writeByte(uint8_t V) { LE.write<uint8_t>(V); }

  // \c patch can only be called when all data is written and flushed.
  // For raw_string_ostream, the patch is applied to the target string
  // directly and is not reflected in the stream's internal buffer.
  void patch(PatchItem *P, int NItems) {
    using namespace support;

    if (IsFDOStream) {
      raw_fd_ostream &FDOStream = static_cast<raw_fd_ostream &>(OS);
      const uint64_t LastPos = FDOStream.tell();
      for (int K = 0; K < NItems; K++) {
        FDOStream.seek(P[K].Pos);
        for (int I = 0; I < P[K].N; I++)
          write(P[K].D[I]);
      }
      // Reset the stream to the last position after patching so that users
      // don't accidentally overwrite data. This makes it consistent with
      // the string stream below, which replaces the data directly.
      FDOStream.seek(LastPos);
    } else {
      raw_string_ostream &SOStream = static_cast<raw_string_ostream &>(OS);
      std::string &Data = SOStream.str(); // with flush
      for (int K = 0; K < NItems; K++) {
        for (int I = 0; I < P[K].N; I++) {
          uint64_t Bytes = endian::byte_swap<uint64_t, little>(P[K].D[I]);
          Data.replace(P[K].Pos + I * sizeof(uint64_t), sizeof(uint64_t),
                       (const char *)&Bytes, sizeof(uint64_t));
        }
      }
    }
  }

  // If \c OS is an instance of \c raw_fd_ostream, this field will be true.
  // Otherwise, \c OS will be a raw_string_ostream.
  bool IsFDOStream;
  raw_ostream &OS;
  support::endian::Writer LE;
};

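// The trait below plugs into llvm::OnDiskChainedHashTableGenerator, which
// expects this interface: ComputeHash to hash keys, EmitKeyDataLength to
// announce the (key size, data size) pair for an entry, and EmitKey/EmitData
// to serialize the bytes themselves. EmitKeyDataLength must return exactly
// the sizes that EmitKey and EmitData later write, since the reader relies
// on them to locate entries; the size loop in EmitKeyDataLength therefore
// mirrors the per-record writes in EmitData.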
class InstrProfRecordWriterTrait {
public:
  using key_type = StringRef;
  using key_type_ref = StringRef;

  using data_type = const InstrProfWriter::ProfilingData *const;
  using data_type_ref = const InstrProfWriter::ProfilingData *const;

  using hash_value_type = uint64_t;
  using offset_type = uint64_t;

  support::endianness ValueProfDataEndianness = support::little;
  InstrProfSummaryBuilder *SummaryBuilder;
  InstrProfSummaryBuilder *CSSummaryBuilder;

  InstrProfRecordWriterTrait() = default;

  static hash_value_type ComputeHash(key_type_ref K) {
    return IndexedInstrProf::ComputeHash(K);
  }

  static std::pair<offset_type, offset_type>
  EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V) {
    using namespace support;

    endian::Writer LE(Out, little);

    offset_type N = K.size();
    LE.write<offset_type>(N);

    offset_type M = 0;
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;
      M += sizeof(uint64_t); // The function hash
      M += sizeof(uint64_t); // The size of the Counts vector
      M += ProfRecord.Counts.size() * sizeof(uint64_t);

      // Value data
      M += ValueProfData::getSize(ProfileData.second);
    }
    LE.write<offset_type>(M);

    return std::make_pair(N, M);
  }

  void EmitKey(raw_ostream &Out, key_type_ref K, offset_type N) {
    Out.write(K.data(), N);
  }

  void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V, offset_type) {
    using namespace support;

    endian::Writer LE(Out, little);
    for (const auto &ProfileData : *V) {
      const InstrProfRecord &ProfRecord = ProfileData.second;
      if (NamedInstrProfRecord::hasCSFlagInHash(ProfileData.first))
        CSSummaryBuilder->addRecord(ProfRecord);
      else
        SummaryBuilder->addRecord(ProfRecord);

      LE.write<uint64_t>(ProfileData.first); // Function hash
      LE.write<uint64_t>(ProfRecord.Counts.size());
      for (uint64_t I : ProfRecord.Counts)
        LE.write<uint64_t>(I);

      // Write value data
      std::unique_ptr<ValueProfData> VDataPtr =
          ValueProfData::serializeFrom(ProfileData.second);
      uint32_t S = VDataPtr->getSize();
      VDataPtr->swapBytesFromHost(ValueProfDataEndianness);
      Out.write((const char *)VDataPtr.get(), S);
    }
  }
};

} // end namespace llvm

InstrProfWriter::InstrProfWriter(bool Sparse,
                                 uint64_t TemporalProfTraceReservoirSize,
                                 uint64_t MaxTemporalProfTraceLength)
    : Sparse(Sparse), MaxTemporalProfTraceLength(MaxTemporalProfTraceLength),
      TemporalProfTraceReservoirSize(TemporalProfTraceReservoirSize),
      InfoObj(new InstrProfRecordWriterTrait()) {}

InstrProfWriter::~InstrProfWriter() { delete InfoObj; }

// Internal interface for testing purposes only.
void InstrProfWriter::setValueProfDataEndianness(
    support::endianness Endianness) {
  InfoObj->ValueProfDataEndianness = Endianness;
}

void InstrProfWriter::setOutputSparse(bool Sparse) {
  this->Sparse = Sparse;
}

void InstrProfWriter::addRecord(NamedInstrProfRecord &&I, uint64_t Weight,
                                function_ref<void(Error)> Warn) {
  auto Name = I.Name;
  auto Hash = I.Hash;
  addRecord(Name, Hash, std::move(I), Weight, Warn);
}

void InstrProfWriter::overlapRecord(NamedInstrProfRecord &&Other,
                                    OverlapStats &Overlap,
                                    OverlapStats &FuncLevelOverlap,
                                    const OverlapFuncFilters &FuncFilter) {
  auto Name = Other.Name;
  auto Hash = Other.Hash;
  Other.accumulateCounts(FuncLevelOverlap.Test);
  if (!FunctionData.contains(Name)) {
    Overlap.addOneUnique(FuncLevelOverlap.Test);
    return;
  }
  if (FuncLevelOverlap.Test.CountSum < 1.0f) {
    Overlap.Overlap.NumEntries += 1;
    return;
  }
  auto &ProfileDataMap = FunctionData[Name];
  bool NewFunc;
  ProfilingData::iterator Where;
  std::tie(Where, NewFunc) =
      ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
  if (NewFunc) {
    Overlap.addOneMismatch(FuncLevelOverlap.Test);
    return;
  }
  InstrProfRecord &Dest = Where->second;

  uint64_t ValueCutoff = FuncFilter.ValueCutoff;
  if (!FuncFilter.NameFilter.empty() && Name.contains(FuncFilter.NameFilter))
    ValueCutoff = 0;

  Dest.overlap(Other, Overlap, FuncLevelOverlap, ValueCutoff);
}

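// Adds one record keyed by (Name, Hash). If the pair is new, the record is
// stored as-is (scaled by Weight when Weight > 1); otherwise the incoming
// counts are merged into the existing record with saturating, weighted
// addition. For example, adding the same record twice with Weight 1 doubles
// each of its counters.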
void InstrProfWriter::addRecord(StringRef Name, uint64_t Hash,
                                InstrProfRecord &&I, uint64_t Weight,
                                function_ref<void(Error)> Warn) {
  auto &ProfileDataMap = FunctionData[Name];

  bool NewFunc;
  ProfilingData::iterator Where;
  std::tie(Where, NewFunc) =
      ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
  InstrProfRecord &Dest = Where->second;

  auto MapWarn = [&](instrprof_error E) {
    Warn(make_error<InstrProfError>(E));
  };

  if (NewFunc) {
    // We've never seen a function with this name and hash, add it.
    Dest = std::move(I);
    if (Weight > 1)
      Dest.scale(Weight, 1, MapWarn);
  } else {
    // We're updating a function we've seen before.
    Dest.merge(I, Weight, MapWarn);
  }

  Dest.sortValueData();
}

void InstrProfWriter::addMemProfRecord(
    const Function::GUID Id, const memprof::IndexedMemProfRecord &Record) {
  auto Result = MemProfRecordData.insert({Id, Record});
  // If we inserted a new record, then we are done.
  if (Result.second) {
    return;
  }
  memprof::IndexedMemProfRecord &Existing = Result.first->second;
  Existing.merge(Record);
}

bool InstrProfWriter::addMemProfFrame(const memprof::FrameId Id,
                                      const memprof::Frame &Frame,
                                      function_ref<void(Error)> Warn) {
  auto Result = MemProfFrameData.insert({Id, Frame});
  // If a mapping already exists for the current frame id and it does not
  // match the new mapping provided, then warn and bail out. We don't support
  // merging memprof data whose Frame -> Id mapping is inconsistent across
  // profiles.
  if (!Result.second && Result.first->second != Frame) {
    Warn(make_error<InstrProfError>(instrprof_error::malformed,
                                    "frame to id mapping mismatch"));
    return false;
  }
  return true;
}

void InstrProfWriter::addBinaryIds(ArrayRef<llvm::object::BuildID> BIs) {
  llvm::append_range(BinaryIds, BIs);
}

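// This implements reservoir sampling in the spirit of Algorithm R: the first
// TemporalProfTraceReservoirSize traces are kept unconditionally; after
// that, the Ith trace of the stream (1-based) replaces a random reservoir
// slot with probability ReservoirSize / I. For example, with a reservoir of
// 100, the 200th trace is kept with probability 100/200 = 1/2, keeping the
// retained sample uniform over the whole stream.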
void InstrProfWriter::addTemporalProfileTrace(TemporalProfTraceTy Trace) {
  if (Trace.FunctionNameRefs.size() > MaxTemporalProfTraceLength)
    Trace.FunctionNameRefs.resize(MaxTemporalProfTraceLength);
  if (Trace.FunctionNameRefs.empty())
    return;

  if (TemporalProfTraceStreamSize < TemporalProfTraceReservoirSize) {
    // Simply append the trace if we have not yet hit our reservoir size limit.
    TemporalProfTraces.push_back(std::move(Trace));
  } else {
    // Otherwise, replace a random trace in the stream.
    std::uniform_int_distribution<uint64_t> Distribution(
        0, TemporalProfTraceStreamSize);
    uint64_t RandomIndex = Distribution(RNG);
    if (RandomIndex < TemporalProfTraces.size())
      TemporalProfTraces[RandomIndex] = std::move(Trace);
  }
  ++TemporalProfTraceStreamSize;
}

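// Merges a source trace reservoir into this writer's reservoir. If the
// source stream never overflowed its reservoir, its traces can simply be
// replayed through addTemporalProfileTrace. Otherwise the code below first
// simulates adding SrcStreamSize elements to decide which destination slots
// would have been replaced, then fills those slots with a uniform random
// sample (via shuffle) of the source traces.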
void InstrProfWriter::addTemporalProfileTraces(
    SmallVectorImpl<TemporalProfTraceTy> &SrcTraces, uint64_t SrcStreamSize) {
  // Assume that the source has the same reservoir size as the destination to
  // avoid needing to record it in the indexed profile format.
  bool IsDestSampled =
      (TemporalProfTraceStreamSize > TemporalProfTraceReservoirSize);
  bool IsSrcSampled = (SrcStreamSize > TemporalProfTraceReservoirSize);
  if (!IsDestSampled && IsSrcSampled) {
    // If one of the streams is sampled, ensure that it is the destination.
    std::swap(TemporalProfTraces, SrcTraces);
    std::swap(TemporalProfTraceStreamSize, SrcStreamSize);
    std::swap(IsDestSampled, IsSrcSampled);
  }
  if (!IsSrcSampled) {
    // If the source stream is not sampled, we add each source trace normally.
    for (auto &Trace : SrcTraces)
      addTemporalProfileTrace(std::move(Trace));
    return;
  }
  // Otherwise, we find the traces that would have been removed if we added
  // the whole source stream.
  SmallSetVector<uint64_t, 8> IndicesToReplace;
  for (uint64_t I = 0; I < SrcStreamSize; I++) {
    std::uniform_int_distribution<uint64_t> Distribution(
        0, TemporalProfTraceStreamSize);
    uint64_t RandomIndex = Distribution(RNG);
    if (RandomIndex < TemporalProfTraces.size())
      IndicesToReplace.insert(RandomIndex);
    ++TemporalProfTraceStreamSize;
  }
  // Then we insert a random sample of the source traces.
  llvm::shuffle(SrcTraces.begin(), SrcTraces.end(), RNG);
  for (const auto &[Index, Trace] : llvm::zip(IndicesToReplace, SrcTraces))
    TemporalProfTraces[Index] = std::move(Trace);
}

void InstrProfWriter::mergeRecordsFromWriter(InstrProfWriter &&IPW,
                                             function_ref<void(Error)> Warn) {
  for (auto &I : IPW.FunctionData)
    for (auto &Func : I.getValue())
      addRecord(I.getKey(), Func.first, std::move(Func.second), 1, Warn);

  BinaryIds.reserve(BinaryIds.size() + IPW.BinaryIds.size());
  for (auto &I : IPW.BinaryIds)
    addBinaryIds(I);

  addTemporalProfileTraces(IPW.TemporalProfTraces,
                           IPW.TemporalProfTraceStreamSize);

  MemProfFrameData.reserve(IPW.MemProfFrameData.size());
  for (auto &I : IPW.MemProfFrameData) {
    // If we weren't able to add the frame mappings, then it doesn't make
    // sense to try to merge the records from this profile.
    if (!addMemProfFrame(I.first, I.second, Warn))
      return;
  }

  MemProfRecordData.reserve(IPW.MemProfRecordData.size());
  for (auto &I : IPW.MemProfRecordData) {
    addMemProfRecord(I.first, I.second);
  }
}

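// Returns true if the function data should be written out. In sparse mode,
// records whose counters are all zero (functions never executed) are elided;
// otherwise everything is encoded.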
bool InstrProfWriter::shouldEncodeData(const ProfilingData &PD) {
  if (!Sparse)
    return true;
  for (const auto &Func : PD) {
    const InstrProfRecord &IPR = Func.second;
    if (llvm::any_of(IPR.Counts, [](uint64_t Count) { return Count > 0; }))
      return true;
  }
  return false;
}

static void setSummary(IndexedInstrProf::Summary *TheSummary,
                       ProfileSummary &PS) {
  using namespace IndexedInstrProf;

  const std::vector<ProfileSummaryEntry> &Res = PS.getDetailedSummary();
  TheSummary->NumSummaryFields = Summary::NumKinds;
  TheSummary->NumCutoffEntries = Res.size();
  TheSummary->set(Summary::MaxFunctionCount, PS.getMaxFunctionCount());
  TheSummary->set(Summary::MaxBlockCount, PS.getMaxCount());
  TheSummary->set(Summary::MaxInternalBlockCount, PS.getMaxInternalCount());
  TheSummary->set(Summary::TotalBlockCount, PS.getTotalCount());
  TheSummary->set(Summary::TotalNumBlocks, PS.getNumCounts());
  TheSummary->set(Summary::TotalNumFunctions, PS.getNumFunctions());
  for (unsigned I = 0; I < Res.size(); I++)
    TheSummary->setEntry(I, Res[I]);
}

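// Emits the indexed profile: the header (with its four offset fields written
// as zero placeholders), reserved space for the profile summaries, the
// on-disk hash table of per-function records, the MemProf section (if
// present), the binary id section, and the temporal profile trace section
// (if present). Once every section's start offset is known, the placeholders
// are back patched via ProfOStream::patch.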
Error InstrProfWriter::writeImpl(ProfOStream &OS) {
  using namespace IndexedInstrProf;
  using namespace support;

  OnDiskChainedHashTableGenerator<InstrProfRecordWriterTrait> Generator;

  InstrProfSummaryBuilder ISB(ProfileSummaryBuilder::DefaultCutoffs);
  InfoObj->SummaryBuilder = &ISB;
  InstrProfSummaryBuilder CSISB(ProfileSummaryBuilder::DefaultCutoffs);
  InfoObj->CSSummaryBuilder = &CSISB;

  // Populate the hash table generator.
  SmallVector<std::pair<StringRef, const ProfilingData *>, 0> OrderedData;
  for (const auto &I : FunctionData)
    if (shouldEncodeData(I.getValue()))
      OrderedData.emplace_back(I.getKey(), &I.getValue());
  llvm::sort(OrderedData, less_first());
  for (const auto &I : OrderedData)
    Generator.insert(I.first, I.second);

  // Write the header.
  IndexedInstrProf::Header Header;
  Header.Magic = IndexedInstrProf::Magic;
  Header.Version = IndexedInstrProf::ProfVersion::CurrentVersion;
  if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
    Header.Version |= VARIANT_MASK_IR_PROF;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
    Header.Version |= VARIANT_MASK_CSIR_PROF;
  if (static_cast<bool>(ProfileKind &
                        InstrProfKind::FunctionEntryInstrumentation))
    Header.Version |= VARIANT_MASK_INSTR_ENTRY;
  if (static_cast<bool>(ProfileKind & InstrProfKind::SingleByteCoverage))
    Header.Version |= VARIANT_MASK_BYTE_COVERAGE;
  if (static_cast<bool>(ProfileKind & InstrProfKind::FunctionEntryOnly))
    Header.Version |= VARIANT_MASK_FUNCTION_ENTRY_ONLY;
  if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf))
    Header.Version |= VARIANT_MASK_MEMPROF;
  if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile))
    Header.Version |= VARIANT_MASK_TEMPORAL_PROF;

  Header.Unused = 0;
  Header.HashType = static_cast<uint64_t>(IndexedInstrProf::HashType);
  Header.HashOffset = 0;
  Header.MemProfOffset = 0;
  Header.BinaryIdOffset = 0;
  Header.TemporalProfTracesOffset = 0;
  int N = sizeof(IndexedInstrProf::Header) / sizeof(uint64_t);

  // Write out all the header fields except 'HashOffset', 'MemProfOffset',
  // 'BinaryIdOffset', and 'TemporalProfTracesOffset'. We need to remember
  // the offsets of these fields so they can be back patched later.
  for (int I = 0; I < N - 4; I++)
    OS.write(reinterpret_cast<uint64_t *>(&Header)[I]);

  // Save the location of Header.HashOffset field in \c OS.
  uint64_t HashTableStartFieldOffset = OS.tell();
  // Reserve the space for HashOffset field.
  OS.write(0);

  // Save the location of MemProf profile data. This is stored in two parts as
  // the schema and as a separate on-disk chained hashtable.
  uint64_t MemProfSectionOffset = OS.tell();
  // Reserve space for the MemProf table field to be patched later if this
  // profile contains memory profile information.
  OS.write(0);

  // Save the location of binary ids section.
  uint64_t BinaryIdSectionOffset = OS.tell();
  // Reserve space for the BinaryIdOffset field to be patched later if this
  // profile contains binary ids.
  OS.write(0);

  uint64_t TemporalProfTracesOffset = OS.tell();
  OS.write(0);

  // Reserve space to write profile summary data.
  uint32_t NumEntries = ProfileSummaryBuilder::DefaultCutoffs.size();
  uint32_t SummarySize = Summary::getSize(Summary::NumKinds, NumEntries);
  // Remember the summary offset.
  uint64_t SummaryOffset = OS.tell();
  for (unsigned I = 0; I < SummarySize / sizeof(uint64_t); I++)
    OS.write(0);
  uint64_t CSSummaryOffset = 0;
  uint64_t CSSummarySize = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
    CSSummaryOffset = OS.tell();
    CSSummarySize = SummarySize / sizeof(uint64_t);
    for (unsigned I = 0; I < CSSummarySize; I++)
      OS.write(0);
  }

  // Write the hash table.
  uint64_t HashTableStart = Generator.Emit(OS.OS, *InfoObj);

  // Write the MemProf profile data if we have it. This includes a simple schema
  // with the format described below followed by the hash tables:
  // uint64_t RecordTableOffset = RecordTableGenerator.Emit
  // uint64_t FramePayloadOffset = Stream offset before emitting the frame table
  // uint64_t FrameTableOffset = FrameTableGenerator.Emit
  // uint64_t Num schema entries
  // uint64_t Schema entry 0
  // uint64_t Schema entry 1
  // ....
  // uint64_t Schema entry N - 1
  // OnDiskChainedHashTable MemProfRecordData
  // OnDiskChainedHashTable MemProfFrameData
  uint64_t MemProfSectionStart = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf)) {
    MemProfSectionStart = OS.tell();
    OS.write(0ULL); // Reserve space for the memprof record table offset.
    OS.write(0ULL); // Reserve space for the memprof frame payload offset.
    OS.write(0ULL); // Reserve space for the memprof frame table offset.

    auto Schema = memprof::PortableMemInfoBlock::getSchema();
    OS.write(static_cast<uint64_t>(Schema.size()));
    for (const auto Id : Schema) {
      OS.write(static_cast<uint64_t>(Id));
    }

    auto RecordWriter = std::make_unique<memprof::RecordWriterTrait>();
    RecordWriter->Schema = &Schema;
    OnDiskChainedHashTableGenerator<memprof::RecordWriterTrait>
        RecordTableGenerator;
    for (auto &I : MemProfRecordData) {
      // Insert the key (func hash) and value (memprof record).
      RecordTableGenerator.insert(I.first, I.second);
    }

    uint64_t RecordTableOffset =
        RecordTableGenerator.Emit(OS.OS, *RecordWriter);

    uint64_t FramePayloadOffset = OS.tell();

    auto FrameWriter = std::make_unique<memprof::FrameWriterTrait>();
    OnDiskChainedHashTableGenerator<memprof::FrameWriterTrait>
        FrameTableGenerator;
    for (auto &I : MemProfFrameData) {
      // Insert the key (frame id) and value (frame contents).
      FrameTableGenerator.insert(I.first, I.second);
    }

    uint64_t FrameTableOffset = FrameTableGenerator.Emit(OS.OS, *FrameWriter);

    PatchItem PatchItems[] = {
        {MemProfSectionStart, &RecordTableOffset, 1},
        {MemProfSectionStart + sizeof(uint64_t), &FramePayloadOffset, 1},
        {MemProfSectionStart + 2 * sizeof(uint64_t), &FrameTableOffset, 1},
    };
    OS.patch(PatchItems, 3);
  }

  // BinaryIdSection has two parts:
  // 1. uint64_t BinaryIdsSectionSize
  // 2. list of binary ids that consist of:
  //    a. uint64_t BinaryIdLength
  //    b. uint8_t  BinaryIdData
  //    c. uint8_t  Padding (if necessary)
  uint64_t BinaryIdSectionStart = OS.tell();
  // Calculate the size of the binary ids section.
  uint64_t BinaryIdsSectionSize = 0;

  // Remove duplicate binary ids.
  llvm::sort(BinaryIds);
  BinaryIds.erase(std::unique(BinaryIds.begin(), BinaryIds.end()),
                  BinaryIds.end());

  for (auto BI : BinaryIds) {
    // Increment by the size of the binary id length data type.
    BinaryIdsSectionSize += sizeof(uint64_t);
    // Increment by the binary id data length, aligned to 8 bytes.
    BinaryIdsSectionSize += alignToPowerOf2(BI.size(), sizeof(uint64_t));
  }
  // Write the binary ids section size.
  OS.write(BinaryIdsSectionSize);

  for (auto BI : BinaryIds) {
    uint64_t BILen = BI.size();
    // Write the binary id length.
    OS.write(BILen);
    // Write the binary id data.
    for (unsigned K = 0; K < BILen; K++)
      OS.writeByte(BI[K]);
    // Write padding if necessary.
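    // For illustration: a 20-byte binary id is padded out to
    // alignToPowerOf2(20, 8) = 24 bytes, i.e. four zero bytes of padding, so
    // that every entry ends on a uint64_t boundary.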
    uint64_t PaddingSize = alignToPowerOf2(BILen, sizeof(uint64_t)) - BILen;
    for (unsigned K = 0; K < PaddingSize; K++)
      OS.writeByte(0);
  }

  uint64_t TemporalProfTracesSectionStart = 0;
  if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile)) {
    TemporalProfTracesSectionStart = OS.tell();
    OS.write(TemporalProfTraces.size());
    OS.write(TemporalProfTraceStreamSize);
    for (auto &Trace : TemporalProfTraces) {
      OS.write(Trace.Weight);
      OS.write(Trace.FunctionNameRefs.size());
      for (auto &NameRef : Trace.FunctionNameRefs)
        OS.write(NameRef);
    }
  }

  // Allocate space for data to be serialized out.
  std::unique_ptr<IndexedInstrProf::Summary> TheSummary =
      IndexedInstrProf::allocSummary(SummarySize);
  // Compute the summary and copy the data into the structure to be
  // serialized out (to disk or buffer).
  std::unique_ptr<ProfileSummary> PS = ISB.getSummary();
  setSummary(TheSummary.get(), *PS);
  InfoObj->SummaryBuilder = nullptr;

  // For the context-sensitive summary.
  std::unique_ptr<IndexedInstrProf::Summary> TheCSSummary = nullptr;
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
    TheCSSummary = IndexedInstrProf::allocSummary(SummarySize);
    std::unique_ptr<ProfileSummary> CSPS = CSISB.getSummary();
    setSummary(TheCSSummary.get(), *CSPS);
  }
  InfoObj->CSSummaryBuilder = nullptr;

  // Now do the final patch:
  PatchItem PatchItems[] = {
      // Patch the Header.HashOffset field.
      {HashTableStartFieldOffset, &HashTableStart, 1},
      // Patch the Header.MemProfOffset (=0 for profiles without MemProf
      // data).
      {MemProfSectionOffset, &MemProfSectionStart, 1},
      // Patch the Header.BinaryIdSectionOffset.
      {BinaryIdSectionOffset, &BinaryIdSectionStart, 1},
      // Patch the Header.TemporalProfTracesOffset (=0 for profiles without
      // traces).
      {TemporalProfTracesOffset, &TemporalProfTracesSectionStart, 1},
      // Patch the summary data.
      {SummaryOffset, reinterpret_cast<uint64_t *>(TheSummary.get()),
       (int)(SummarySize / sizeof(uint64_t))},
      {CSSummaryOffset, reinterpret_cast<uint64_t *>(TheCSSummary.get()),
       (int)CSSummarySize}};

  OS.patch(PatchItems, std::size(PatchItems));

  for (const auto &I : FunctionData)
    for (const auto &F : I.getValue())
      if (Error E = validateRecord(F.second))
        return E;

  return Error::success();
}

Error InstrProfWriter::write(raw_fd_ostream &OS) {
  ProfOStream POS(OS);
  return writeImpl(POS);
}

Error InstrProfWriter::write(raw_string_ostream &OS) {
  ProfOStream POS(OS);
  return writeImpl(POS);
}

std::unique_ptr<MemoryBuffer> InstrProfWriter::writeBuffer() {
  std::string Data;
  raw_string_ostream OS(Data);
  // Write the profile to an in-memory string.
  if (Error E = write(OS))
    return nullptr;
  // Return this in an aligned memory buffer.
  return MemoryBuffer::getMemBufferCopy(Data);
}

static const char *ValueProfKindStr[] = {
#define VALUE_PROF_KIND(Enumerator, Value, Descr) #Enumerator,
#include "llvm/ProfileData/InstrProfData.inc"
};

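// Rejects a record if any value site contains duplicate value entries.
// IPVK_IndirectCallTarget sites are exempt from the duplicate check.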
Error InstrProfWriter::validateRecord(const InstrProfRecord &Func) {
  for (uint32_t VK = 0; VK <= IPVK_Last; VK++) {
    uint32_t NS = Func.getNumValueSites(VK);
    if (!NS)
      continue;
    for (uint32_t S = 0; S < NS; S++) {
      uint32_t ND = Func.getNumValueDataForSite(VK, S);
      std::unique_ptr<InstrProfValueData[]> VD = Func.getValueForSite(VK, S);
      DenseSet<uint64_t> SeenValues;
      for (uint32_t I = 0; I < ND; I++)
        if ((VK != IPVK_IndirectCallTarget) &&
            !SeenValues.insert(VD[I].Value).second)
          return make_error<InstrProfError>(instrprof_error::invalid_prof);
    }
  }

  return Error::success();
}

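// For illustration, a record with two counters and no value profile data is
// rendered roughly as follows ("main" and the numbers are made-up sample
// values):
//
//   main
//   # Func Hash:
//   1234
//   # Num Counters:
//   2
//   # Counter Values:
//   100
//   0
//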
void InstrProfWriter::writeRecordInText(StringRef Name, uint64_t Hash,
                                        const InstrProfRecord &Func,
                                        InstrProfSymtab &Symtab,
                                        raw_fd_ostream &OS) {
  OS << Name << "\n";
  OS << "# Func Hash:\n" << Hash << "\n";
  OS << "# Num Counters:\n" << Func.Counts.size() << "\n";
  OS << "# Counter Values:\n";
  for (uint64_t Count : Func.Counts)
    OS << Count << "\n";

  uint32_t NumValueKinds = Func.getNumValueKinds();
  if (!NumValueKinds) {
    OS << "\n";
    return;
  }

  OS << "# Num Value Kinds:\n" << Func.getNumValueKinds() << "\n";
  for (uint32_t VK = 0; VK < IPVK_Last + 1; VK++) {
    uint32_t NS = Func.getNumValueSites(VK);
    if (!NS)
      continue;
    OS << "# ValueKind = " << ValueProfKindStr[VK] << ":\n" << VK << "\n";
    OS << "# NumValueSites:\n" << NS << "\n";
    for (uint32_t S = 0; S < NS; S++) {
      uint32_t ND = Func.getNumValueDataForSite(VK, S);
      OS << ND << "\n";
      std::unique_ptr<InstrProfValueData[]> VD = Func.getValueForSite(VK, S);
      for (uint32_t I = 0; I < ND; I++) {
        if (VK == IPVK_IndirectCallTarget)
          OS << Symtab.getFuncNameOrExternalSymbol(VD[I].Value) << ":"
             << VD[I].Count << "\n";
        else
          OS << VD[I].Value << ":" << VD[I].Count << "\n";
      }
    }
  }

  OS << "\n";
}

Error InstrProfWriter::writeText(raw_fd_ostream &OS) {
  // Check CS first since it implies an IR level profile.
  if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
    OS << "# CSIR level Instrumentation Flag\n:csir\n";
  else if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
    OS << "# IR level Instrumentation Flag\n:ir\n";

  if (static_cast<bool>(ProfileKind &
                        InstrProfKind::FunctionEntryInstrumentation))
    OS << "# Always instrument the function entry block\n:entry_first\n";
  InstrProfSymtab Symtab;

  using FuncPair = detail::DenseMapPair<uint64_t, InstrProfRecord>;
  using RecordType = std::pair<StringRef, FuncPair>;
  SmallVector<RecordType, 4> OrderedFuncData;

  for (const auto &I : FunctionData) {
    if (shouldEncodeData(I.getValue())) {
      if (Error E = Symtab.addFuncName(I.getKey()))
        return E;
      for (const auto &Func : I.getValue())
        OrderedFuncData.push_back(std::make_pair(I.getKey(), Func));
    }
  }

  if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile))
    writeTextTemporalProfTraceData(OS, Symtab);

  llvm::sort(OrderedFuncData, [](const RecordType &A, const RecordType &B) {
    return std::tie(A.first, A.second.first) <
           std::tie(B.first, B.second.first);
  });

  for (const auto &record : OrderedFuncData) {
    const StringRef &Name = record.first;
    const FuncPair &Func = record.second;
    writeRecordInText(Name, Func.first, Func.second, Symtab, OS);
  }

  for (const auto &record : OrderedFuncData) {
    const FuncPair &Func = record.second;
    if (Error E = validateRecord(Func.second))
      return E;
  }

  return Error::success();
}

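// For illustration, a reservoir holding one trace through functions foo and
// bar, from a stream of one trace, is rendered roughly as (sample values
// only):
//
//   :temporal_prof_traces
//   # Num Temporal Profile Traces:
//   1
//   # Temporal Profile Trace Stream Size:
//   1
//   # Weight:
//   1
//   foo,bar,
//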
void InstrProfWriter::writeTextTemporalProfTraceData(raw_fd_ostream &OS,
                                                     InstrProfSymtab &Symtab) {
  OS << ":temporal_prof_traces\n";
  OS << "# Num Temporal Profile Traces:\n" << TemporalProfTraces.size() << "\n";
  OS << "# Temporal Profile Trace Stream Size:\n"
     << TemporalProfTraceStreamSize << "\n";
  for (auto &Trace : TemporalProfTraces) {
    OS << "# Weight:\n" << Trace.Weight << "\n";
    for (auto &NameRef : Trace.FunctionNameRefs)
      OS << Symtab.getFuncName(NameRef) << ",";
    OS << "\n";
  }
  OS << "\n";
}
798