1 #include "llvm/ProfileData/MemProf.h" 2 #include "llvm/ADT/SmallVector.h" 3 #include "llvm/IR/Function.h" 4 #include "llvm/ProfileData/InstrProf.h" 5 #include "llvm/Support/Endian.h" 6 #include "llvm/Support/EndianStream.h" 7 8 namespace llvm { 9 namespace memprof { 10 11 void IndexedMemProfRecord::serialize(const MemProfSchema &Schema, 12 raw_ostream &OS) { 13 using namespace support; 14 15 endian::Writer LE(OS, little); 16 17 LE.write<uint64_t>(AllocSites.size()); 18 for (const IndexedAllocationInfo &N : AllocSites) { 19 LE.write<uint64_t>(N.CallStack.size()); 20 for (const FrameId &Id : N.CallStack) 21 LE.write<FrameId>(Id); 22 N.Info.serialize(Schema, OS); 23 } 24 25 // Related contexts. 26 LE.write<uint64_t>(CallSites.size()); 27 for (const auto &Frames : CallSites) { 28 LE.write<uint64_t>(Frames.size()); 29 for (const FrameId &Id : Frames) 30 LE.write<FrameId>(Id); 31 } 32 } 33 34 IndexedMemProfRecord 35 IndexedMemProfRecord::deserialize(const MemProfSchema &Schema, 36 const unsigned char *Ptr) { 37 using namespace support; 38 39 IndexedMemProfRecord Record; 40 41 // Read the meminfo nodes. 42 const uint64_t NumNodes = endian::readNext<uint64_t, little, unaligned>(Ptr); 43 for (uint64_t I = 0; I < NumNodes; I++) { 44 IndexedAllocationInfo Node; 45 const uint64_t NumFrames = 46 endian::readNext<uint64_t, little, unaligned>(Ptr); 47 for (uint64_t J = 0; J < NumFrames; J++) { 48 const FrameId Id = endian::readNext<FrameId, little, unaligned>(Ptr); 49 Node.CallStack.push_back(Id); 50 } 51 Node.Info.deserialize(Schema, Ptr); 52 Ptr += PortableMemInfoBlock::serializedSize(); 53 Record.AllocSites.push_back(Node); 54 } 55 56 // Read the callsite information. 57 const uint64_t NumCtxs = endian::readNext<uint64_t, little, unaligned>(Ptr); 58 for (uint64_t J = 0; J < NumCtxs; J++) { 59 const uint64_t NumFrames = 60 endian::readNext<uint64_t, little, unaligned>(Ptr); 61 llvm::SmallVector<FrameId> Frames; 62 Frames.reserve(NumFrames); 63 for (uint64_t K = 0; K < NumFrames; K++) { 64 const FrameId Id = endian::readNext<FrameId, little, unaligned>(Ptr); 65 Frames.push_back(Id); 66 } 67 Record.CallSites.push_back(Frames); 68 } 69 70 return Record; 71 } 72 73 GlobalValue::GUID IndexedMemProfRecord::getGUID(const StringRef FunctionName) { 74 const auto Pos = FunctionName.find(".llvm."); 75 76 // We use the function guid which we expect to be a uint64_t. At 77 // this time, it is the lower 64 bits of the md5 of the function 78 // name. Any suffix with .llvm. is trimmed since these are added by 79 // thinLTO global promotion. At the time the profile is consumed, 80 // these suffixes will not be present. 81 return Function::getGUID(FunctionName.take_front(Pos)); 82 } 83 84 Expected<MemProfSchema> readMemProfSchema(const unsigned char *&Buffer) { 85 using namespace support; 86 87 const unsigned char *Ptr = Buffer; 88 const uint64_t NumSchemaIds = 89 endian::readNext<uint64_t, little, unaligned>(Ptr); 90 if (NumSchemaIds > static_cast<uint64_t>(Meta::Size)) { 91 return make_error<InstrProfError>(instrprof_error::malformed, 92 "memprof schema invalid"); 93 } 94 95 MemProfSchema Result; 96 for (size_t I = 0; I < NumSchemaIds; I++) { 97 const uint64_t Tag = endian::readNext<uint64_t, little, unaligned>(Ptr); 98 if (Tag >= static_cast<uint64_t>(Meta::Size)) { 99 return make_error<InstrProfError>(instrprof_error::malformed, 100 "memprof schema invalid"); 101 } 102 Result.push_back(static_cast<Meta>(Tag)); 103 } 104 // Advace the buffer to one past the schema if we succeeded. 
105 Buffer = Ptr; 106 return Result; 107 } 108 109 } // namespace memprof 110 } // namespace llvm 111
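
// Illustrative round-trip sketch (not part of the upstream file): it shows how
// a caller might serialize a small IndexedMemProfRecord into an in-memory
// buffer with the routines above and read it back with deserialize(). The
// schema is built from MIBEntryDef.inc so that every MemInfoBlock field is
// written and PortableMemInfoBlock::serializedSize() matches what serialize()
// produced. Names not defined in this file (raw_string_ostream, the FrameId
// typedef, a static deserialize() declaration) are assumptions about the
// corresponding headers; the sketch is kept under #if 0 so it is not compiled
// as part of this translation unit.
#if 0
#include "llvm/Support/raw_ostream.h"

static llvm::memprof::IndexedMemProfRecord roundTripExample() {
  using namespace llvm::memprof;

  // Full schema: one Meta tag per MemInfoBlock field.
  MemProfSchema Schema;
#define MIBEntryDef(NameTag, Name, Type) Schema.push_back(Meta::Name);
#include "llvm/ProfileData/MIBEntryDef.inc"
#undef MIBEntryDef

  // A record with a single allocation site whose call stack has two frames.
  IndexedAllocationInfo Alloc;
  Alloc.CallStack = {FrameId(1), FrameId(2)};

  IndexedMemProfRecord Record;
  Record.AllocSites.push_back(Alloc);

  // Serialize into a string-backed stream, then deserialize from its bytes.
  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  Record.serialize(Schema, OS);
  OS.flush();

  return IndexedMemProfRecord::deserialize(
      Schema, reinterpret_cast<const unsigned char *>(Buffer.data()));
}
#endif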