//===-- MemoryProfileInfo.cpp - memory profile info -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains utilities to analyze memory profile information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryProfileInfo.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;
using namespace llvm::memprof;

#define DEBUG_TYPE "memory-profile-info"

// Upper bound on lifetime access density (accesses per byte per lifetime sec)
// for marking an allocation cold.
cl::opt<float> MemProfLifetimeAccessDensityColdThreshold(
    "memprof-lifetime-access-density-cold-threshold", cl::init(0.05),
    cl::Hidden,
    cl::desc("The threshold the lifetime access density (accesses per byte per "
             "lifetime sec) must be under to consider an allocation cold"));

// Lower bound on lifetime to mark an allocation cold (in addition to accesses
// per byte per sec above). This is to avoid pessimizing short-lived objects.
cl::opt<unsigned> MemProfAveLifetimeColdThreshold(
    "memprof-ave-lifetime-cold-threshold", cl::init(200), cl::Hidden,
    cl::desc("The average lifetime (s) for an allocation to be considered "
             "cold"));

// Lower bound on average lifetime access density (total lifetime access
// density / alloc count) for marking an allocation hot.
cl::opt<unsigned> MemProfMinAveLifetimeAccessDensityHotThreshold(
    "memprof-min-ave-lifetime-access-density-hot-threshold", cl::init(1000),
    cl::Hidden,
    cl::desc("The minimum TotalLifetimeAccessDensity / AllocCount for an "
             "allocation to be considered hot"));

cl::opt<bool> MemProfReportHintedSizes(
    "memprof-report-hinted-sizes", cl::init(false), cl::Hidden,
    cl::desc("Report total allocation sizes of hinted allocations"));

AllocationType llvm::memprof::getAllocType(uint64_t TotalLifetimeAccessDensity,
                                           uint64_t AllocCount,
                                           uint64_t TotalLifetime) {
  // The access densities are multiplied by 100 to hold 2 decimal places of
  // precision, so need to divide by 100.
  if (((float)TotalLifetimeAccessDensity) / AllocCount / 100 <
          MemProfLifetimeAccessDensityColdThreshold
      // Lifetime is expected to be in ms, so convert the threshold to ms.
      && ((float)TotalLifetime) / AllocCount >=
             MemProfAveLifetimeColdThreshold * 1000)
    return AllocationType::Cold;

  // The access densities are multiplied by 100 to hold 2 decimal places of
  // precision, so need to divide by 100.
  if (((float)TotalLifetimeAccessDensity) / AllocCount / 100 >
      MemProfMinAveLifetimeAccessDensityHotThreshold)
    return AllocationType::Hot;

  return AllocationType::NotCold;
}
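// As a rough illustration of the classification above, using the default
// thresholds and hypothetical profile values: TotalLifetimeAccessDensity = 8,
// AllocCount = 2 and TotalLifetime = 500000 ms give an average density of
// 8 / 2 / 100 = 0.04 < 0.05 and an average lifetime of 250000 ms >= 200000 ms,
// so the allocation is classified cold; TotalLifetimeAccessDensity = 300000
// with AllocCount = 2 gives 300000 / 2 / 100 = 1500 > 1000, so hot.
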
MDNode *llvm::memprof::buildCallstackMetadata(ArrayRef<uint64_t> CallStack,
                                              LLVMContext &Ctx) {
  std::vector<Metadata *> StackVals;
  for (auto Id : CallStack) {
    auto *StackValMD =
        ValueAsMetadata::get(ConstantInt::get(Type::getInt64Ty(Ctx), Id));
    StackVals.push_back(StackValMD);
  }
  return MDNode::get(Ctx, StackVals);
}

MDNode *llvm::memprof::getMIBStackNode(const MDNode *MIB) {
  assert(MIB->getNumOperands() >= 2);
  // The stack metadata is the first operand of each memprof MIB metadata.
  return cast<MDNode>(MIB->getOperand(0));
}

AllocationType llvm::memprof::getMIBAllocType(const MDNode *MIB) {
  assert(MIB->getNumOperands() >= 2);
  // The allocation type is currently the second operand of each memprof
  // MIB metadata. This will need to change as we add additional allocation
  // types that can be applied based on the allocation profile data.
  auto *MDS = dyn_cast<MDString>(MIB->getOperand(1));
  assert(MDS);
  if (MDS->getString() == "cold") {
    return AllocationType::Cold;
  } else if (MDS->getString() == "hot") {
    return AllocationType::Hot;
  }
  return AllocationType::NotCold;
}

uint64_t llvm::memprof::getMIBTotalSize(const MDNode *MIB) {
  if (MIB->getNumOperands() < 3)
    return 0;
  return mdconst::dyn_extract<ConstantInt>(MIB->getOperand(2))->getZExtValue();
}

std::string llvm::memprof::getAllocTypeAttributeString(AllocationType Type) {
  switch (Type) {
  case AllocationType::NotCold:
    return "notcold";
    break;
  case AllocationType::Cold:
    return "cold";
    break;
  case AllocationType::Hot:
    return "hot";
    break;
  default:
    assert(false && "Unexpected alloc type");
  }
  llvm_unreachable("invalid alloc type");
}

static void addAllocTypeAttribute(LLVMContext &Ctx, CallBase *CI,
                                  AllocationType AllocType) {
  auto AllocTypeString = getAllocTypeAttributeString(AllocType);
  auto A = llvm::Attribute::get(Ctx, "memprof", AllocTypeString);
  CI->addFnAttr(A);
}

bool llvm::memprof::hasSingleAllocType(uint8_t AllocTypes) {
  const unsigned NumAllocTypes = llvm::popcount(AllocTypes);
  assert(NumAllocTypes != 0);
  return NumAllocTypes == 1;
}
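// As an illustration of the trie built below (hypothetical stack ids, listed
// with the allocation frame first): adding a cold context {1, 2, 3} and a
// not-cold context {1, 2, 4} yields an alloc node for id 1 and a caller node
// for id 2 that both carry Cold|NotCold, with single-type caller nodes for
// ids 3 and 4 where the contexts diverge.
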
void CallStackTrie::addCallStack(AllocationType AllocType,
                                 ArrayRef<uint64_t> StackIds,
                                 uint64_t TotalSize) {
  bool First = true;
  CallStackTrieNode *Curr = nullptr;
  for (auto StackId : StackIds) {
    // If this is the first stack frame, add or update alloc node.
    if (First) {
      First = false;
      if (Alloc) {
        assert(AllocStackId == StackId);
        Alloc->AllocTypes |= static_cast<uint8_t>(AllocType);
        Alloc->TotalSize += TotalSize;
      } else {
        AllocStackId = StackId;
        Alloc = new CallStackTrieNode(AllocType, TotalSize);
      }
      Curr = Alloc;
      continue;
    }
    // Update existing caller node if it exists.
    auto Next = Curr->Callers.find(StackId);
    if (Next != Curr->Callers.end()) {
      Curr = Next->second;
      Curr->AllocTypes |= static_cast<uint8_t>(AllocType);
      Curr->TotalSize += TotalSize;
      continue;
    }
    // Otherwise add a new caller node.
    auto *New = new CallStackTrieNode(AllocType, TotalSize);
    Curr->Callers[StackId] = New;
    Curr = New;
  }
  assert(Curr);
}

void CallStackTrie::addCallStack(MDNode *MIB) {
  MDNode *StackMD = getMIBStackNode(MIB);
  assert(StackMD);
  std::vector<uint64_t> CallStack;
  CallStack.reserve(StackMD->getNumOperands());
  for (const auto &MIBStackIter : StackMD->operands()) {
    auto *StackId = mdconst::dyn_extract<ConstantInt>(MIBStackIter);
    assert(StackId);
    CallStack.push_back(StackId->getZExtValue());
  }
  addCallStack(getMIBAllocType(MIB), CallStack, getMIBTotalSize(MIB));
}

static MDNode *createMIBNode(LLVMContext &Ctx,
                             std::vector<uint64_t> &MIBCallStack,
                             AllocationType AllocType, uint64_t TotalSize) {
  std::vector<Metadata *> MIBPayload(
      {buildCallstackMetadata(MIBCallStack, Ctx)});
  MIBPayload.push_back(
      MDString::get(Ctx, getAllocTypeAttributeString(AllocType)));
  if (TotalSize)
    MIBPayload.push_back(ValueAsMetadata::get(
        ConstantInt::get(Type::getInt64Ty(Ctx), TotalSize)));
  return MDNode::get(Ctx, MIBPayload);
}

// Recursive helper to trim contexts and create metadata nodes.
// Caller should have pushed Node's loc to MIBCallStack. Doing this in the
// caller makes it simpler to handle the many early returns in this method.
bool CallStackTrie::buildMIBNodes(CallStackTrieNode *Node, LLVMContext &Ctx,
                                  std::vector<uint64_t> &MIBCallStack,
                                  std::vector<Metadata *> &MIBNodes,
                                  bool CalleeHasAmbiguousCallerContext) {
  // Trim context below the first node in a prefix with a single alloc type.
  // Add an MIB record for the current call stack prefix.
  if (hasSingleAllocType(Node->AllocTypes)) {
    MIBNodes.push_back(createMIBNode(
        Ctx, MIBCallStack, (AllocationType)Node->AllocTypes, Node->TotalSize));
    return true;
  }

  // We don't have a single allocation type for all the contexts sharing this
  // prefix, so recursively descend into callers in the trie.
  if (!Node->Callers.empty()) {
    bool NodeHasAmbiguousCallerContext = Node->Callers.size() > 1;
    bool AddedMIBNodesForAllCallerContexts = true;
    for (auto &Caller : Node->Callers) {
      MIBCallStack.push_back(Caller.first);
      AddedMIBNodesForAllCallerContexts &=
          buildMIBNodes(Caller.second, Ctx, MIBCallStack, MIBNodes,
                        NodeHasAmbiguousCallerContext);
      // Remove Caller.
      MIBCallStack.pop_back();
    }
    if (AddedMIBNodesForAllCallerContexts)
      return true;
    // We expect that the callers should be forced to add MIBs to disambiguate
    // the context in this case (see below).
    assert(!NodeHasAmbiguousCallerContext);
  }

  // If we reached here, then this node does not have a single allocation type,
  // and we didn't add metadata for a longer call stack prefix including any of
  // Node's callers. That means we never hit a single allocation type along all
  // call stacks with this prefix. This can happen due to recursion collapsing
  // or the stack being deeper than tracked by the profiler runtime, leading to
  // contexts with different allocation types being merged. In that case, we
  // trim the context just below the deepest context split, which is this
  // node if the callee has an ambiguous caller context (multiple callers),
  // since the recursive calls above returned false. Conservatively give it
  // non-cold allocation type.
  if (!CalleeHasAmbiguousCallerContext)
    return false;
  MIBNodes.push_back(createMIBNode(Ctx, MIBCallStack, AllocationType::NotCold,
                                   Node->TotalSize));
  return true;
}
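// To illustrate the output of buildAndAttachMIBMetadata below (hypothetical
// stack ids and types), an allocation call that kept two contexts might end
// up annotated as:
//   call ptr @malloc(i64 8), !memprof !0
//   !0 = !{!1, !3}
//   !1 = !{!2, !"cold"}
//   !2 = !{i64 123, i64 456}
//   !3 = !{!4, !"notcold"}
//   !4 = !{i64 123, i64 789}
// whereas a trie with a single alloc type instead gets a "memprof"="cold"
// (or "notcold"/"hot") attribute on the call.
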
// Build and attach the minimal necessary MIB metadata. If the alloc has a
// single allocation type, add a function attribute instead. Returns true if
// memprof metadata was attached, false if not (attribute added).
bool CallStackTrie::buildAndAttachMIBMetadata(CallBase *CI) {
  auto &Ctx = CI->getContext();
  if (hasSingleAllocType(Alloc->AllocTypes)) {
    addAllocTypeAttribute(Ctx, CI, (AllocationType)Alloc->AllocTypes);
    if (MemProfReportHintedSizes) {
      assert(Alloc->TotalSize);
      errs() << "Total size for allocation with location hash " << AllocStackId
             << " and single alloc type "
             << getAllocTypeAttributeString((AllocationType)Alloc->AllocTypes)
             << ": " << Alloc->TotalSize << "\n";
    }
    return false;
  }
  std::vector<uint64_t> MIBCallStack;
  MIBCallStack.push_back(AllocStackId);
  std::vector<Metadata *> MIBNodes;
  assert(!Alloc->Callers.empty() && "addCallStack has not been called yet");
  // The last parameter indicates whether the callee of the given node has
  // more than one caller. Here the node being passed in is the alloc node,
  // which has no callees, so pass false.
  if (buildMIBNodes(Alloc, Ctx, MIBCallStack, MIBNodes, false)) {
    assert(MIBCallStack.size() == 1 &&
           "Should only be left with Alloc's location in stack");
    CI->setMetadata(LLVMContext::MD_memprof, MDNode::get(Ctx, MIBNodes));
    return true;
  }
  // In the corner case where the CallStackTrie consists of a single chain to
  // the leaf and every node in the chain has multiple alloc types,
  // conservatively give the allocation a non-cold allocation type.
  // FIXME: Avoid this case before the memory profile is created.
  addAllocTypeAttribute(Ctx, CI, AllocationType::NotCold);
  return false;
}

template <>
CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::CallStackIterator(
    const MDNode *N, bool End)
    : N(N) {
  if (!N)
    return;
  Iter = End ? N->op_end() : N->op_begin();
}

template <>
uint64_t
CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::operator*() {
  assert(Iter != N->op_end());
  ConstantInt *StackIdCInt = mdconst::dyn_extract<ConstantInt>(*Iter);
  assert(StackIdCInt);
  return StackIdCInt->getZExtValue();
}

template <> uint64_t CallStack<MDNode, MDNode::op_iterator>::back() const {
  assert(N);
  return mdconst::dyn_extract<ConstantInt>(N->operands().back())
      ->getZExtValue();
}