//===-- MemoryProfileInfo.cpp - memory profile info ------------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains utilities to analyze memory profile information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryProfileInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;
using namespace llvm::memprof;

#define DEBUG_TYPE "memory-profile-info"

// Upper bound on accesses per byte for marking an allocation cold.
cl::opt<float> MemProfAccessesPerByteColdThreshold(
    "memprof-accesses-per-byte-cold-threshold", cl::init(10.0), cl::Hidden,
    cl::desc("The threshold the accesses per byte must be under to consider "
             "an allocation cold"));

// Lower bound on lifetime to mark an allocation cold (in addition to accesses
// per byte above). This is to avoid pessimizing short lived objects.
cl::opt<unsigned> MemProfMinLifetimeColdThreshold(
    "memprof-min-lifetime-cold-threshold", cl::init(200), cl::Hidden,
    cl::desc("The minimum lifetime (s) for an allocation to be considered "
             "cold"));

AllocationType llvm::memprof::getAllocType(uint64_t MaxAccessCount,
                                           uint64_t MinSize,
                                           uint64_t MinLifetime) {
  if (((float)MaxAccessCount) / MinSize < MemProfAccessesPerByteColdThreshold &&
      // MinLifetime is expected to be in ms, so convert the threshold to ms.
      MinLifetime >= MemProfMinLifetimeColdThreshold * 1000)
    return AllocationType::Cold;
  return AllocationType::NotCold;
}

MDNode *llvm::memprof::buildCallstackMetadata(ArrayRef<uint64_t> CallStack,
                                              LLVMContext &Ctx) {
  std::vector<Metadata *> StackVals;
  StackVals.reserve(CallStack.size());
  for (auto Id : CallStack) {
    auto *StackValMD =
        ValueAsMetadata::get(ConstantInt::get(Type::getInt64Ty(Ctx), Id));
    StackVals.push_back(StackValMD);
  }
  return MDNode::get(Ctx, StackVals);
}
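
// For reference, the metadata built above and consumed by the helpers below
// takes roughly this shape in textual IR (illustrative sketch; the ids and
// metadata node numbers are made up):
//
//   %call = call ptr @malloc(i64 8), !memprof !0
//   !0 = !{!1, !3}            ; list of MIB nodes
//   !1 = !{!2, !"notcold"}    ; MIB: (call stack, allocation type)
//   !2 = !{i64 123, i64 456}  ; stack ids, allocation frame first
//   !3 = !{!4, !"cold"}
//   !4 = !{i64 123, i64 789}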

MDNode *llvm::memprof::getMIBStackNode(const MDNode *MIB) {
  assert(MIB->getNumOperands() == 2);
  // The stack metadata is the first operand of each memprof MIB metadata.
  return cast<MDNode>(MIB->getOperand(0));
}

AllocationType llvm::memprof::getMIBAllocType(const MDNode *MIB) {
  assert(MIB->getNumOperands() == 2);
  // The allocation type is currently the second operand of each memprof
  // MIB metadata. This will need to change as we add additional allocation
  // types that can be applied based on the allocation profile data.
  auto *MDS = dyn_cast<MDString>(MIB->getOperand(1));
  assert(MDS);
  if (MDS->getString() == "cold")
    return AllocationType::Cold;
  return AllocationType::NotCold;
}

static std::string getAllocTypeAttributeString(AllocationType Type) {
  switch (Type) {
  case AllocationType::NotCold:
    return "notcold";
  case AllocationType::Cold:
    return "cold";
  default:
    assert(false && "Unexpected alloc type");
  }
  llvm_unreachable("invalid alloc type");
}

static void addAllocTypeAttribute(LLVMContext &Ctx, CallBase *CI,
                                  AllocationType AllocType) {
  auto AllocTypeString = getAllocTypeAttributeString(AllocType);
  auto A = llvm::Attribute::get(Ctx, "memprof", AllocTypeString);
  CI->addFnAttr(A);
}

static bool hasSingleAllocType(uint8_t AllocTypes) {
  const unsigned NumAllocTypes = llvm::popcount(AllocTypes);
  assert(NumAllocTypes != 0);
  return NumAllocTypes == 1;
}

void CallStackTrie::addCallStack(AllocationType AllocType,
                                 ArrayRef<uint64_t> StackIds) {
  bool First = true;
  CallStackTrieNode *Curr = nullptr;
  for (auto StackId : StackIds) {
    // If this is the first stack frame, add or update alloc node.
    if (First) {
      First = false;
      if (Alloc) {
        assert(AllocStackId == StackId);
        Alloc->AllocTypes |= static_cast<uint8_t>(AllocType);
      } else {
        AllocStackId = StackId;
        Alloc = new CallStackTrieNode(AllocType);
      }
      Curr = Alloc;
      continue;
    }
    // Update existing caller node if it exists.
    auto Next = Curr->Callers.find(StackId);
    if (Next != Curr->Callers.end()) {
      Curr = Next->second;
      Curr->AllocTypes |= static_cast<uint8_t>(AllocType);
      continue;
    }
    // Otherwise add a new caller node.
    auto *New = new CallStackTrieNode(AllocType);
    Curr->Callers[StackId] = New;
    Curr = New;
  }
  assert(Curr);
}

void CallStackTrie::addCallStack(MDNode *MIB) {
  MDNode *StackMD = getMIBStackNode(MIB);
  assert(StackMD);
  std::vector<uint64_t> CallStack;
  CallStack.reserve(StackMD->getNumOperands());
  for (const auto &MIBStackIter : StackMD->operands()) {
    auto *StackId = mdconst::dyn_extract<ConstantInt>(MIBStackIter);
    assert(StackId);
    CallStack.push_back(StackId->getZExtValue());
  }
  addCallStack(getMIBAllocType(MIB), CallStack);
}

static MDNode *createMIBNode(LLVMContext &Ctx,
                             std::vector<uint64_t> &MIBCallStack,
                             AllocationType AllocType) {
  std::vector<Metadata *> MIBPayload(
      {buildCallstackMetadata(MIBCallStack, Ctx)});
  MIBPayload.push_back(
      MDString::get(Ctx, getAllocTypeAttributeString(AllocType)));
  return MDNode::get(Ctx, MIBPayload);
}
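
// Illustrative example of the trimming performed by buildMIBNodes below
// (frame ids are made up; the allocation frame is listed first). Given the
// profiled contexts
//   [A, B, C, D] -> cold
//   [A, B, C, E] -> cold
//   [A, B, F]    -> notcold
// every context through C is cold, so the recursion stops at C and emits a
// single MIB [A, B, C] -> cold (trimming D and E), plus [A, B, F] -> notcold
// for the other caller of B. A node with multiple allocation types and no
// callers left to disambiguate them (e.g. contexts merged by recursion
// collapsing) instead gets one conservative notcold MIB for its prefix.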

// Recursive helper to trim contexts and create metadata nodes.
// Caller should have pushed Node's loc to MIBCallStack. Doing this in the
// caller makes it simpler to handle the many early returns in this method.
bool CallStackTrie::buildMIBNodes(CallStackTrieNode *Node, LLVMContext &Ctx,
                                  std::vector<uint64_t> &MIBCallStack,
                                  std::vector<Metadata *> &MIBNodes,
                                  bool CalleeHasAmbiguousCallerContext) {
  // Trim context below the first node in a prefix with a single alloc type.
  // Add an MIB record for the current call stack prefix.
  if (hasSingleAllocType(Node->AllocTypes)) {
    MIBNodes.push_back(
        createMIBNode(Ctx, MIBCallStack, (AllocationType)Node->AllocTypes));
    return true;
  }

  // We don't have a single allocation type for all the contexts sharing this
  // prefix, so recursively descend into callers in the trie.
  if (!Node->Callers.empty()) {
    bool NodeHasAmbiguousCallerContext = Node->Callers.size() > 1;
    bool AddedMIBNodesForAllCallerContexts = true;
    for (auto &Caller : Node->Callers) {
      MIBCallStack.push_back(Caller.first);
      AddedMIBNodesForAllCallerContexts &=
          buildMIBNodes(Caller.second, Ctx, MIBCallStack, MIBNodes,
                        NodeHasAmbiguousCallerContext);
      // Remove Caller.
      MIBCallStack.pop_back();
    }
    if (AddedMIBNodesForAllCallerContexts)
      return true;
    // We expect that the callers should be forced to add MIBs to disambiguate
    // the context in this case (see below).
    assert(!NodeHasAmbiguousCallerContext);
  }

  // If we reached here, then this node does not have a single allocation type,
  // and we didn't add metadata for a longer call stack prefix including any of
  // Node's callers. That means we never hit a single allocation type along all
  // call stacks with this prefix. This can happen due to recursion collapsing
  // or the stack being deeper than tracked by the profiler runtime, leading to
  // contexts with different allocation types being merged. In that case, we
  // trim the context just below the deepest context split, which is this node
  // if the callee has an ambiguous caller context (multiple callers), since
  // the recursive calls above returned false. Conservatively give it a
  // non-cold allocation type.
  if (!CalleeHasAmbiguousCallerContext)
    return false;
  MIBNodes.push_back(
      createMIBNode(Ctx, MIBCallStack, AllocationType::NotCold));
  return true;
}

// Build and attach the minimal necessary MIB metadata. If the alloc has a
// single allocation type, add a function attribute instead. Returns true if
// memprof metadata was attached, false if not (attribute added).
bool CallStackTrie::buildAndAttachMIBMetadata(CallBase *CI) {
  auto &Ctx = CI->getContext();
  if (hasSingleAllocType(Alloc->AllocTypes)) {
    addAllocTypeAttribute(Ctx, CI, (AllocationType)Alloc->AllocTypes);
    return false;
  }
  std::vector<uint64_t> MIBCallStack;
  MIBCallStack.push_back(AllocStackId);
  std::vector<Metadata *> MIBNodes;
  assert(!Alloc->Callers.empty() && "addCallStack has not been called yet");
  buildMIBNodes(Alloc, Ctx, MIBCallStack, MIBNodes,
                /*CalleeHasAmbiguousCallerContext=*/true);
  assert(MIBCallStack.size() == 1 &&
         "Should only be left with Alloc's location in stack");
  CI->setMetadata(LLVMContext::MD_memprof, MDNode::get(Ctx, MIBNodes));
  return true;
}

template <>
CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::CallStackIterator(
    const MDNode *N, bool End)
    : N(N) {
  if (!N)
    return;
  Iter = End ? N->op_end() : N->op_begin();
}

template <>
uint64_t
CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::operator*() {
  assert(Iter != N->op_end());
  ConstantInt *StackIdCInt = mdconst::dyn_extract<ConstantInt>(*Iter);
  assert(StackIdCInt);
  return StackIdCInt->getZExtValue();
}
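
// Typical client usage (illustrative sketch; the real callers live in the
// memprof profile matching code outside this file, and the names below are
// placeholders):
//
//   CallStackTrie Trie;
//   for (const auto &Context : ProfiledContextsForThisAllocation)
//     Trie.addCallStack(getAllocType(Context.MaxAccessCount, Context.MinSize,
//                                    Context.MinLifetime),
//                       Context.StackIds);
//   // Attaches !memprof metadata, or just a "memprof" function attribute on
//   // the call if all contexts share a single allocation type.
//   Trie.buildAndAttachMIBMetadata(AllocCall);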