//===-- MemoryProfileInfo.cpp - memory profile info ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains utilities to analyze memory profile information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryProfileInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"

using namespace llvm;
using namespace llvm::memprof;

#define DEBUG_TYPE "memory-profile-info"

cl::opt<bool> MemProfReportHintedSizes(
    "memprof-report-hinted-sizes", cl::init(false), cl::Hidden,
    cl::desc("Report total allocation sizes of hinted allocations"));

// This is useful if we have enabled reporting of hinted sizes, and want to get
// information from the indexing step for all contexts (especially for testing),
// or have specified a value less than 100% for -memprof-cloning-cold-threshold.
LLVM_ABI cl::opt<bool> MemProfKeepAllNotColdContexts(
    "memprof-keep-all-not-cold-contexts", cl::init(false), cl::Hidden,
    cl::desc("Keep all non-cold contexts (increases cloning overheads)"));

cl::opt<unsigned> MinClonedColdBytePercent(
    "memprof-cloning-cold-threshold", cl::init(100), cl::Hidden,
    cl::desc("Min percent of cold bytes to hint alloc cold during cloning"));

// Discard non-cold contexts if they overlap with much larger cold contexts,
// specifically, if all contexts reaching a given callsite are at least this
// percent cold byte allocations. This reduces the amount of cloning required
// to expose the cold contexts when they greatly dominate non-cold contexts.
cl::opt<unsigned> MinCallsiteColdBytePercent(
    "memprof-callsite-cold-threshold", cl::init(100), cl::Hidden,
    cl::desc("Min percent of cold bytes at a callsite to discard non-cold "
             "contexts"));

// Enable saving context size information for largest cold contexts, which can
// be used to flag contexts for more aggressive cloning and reporting.
cl::opt<unsigned> MinPercentMaxColdSize(
    "memprof-min-percent-max-cold-size", cl::init(100), cl::Hidden,
    cl::desc("Min percent of max cold bytes for critical cold context"));

bool llvm::memprof::metadataIncludesAllContextSizeInfo() {
  return MemProfReportHintedSizes || MinClonedColdBytePercent < 100;
}

bool llvm::memprof::metadataMayIncludeContextSizeInfo() {
  return metadataIncludesAllContextSizeInfo() || MinPercentMaxColdSize < 100;
}

bool llvm::memprof::recordContextSizeInfoForAnalysis() {
  return metadataMayIncludeContextSizeInfo() ||
         MinCallsiteColdBytePercent < 100;
}

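// Example for the predicates above (illustrative): passing
// -memprof-cloning-cold-threshold=80 makes MinClonedColdBytePercent < 100, so
// metadataIncludesAllContextSizeInfo() returns true, which in turn makes the
// other two predicates true as well. With all three options at their defaults
// (100, and size reporting off), every predicate returns false.
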
MDNode *llvm::memprof::buildCallstackMetadata(ArrayRef<uint64_t> CallStack,
                                              LLVMContext &Ctx) {
  SmallVector<Metadata *, 8> StackVals;
  StackVals.reserve(CallStack.size());
  for (auto Id : CallStack) {
    auto *StackValMD =
        ValueAsMetadata::get(ConstantInt::get(Type::getInt64Ty(Ctx), Id));
    StackVals.push_back(StackValMD);
  }
  return MDNode::get(Ctx, StackVals);
}

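// For example (illustrative), buildCallstackMetadata({1, 2, 3}, Ctx) produces
// a node printed in textual IR as:
//   !0 = !{i64 1, i64 2, i64 3}
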
MDNode *llvm::memprof::getMIBStackNode(const MDNode *MIB) {
  assert(MIB->getNumOperands() >= 2);
  // The stack metadata is the first operand of each memprof MIB metadata.
  return cast<MDNode>(MIB->getOperand(0));
}

AllocationType llvm::memprof::getMIBAllocType(const MDNode *MIB) {
  assert(MIB->getNumOperands() >= 2);
  // The allocation type is currently the second operand of each memprof
  // MIB metadata. This will need to change as we add additional allocation
  // types that can be applied based on the allocation profile data.
  auto *MDS = dyn_cast<MDString>(MIB->getOperand(1));
  assert(MDS);
  if (MDS->getString() == "cold")
    return AllocationType::Cold;
  if (MDS->getString() == "hot")
    return AllocationType::Hot;
  return AllocationType::NotCold;
}

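// Schematically, each memprof MIB node accessed above has the form
//   !{<stack node>, !"<alloc type string>", <optional context size pairs>}
// e.g. (illustrative):
//   !1 = !{!2, !"cold", !3}
//   !2 = !{i64 123, i64 456}   ; call stack ids
//   !3 = !{i64 789, i64 4096}  ; {full stack id, total size}
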
std::string llvm::memprof::getAllocTypeAttributeString(AllocationType Type) {
  switch (Type) {
  case AllocationType::NotCold:
    return "notcold";
  case AllocationType::Cold:
    return "cold";
  case AllocationType::Hot:
    return "hot";
  default:
    assert(false && "Unexpected alloc type");
  }
  llvm_unreachable("invalid alloc type");
}

bool llvm::memprof::hasSingleAllocType(uint8_t AllocTypes) {
  const unsigned NumAllocTypes = llvm::popcount(AllocTypes);
  assert(NumAllocTypes != 0);
  return NumAllocTypes == 1;
}

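// Note that AllocTypes is a bitmask of AllocationType values, hence the
// popcount above. For example (illustrative):
//   hasSingleAllocType((uint8_t)AllocationType::Cold);          // true
//   hasSingleAllocType((uint8_t)AllocationType::Cold |
//                      (uint8_t)AllocationType::NotCold);       // false
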
void CallStackTrie::addCallStack(
    AllocationType AllocType, ArrayRef<uint64_t> StackIds,
    std::vector<ContextTotalSize> ContextSizeInfo) {
  bool First = true;
  CallStackTrieNode *Curr = nullptr;
  for (auto StackId : StackIds) {
    // If this is the first stack frame, add or update alloc node.
    if (First) {
      First = false;
      if (Alloc) {
        assert(AllocStackId == StackId);
        Alloc->addAllocType(AllocType);
      } else {
        AllocStackId = StackId;
        Alloc = new CallStackTrieNode(AllocType);
      }
      Curr = Alloc;
      continue;
    }
    // Update existing caller node if it exists.
    auto [Next, Inserted] = Curr->Callers.try_emplace(StackId);
    if (!Inserted) {
      Curr = Next->second;
      Curr->addAllocType(AllocType);
      continue;
    }
    // Otherwise add a new caller node.
    auto *New = new CallStackTrieNode(AllocType);
    Next->second = New;
    Curr = New;
  }
  assert(Curr);
  llvm::append_range(Curr->ContextSizeInfo, ContextSizeInfo);
}

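// For example (illustrative), adding the stacks {1, 2, 3} (cold) and
// {1, 2, 4} (notcold), where frame 1 is the allocation, yields an alloc node
// for frame 1 and a shared caller node for frame 2, both with AllocTypes ==
// Cold|NotCold, plus leaf caller nodes for frame 3 (Cold) and frame 4
// (NotCold).
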
void CallStackTrie::addCallStack(MDNode *MIB) {
  MDNode *StackMD = getMIBStackNode(MIB);
  assert(StackMD);
  std::vector<uint64_t> CallStack;
  CallStack.reserve(StackMD->getNumOperands());
  for (const auto &MIBStackIter : StackMD->operands()) {
    auto *StackId = mdconst::dyn_extract<ConstantInt>(MIBStackIter);
    assert(StackId);
    CallStack.push_back(StackId->getZExtValue());
  }
  std::vector<ContextTotalSize> ContextSizeInfo;
  // Collect the context size information if it exists.
  if (MIB->getNumOperands() > 2) {
    for (unsigned I = 2; I < MIB->getNumOperands(); I++) {
      // Use cast<> and extract<> since the results are dereferenced
      // unconditionally below; these assert on a mismatch.
      MDNode *ContextSizePair = cast<MDNode>(MIB->getOperand(I));
      assert(ContextSizePair->getNumOperands() == 2);
      uint64_t FullStackId =
          mdconst::extract<ConstantInt>(ContextSizePair->getOperand(0))
              ->getZExtValue();
      uint64_t TotalSize =
          mdconst::extract<ConstantInt>(ContextSizePair->getOperand(1))
              ->getZExtValue();
      ContextSizeInfo.push_back({FullStackId, TotalSize});
    }
  }
  addCallStack(getMIBAllocType(MIB), CallStack, std::move(ContextSizeInfo));
}

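// For example (illustrative), this overload can consume an MIB node such as:
//   !1 = !{!2, !"cold", !3}
//   !2 = !{i64 123, i64 456}   ; call stack, allocation frame first
//   !3 = !{i64 789, i64 4096}  ; {full stack id, total bytes}
// which adds the stack {123, 456} with a cold alloc type and one
// ContextTotalSize entry of {789, 4096}.
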
static MDNode *createMIBNode(LLVMContext &Ctx, ArrayRef<uint64_t> MIBCallStack,
                             AllocationType AllocType,
                             ArrayRef<ContextTotalSize> ContextSizeInfo,
                             const uint64_t MaxColdSize, uint64_t &TotalBytes,
                             uint64_t &ColdBytes) {
  SmallVector<Metadata *> MIBPayload(
      {buildCallstackMetadata(MIBCallStack, Ctx)});
  MIBPayload.push_back(
      MDString::get(Ctx, getAllocTypeAttributeString(AllocType)));

  if (ContextSizeInfo.empty()) {
    // The profile matcher should have provided context size info if
    // MinCallsiteColdBytePercent was less than 100. Here we check >= 100 to
    // gracefully handle a user-provided percent larger than 100.
    assert(MinCallsiteColdBytePercent >= 100);
    return MDNode::get(Ctx, MIBPayload);
  }

  for (const auto &[FullStackId, TotalSize] : ContextSizeInfo) {
    TotalBytes += TotalSize;
    bool LargeColdContext = false;
    if (AllocType == AllocationType::Cold) {
      ColdBytes += TotalSize;
      // If we have the max cold context size from summary information and have
      // requested identification of contexts above a percentage of the max, see
      // if this context qualifies.
      if (MaxColdSize > 0 && MinPercentMaxColdSize < 100 &&
          TotalSize * 100 >= MaxColdSize * MinPercentMaxColdSize)
        LargeColdContext = true;
    }
    // Only add the context size info as metadata if we need it in the thin
    // link (currently if reporting of hinted sizes is enabled, we have
    // specified a threshold for marking allocations cold after cloning, or we
    // have identified this as a large cold context of interest above).
    if (metadataIncludesAllContextSizeInfo() || LargeColdContext) {
      auto *FullStackIdMD = ValueAsMetadata::get(
          ConstantInt::get(Type::getInt64Ty(Ctx), FullStackId));
      auto *TotalSizeMD = ValueAsMetadata::get(
          ConstantInt::get(Type::getInt64Ty(Ctx), TotalSize));
      auto *ContextSizeMD = MDNode::get(Ctx, {FullStackIdMD, TotalSizeMD});
      MIBPayload.push_back(ContextSizeMD);
    }
  }
  assert(TotalBytes > 0);
  return MDNode::get(Ctx, MIBPayload);
}

void CallStackTrie::collectContextSizeInfo(
    CallStackTrieNode *Node, std::vector<ContextTotalSize> &ContextSizeInfo) {
  llvm::append_range(ContextSizeInfo, Node->ContextSizeInfo);
  for (auto &Caller : Node->Callers)
    collectContextSizeInfo(Caller.second, ContextSizeInfo);
}

void CallStackTrie::convertHotToNotCold(CallStackTrieNode *Node) {
  if (Node->hasAllocType(AllocationType::Hot)) {
    Node->removeAllocType(AllocationType::Hot);
    Node->addAllocType(AllocationType::NotCold);
  }
  for (auto &Caller : Node->Callers)
    convertHotToNotCold(Caller.second);
}

// Copy over some or all of NewMIBNodes to the SavedMIBNodes vector, depending
// on options that enable filtering out some NotCold contexts.
static void saveFilteredNewMIBNodes(std::vector<Metadata *> &NewMIBNodes,
                                    std::vector<Metadata *> &SavedMIBNodes,
                                    unsigned CallerContextLength,
                                    uint64_t TotalBytes, uint64_t ColdBytes) {
  const bool MostlyCold =
      MinCallsiteColdBytePercent < 100 &&
      ColdBytes * 100 >= MinCallsiteColdBytePercent * TotalBytes;

  // In the simplest case, with pruning disabled, keep all the new MIB nodes.
  if (MemProfKeepAllNotColdContexts && !MostlyCold) {
    append_range(SavedMIBNodes, NewMIBNodes);
    return;
  }

  auto EmitMessageForRemovedContexts = [](const MDNode *MIBMD, StringRef Tag,
                                          StringRef Extra) {
    assert(MIBMD->getNumOperands() > 2);
    for (unsigned I = 2; I < MIBMD->getNumOperands(); I++) {
      // Use cast<> and extract<> since the results are dereferenced
      // unconditionally below; these assert on a mismatch.
      MDNode *ContextSizePair = cast<MDNode>(MIBMD->getOperand(I));
      assert(ContextSizePair->getNumOperands() == 2);
      uint64_t FullStackId =
          mdconst::extract<ConstantInt>(ContextSizePair->getOperand(0))
              ->getZExtValue();
      uint64_t TS =
          mdconst::extract<ConstantInt>(ContextSizePair->getOperand(1))
              ->getZExtValue();
      errs() << "MemProf hinting: Total size for " << Tag
             << " non-cold full allocation context hash " << FullStackId
             << Extra << ": " << TS << "\n";
    }
  };

  // If the cold bytes at the current callsite exceed the given threshold, we
  // discard all non-cold contexts, so we do not need any of the later pruning
  // handling. We can simply copy over all the cold contexts and return early.
  if (MostlyCold) {
    auto NewColdMIBNodes =
        make_filter_range(NewMIBNodes, [&](const Metadata *M) {
          auto MIBMD = cast<MDNode>(M);
          // Only append cold contexts.
          if (getMIBAllocType(MIBMD) == AllocationType::Cold)
            return true;
          if (MemProfReportHintedSizes) {
            const float PercentCold = ColdBytes * 100.0 / TotalBytes;
            std::string PercentStr;
            llvm::raw_string_ostream OS(PercentStr);
            OS << format(" for %5.2f%% cold bytes", PercentCold);
            EmitMessageForRemovedContexts(MIBMD, "discarded", OS.str());
          }
          return false;
        });
    for (auto *M : NewColdMIBNodes)
      SavedMIBNodes.push_back(M);
    return;
  }

  // Prune unneeded NotCold contexts, taking advantage of the fact
  // that we later will only clone Cold contexts, as NotCold is the allocation
  // default. We only need to keep as metadata the NotCold contexts that
  // overlap the longest with Cold allocations, so that we know how deeply we
  // need to clone. For example, assume we add the following contexts to the
  // trie:
  //    1 3 (notcold)
  //    1 2 4 (cold)
  //    1 2 5 (notcold)
  //    1 2 6 (notcold)
  // the trie looks like:
  //         1
  //        / \
  //       2   3
  //      /|\
  //     4 5 6
  //
  // It is sufficient to prune all but one of the not-cold contexts (either
  // 1,2,5 or 1,2,6; we arbitrarily keep the first one we encounter, which
  // will be 1,2,5).
  //
  // To do this pruning, we first check if there were any not-cold
  // contexts kept for a deeper caller, which will have a context length larger
  // than the CallerContextLength being handled here (i.e. kept by a deeper
  // recursion step). If so, none of the not-cold MIB nodes added for the
  // immediate callers need to be kept. If not, we keep the first (created
  // for the immediate caller) not-cold MIB node.
  bool LongerNotColdContextKept = false;
  for (auto *MIB : NewMIBNodes) {
    auto MIBMD = cast<MDNode>(MIB);
    if (getMIBAllocType(MIBMD) == AllocationType::Cold)
      continue;
    MDNode *StackMD = getMIBStackNode(MIBMD);
    assert(StackMD);
    if (StackMD->getNumOperands() > CallerContextLength) {
      LongerNotColdContextKept = true;
      break;
    }
  }
  // We don't need to keep any for the immediate caller if we already have
  // longer overlapping contexts.
  bool KeepFirstNewNotCold = !LongerNotColdContextKept;
  auto NewColdMIBNodes = make_filter_range(NewMIBNodes, [&](const Metadata *M) {
    auto MIBMD = cast<MDNode>(M);
    // Only keep cold contexts and the first (longest) non-cold context.
    if (getMIBAllocType(MIBMD) != AllocationType::Cold) {
      MDNode *StackMD = getMIBStackNode(MIBMD);
      assert(StackMD);
      // Keep any already kept for longer contexts.
      if (StackMD->getNumOperands() > CallerContextLength)
        return true;
      // Otherwise keep the first one added by the immediate caller if there
      // were no longer contexts.
      if (KeepFirstNewNotCold) {
        KeepFirstNewNotCold = false;
        return true;
      }
      if (MemProfReportHintedSizes)
        EmitMessageForRemovedContexts(MIBMD, "pruned", "");
      return false;
    }
    return true;
  });
  for (auto *M : NewColdMIBNodes)
    SavedMIBNodes.push_back(M);
}

// Recursive helper to trim contexts and create metadata nodes.
// Caller should have pushed Node's loc to MIBCallStack. Doing this in the
// caller makes it simpler to handle the many early returns in this method.
// Updates the total and cold profiled bytes in the subtrie rooted at this node.
bool CallStackTrie::buildMIBNodes(CallStackTrieNode *Node, LLVMContext &Ctx,
                                  std::vector<uint64_t> &MIBCallStack,
                                  std::vector<Metadata *> &MIBNodes,
                                  bool CalleeHasAmbiguousCallerContext,
                                  uint64_t &TotalBytes, uint64_t &ColdBytes) {
  // Trim context below the first node in a prefix with a single alloc type.
  // Add an MIB record for the current call stack prefix.
  if (hasSingleAllocType(Node->AllocTypes)) {
    std::vector<ContextTotalSize> ContextSizeInfo;
    collectContextSizeInfo(Node, ContextSizeInfo);
    MIBNodes.push_back(
        createMIBNode(Ctx, MIBCallStack, (AllocationType)Node->AllocTypes,
                      ContextSizeInfo, MaxColdSize, TotalBytes, ColdBytes));
    return true;
  }

  // We don't have a single allocation type for all the contexts sharing this
  // prefix, so recursively descend into callers in the trie.
  if (!Node->Callers.empty()) {
    bool NodeHasAmbiguousCallerContext = Node->Callers.size() > 1;
    bool AddedMIBNodesForAllCallerContexts = true;
    // Accumulate all new MIB nodes by the recursive calls below into a vector
    // that will later be filtered before adding to the caller's MIBNodes
    // vector.
    std::vector<Metadata *> NewMIBNodes;
    // Determine the total and cold byte counts for all callers, then add to the
    // caller's counts further below.
    uint64_t CallerTotalBytes = 0;
    uint64_t CallerColdBytes = 0;
    for (auto &Caller : Node->Callers) {
      MIBCallStack.push_back(Caller.first);
      AddedMIBNodesForAllCallerContexts &= buildMIBNodes(
          Caller.second, Ctx, MIBCallStack, NewMIBNodes,
          NodeHasAmbiguousCallerContext, CallerTotalBytes, CallerColdBytes);
      // Remove Caller.
      MIBCallStack.pop_back();
    }
    // Pass in the stack length of the MIB nodes added for the immediate caller,
    // which is the current stack length plus 1.
    saveFilteredNewMIBNodes(NewMIBNodes, MIBNodes, MIBCallStack.size() + 1,
                            CallerTotalBytes, CallerColdBytes);
    TotalBytes += CallerTotalBytes;
    ColdBytes += CallerColdBytes;

    if (AddedMIBNodesForAllCallerContexts)
      return true;
    // We expect that the callers should be forced to add MIBs to disambiguate
    // the context in this case (see below).
    assert(!NodeHasAmbiguousCallerContext);
  }

  // If we reached here, then this node does not have a single allocation type,
  // and we didn't add metadata for a longer call stack prefix including any of
  // Node's callers. That means we never hit a single allocation type along all
  // call stacks with this prefix. This can happen due to recursion collapsing
  // or the stack being deeper than tracked by the profiler runtime, leading to
  // contexts with different allocation types being merged. In that case, we
  // trim the context just below the deepest context split, which is this
  // node if the callee has an ambiguous caller context (multiple callers),
  // since the recursive calls above returned false. Conservatively give it
  // non-cold allocation type.
  if (!CalleeHasAmbiguousCallerContext)
    return false;
  std::vector<ContextTotalSize> ContextSizeInfo;
  collectContextSizeInfo(Node, ContextSizeInfo);
  MIBNodes.push_back(createMIBNode(Ctx, MIBCallStack, AllocationType::NotCold,
                                   ContextSizeInfo, MaxColdSize, TotalBytes,
                                   ColdBytes));
  return true;
}

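// For example (illustrative), with contexts {1, 2, 3} (cold), {1, 2, 4}
// (cold) and {1, 5} (notcold), the alloc node 1 has mixed types, but caller
// node 2 is uniformly cold: a single MIB with stack prefix {1, 2} and type
// "cold" covers both cold contexts (frames 3 and 4 are trimmed), plus an MIB
// {1, 5} with type "notcold".
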
void CallStackTrie::addSingleAllocTypeAttribute(CallBase *CI, AllocationType AT,
                                                StringRef Descriptor) {
  auto AllocTypeString = getAllocTypeAttributeString(AT);
  auto A = llvm::Attribute::get(CI->getContext(), "memprof", AllocTypeString);
  CI->addFnAttr(A);
  if (MemProfReportHintedSizes) {
    std::vector<ContextTotalSize> ContextSizeInfo;
    collectContextSizeInfo(Alloc, ContextSizeInfo);
    for (const auto &[FullStackId, TotalSize] : ContextSizeInfo) {
      errs() << "MemProf hinting: Total size for full allocation context hash "
             << FullStackId << " and " << Descriptor << " alloc type "
             << getAllocTypeAttributeString(AT) << ": " << TotalSize << "\n";
    }
  }
  if (ORE)
    ORE->emit(OptimizationRemark(DEBUG_TYPE, "MemprofAttribute", CI)
              << ore::NV("AllocationCall", CI) << " in function "
              << ore::NV("Caller", CI->getFunction())
              << " marked with memprof allocation attribute "
              << ore::NV("Attribute", AllocTypeString));
}

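// Schematically (illustrative), after this runs the call site carries a
// "memprof" string attribute, printed in textual IR via an attribute group:
//   %call = call ptr @malloc(i64 8) #1
//   attributes #1 = { "memprof"="cold" }
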
// Build and attach the minimal necessary MIB metadata. If the alloc has a
// single allocation type, add a function attribute instead. Returns true if
// memprof metadata was attached, false if not (attribute added).
bool CallStackTrie::buildAndAttachMIBMetadata(CallBase *CI) {
  if (hasSingleAllocType(Alloc->AllocTypes)) {
    addSingleAllocTypeAttribute(CI, (AllocationType)Alloc->AllocTypes,
                                "single");
    return false;
  }
  // If there were any hot allocation contexts, the Alloc trie node would have
  // the Hot type set. If so, because we don't currently support cloning for hot
  // contexts, they should be converted to NotCold. This happens in the cloning
  // support anyway, however, doing this now enables more aggressive context
  // trimming when building the MIB metadata (and possibly may make the
  // allocation have a single NotCold allocation type), greatly reducing
  // overheads in bitcode, cloning memory and cloning time.
  if (Alloc->hasAllocType(AllocationType::Hot)) {
    convertHotToNotCold(Alloc);
    // Check whether we now have a single alloc type.
    if (hasSingleAllocType(Alloc->AllocTypes)) {
      addSingleAllocTypeAttribute(CI, (AllocationType)Alloc->AllocTypes,
                                  "single");
      return false;
    }
  }
  auto &Ctx = CI->getContext();
  std::vector<uint64_t> MIBCallStack;
  MIBCallStack.push_back(AllocStackId);
  std::vector<Metadata *> MIBNodes;
  uint64_t TotalBytes = 0;
  uint64_t ColdBytes = 0;
  assert(!Alloc->Callers.empty() && "addCallStack has not been called yet");
  // The CalleeHasAmbiguousCallerContext flag is meant to say whether the
  // callee of the given node has more than one caller. Here the node being
  // passed in is the alloc and it has no callees. So it's false.
  if (buildMIBNodes(Alloc, Ctx, MIBCallStack, MIBNodes,
                    /*CalleeHasAmbiguousCallerContext=*/false, TotalBytes,
                    ColdBytes)) {
    assert(MIBCallStack.size() == 1 &&
           "Should only be left with Alloc's location in stack");
    CI->setMetadata(LLVMContext::MD_memprof, MDNode::get(Ctx, MIBNodes));
    return true;
  }
  // Handle the corner case where the CallStackTrie consists of a single chain
  // to the leaf, and every node in the chain has multiple alloc types. In that
  // case, conservatively give the allocation a non-cold type.
  // FIXME: Avoid this case before the memory profile is created. Alternatively,
  // select the hint based on the fraction of cold bytes.
  addSingleAllocTypeAttribute(CI, AllocationType::NotCold, "indistinguishable");
  return false;
}

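// For example (illustrative), a successful call attaches metadata of the form:
//   %call = call ptr @malloc(i64 8), !memprof !0
//   !0 = !{!1, !3}             ; list of MIB nodes
//   !1 = !{!2, !"cold"}
//   !2 = !{i64 1, i64 2}       ; trimmed call stack ids
//   !3 = !{!4, !"notcold"}
//   !4 = !{i64 1, i64 5}
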
template <>
CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::CallStackIterator(
    const MDNode *N, bool End)
    : N(N) {
  if (!N)
    return;
  Iter = End ? N->op_end() : N->op_begin();
}

template <>
uint64_t
CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::operator*() {
  assert(Iter != N->op_end());
  ConstantInt *StackIdCInt = mdconst::dyn_extract<ConstantInt>(*Iter);
  assert(StackIdCInt);
  return StackIdCInt->getZExtValue();
}

template <> uint64_t CallStack<MDNode, MDNode::op_iterator>::back() const {
  assert(N);
  // Use extract rather than dyn_extract since the result is dereferenced
  // unconditionally.
  return mdconst::extract<ConstantInt>(N->operands().back())
      ->getZExtValue();
}

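// These specializations let generic code walk a metadata-encoded call stack.
// A hypothetical usage sketch (names are illustrative):
//   CallStack<MDNode, MDNode::op_iterator> StackContext(StackMDNode);
//   for (uint64_t StackId : StackContext) {
//     // ... process each stack frame id ...
//   }
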
MDNode *MDNode::getMergedMemProfMetadata(MDNode *A, MDNode *B) {
  // TODO: Support more sophisticated merging, such as selecting the one with
  // more bytes allocated, or implement support for carrying multiple allocation
  // leaf contexts. For now, keep the first one.
  if (A)
    return A;
  return B;
}

MDNode *MDNode::getMergedCallsiteMetadata(MDNode *A, MDNode *B) {
  // TODO: Support more sophisticated merging, which will require support for
  // carrying multiple contexts. For now, keep the first one.
  if (A)
    return A;
  return B;
}