//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of a mapping from arbitrary values to unique 32-bit
// identifiers.
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_STACKDEPOTBASE_H
#define SANITIZER_STACKDEPOTBASE_H

#include <stdio.h>

#include "sanitizer_atomic.h"
#include "sanitizer_flat_map.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

template <class Node, int kReservedBits, int kTabSizeLog>
class StackDepotBase {
  static constexpr u32 kIdSizeLog =
      sizeof(u32) * 8 - Max(kReservedBits, 1 /* At least 1 bit for locking. */);
  static constexpr u32 kNodesSize1Log = kIdSizeLog / 2;
  static constexpr u32 kNodesSize2Log = kIdSizeLog - kNodesSize1Log;
  static constexpr int kTabSize = 1 << kTabSizeLog;  // Hash table size.
  static constexpr u32 kUnlockMask = (1ull << kIdSizeLog) - 1;
  static constexpr u32 kLockMask = ~kUnlockMask;

 public:
  typedef typename Node::args_type args_type;
  typedef typename Node::handle_type handle_type;
  typedef typename Node::hash_type hash_type;

  static constexpr u64 kNodesSize1 = 1ull << kNodesSize1Log;
  static constexpr u64 kNodesSize2 = 1ull << kNodesSize2Log;

  // Maps a stack trace to a unique id.
  u32 Put(args_type args, bool *inserted = nullptr);
  // Retrieves a stored stack trace by its id.
  args_type Get(u32 id);

  StackDepotStats GetStats() const {
    return {
        atomic_load_relaxed(&n_uniq_ids),
        nodes.MemoryUsage() + Node::allocated(),
    };
  }

  void LockAll();
  void UnlockAll();
  void PrintAll();

  void TestOnlyUnmap() {
    nodes.TestOnlyUnmap();
    internal_memset(this, 0, sizeof(*this));
  }

 private:
  friend Node;
  u32 find(u32 s, args_type args, hash_type hash) const;
  static u32 lock(atomic_uint32_t *p);
  static void unlock(atomic_uint32_t *p, u32 s);
  atomic_uint32_t tab[kTabSize];  // Hash table of Node's.

  atomic_uint32_t n_uniq_ids;

  TwoLevelMap<Node, kNodesSize1, kNodesSize2> nodes;

  friend class StackDepotReverseMap;
};

template <class Node, int kReservedBits, int kTabSizeLog>
u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(
    u32 s, args_type args, hash_type hash) const {
  // Searches the linked list starting at s for the stack; returns its id,
  // or 0 if not found.
  while (s) {
    const Node &node = nodes[s];
    if (node.eq(hash, args))
      return s;
    s = node.link;
  }
  return 0;
}

template <class Node, int kReservedBits, int kTabSizeLog>
u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(atomic_uint32_t *p) {
  // Uses the high (reserved) bits of the bucket value as a mutex.
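  // The loop below spins until the lock bits are clear and then sets them
  // with a CAS: kLockMask covers the top Max(kReservedBits, 1) bits of the
  // bucket, so ids in the low kIdSizeLog bits are never disturbed, and a
  // successful acquire returns the unlocked list head. Short bursts of
  // proc_yield() escalate to internal_sched_yield() if the bucket stays
  // contended.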
  for (int i = 0;; i++) {
    u32 cmp = atomic_load(p, memory_order_relaxed);
    if ((cmp & kLockMask) == 0 &&
        atomic_compare_exchange_weak(p, &cmp, cmp | kLockMask,
                                     memory_order_acquire))
      return cmp;
    if (i < 10)
      proc_yield(10);
    else
      internal_sched_yield();
  }
}

template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
    atomic_uint32_t *p, u32 s) {
  DCHECK_EQ(s & kLockMask, 0);
  atomic_store(p, s, memory_order_release);
}

template <class Node, int kReservedBits, int kTabSizeLog>
u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
                                                          bool *inserted) {
  if (inserted)
    *inserted = false;
  if (!LIKELY(Node::is_valid(args)))
    return 0;
  hash_type h = Node::hash(args);
  atomic_uint32_t *p = &tab[h % kTabSize];
  u32 v = atomic_load(p, memory_order_consume);
  u32 s = v & kUnlockMask;
  // First, try to find the existing stack.
  u32 node = find(s, args, h);
  if (LIKELY(node))
    return node;

  // Not found: lock the bucket, retry the search, and insert a new node.
  u32 s2 = lock(p);
  if (s2 != s) {
    node = find(s2, args, h);
    if (node) {
      unlock(p, s2);
      return node;
    }
  }
  s = atomic_fetch_add(&n_uniq_ids, 1, memory_order_relaxed) + 1;
  CHECK_EQ(s & kUnlockMask, s);
  CHECK_EQ(s & (((u32)-1) >> kReservedBits), s);
  Node &new_node = nodes[s];
  new_node.store(s, args, h);
  new_node.link = s2;
  unlock(p, s);
  if (inserted)
    *inserted = true;
  return s;
}

template <class Node, int kReservedBits, int kTabSizeLog>
typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
  if (id == 0)
    return args_type();
  CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
  if (!nodes.contains(id))
    return args_type();
  const Node &node = nodes[id];
  return node.load(id);
}

template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
  for (int i = 0; i < kTabSize; ++i) {
    lock(&tab[i]);
  }
}

template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
  for (int i = 0; i < kTabSize; ++i) {
    atomic_uint32_t *p = &tab[i];
    u32 s = atomic_load(p, memory_order_relaxed);
    unlock(p, s & kUnlockMask);
  }
}

template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::PrintAll() {
  for (int i = 0; i < kTabSize; ++i) {
    atomic_uint32_t *p = &tab[i];
    u32 s = atomic_load(p, memory_order_consume) & kUnlockMask;
    while (s) {
      const Node &node = nodes[s];
      Printf("Stack for id %u:\n", s);
      node.load(s).Print();
      s = node.link;
    }
  }
}

}  // namespace __sanitizer

#endif  // SANITIZER_STACKDEPOTBASE_H
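
// Usage sketch (illustrative only; "MyNode" and the template arguments below
// are placeholders, not part of this header -- a real Node type must supply
// args_type, handle_type, hash_type, is_valid(), hash(), eq(), store(),
// load(), allocated(), and a `link` field):
//
//   StackDepotBase<MyNode, /*kReservedBits=*/1, /*kTabSizeLog=*/20> depot;
//   bool inserted = false;
//   u32 id = depot.Put(args, &inserted);  // 0 if Node::is_valid(args) fails.
//   MyNode::args_type copy = depot.Get(id);  // Default args_type for id 0.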