//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Concurrent uptr->T hashmap.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ADDRHASHMAP_H
#define SANITIZER_ADDRHASHMAP_H

#include "sanitizer_common.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_allocator_internal.h"

namespace __sanitizer {

// Concurrent uptr->T hashmap.
// T must be a POD type, kSize is preferably a prime but can be any number.
// Usage example:
//
// typedef AddrHashMap<uptr, 11> Map;
// Map m;
// {
//   Map::Handle h(&m, addr);
//   use h.operator->() to access the data
//   if h.created() then the element was just created, and the current thread
//     has exclusive access to it
//   otherwise the current thread has only read access to the data
// }
// {
//   Map::Handle h(&m, addr, true);
//   this will remove the data from the map in Handle dtor
//   the current thread has exclusive access to the data
//   if !h.exists() then the element never existed
// }
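//
// A more concrete sketch (the payload type, map size and callbacks below are
// hypothetical; they only illustrate the Handle protocol described above):
//
// struct AllocInfo { uptr size; };
// typedef AddrHashMap<AllocInfo, 10007> AllocMap;
// static AllocMap alloc_map;
//
// void OnAlloc(uptr addr, uptr size) {
//   AllocMap::Handle h(&alloc_map, addr);
//   if (h.created())
//     h->size = size;  // exclusive access only for a freshly created cell
// }
//
// void OnFree(uptr addr) {
//   AllocMap::Handle h(&alloc_map, addr, /*remove=*/true);
//   // h.exists() reports whether addr was registered; the cell is removed
//   // when h goes out of scope.
// }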
template<typename T, uptr kSize>
class AddrHashMap {
 private:
  struct Cell {
    atomic_uintptr_t addr;  // 0 means the cell is free
    T                val;
  };

  struct AddBucket {
    uptr cap;       // number of cells allocated
    uptr size;      // number of cells in use
    Cell cells[1];  // variable len
  };

  static const uptr kBucketSize = 3;

  struct Bucket {
    RWMutex          mtx;   // guards cell creation/removal and the add array
    atomic_uintptr_t add;   // AddBucket* with overflow cells, 0 if none
    Cell             cells[kBucketSize];
  };

 public:
  AddrHashMap();

  class Handle {
   public:
    Handle(AddrHashMap<T, kSize> *map, uptr addr);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);

    ~Handle();
    T *operator->();
    T &operator*();
    const T &operator*() const;
    bool created() const;
    bool exists() const;

   private:
    friend AddrHashMap<T, kSize>;
    AddrHashMap<T, kSize> *map_;
    Bucket                *bucket_;
    Cell                  *cell_;
    uptr                   addr_;
    uptr                   addidx_;
    bool                   created_;
    bool                   remove_;
    bool                   create_;
  };

 private:
  friend class Handle;
  Bucket *table_;

  void acquire(Handle *h);
  void release(Handle *h);
  uptr calcHash(uptr addr);
};

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
  map_ = map;
  addr_ = addr;
  remove_ = false;
  create_ = true;
  map_->acquire(this);
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
    bool remove) {
  map_ = map;
  addr_ = addr;
  remove_ = remove;
  create_ = true;
  map_->acquire(this);
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
    bool remove, bool create) {
  map_ = map;
  addr_ = addr;
  remove_ = remove;
  create_ = create;
  map_->acquire(this);
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::~Handle() {
  map_->release(this);
}

template <typename T, uptr kSize>
T *AddrHashMap<T, kSize>::Handle::operator->() {
  return &cell_->val;
}

template <typename T, uptr kSize>
const T &AddrHashMap<T, kSize>::Handle::operator*() const {
  return cell_->val;
}

template <typename T, uptr kSize>
T &AddrHashMap<T, kSize>::Handle::operator*() {
  return cell_->val;
}

template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
  return created_;
}

template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::exists() const {
  return cell_ != nullptr;
}

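// The table is a single flat mmap'ed array of kSize buckets. The fresh
// anonymous mapping is zero-initialized, so every cell starts out empty
// (addr == 0) and every add pointer starts out null.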
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::AddrHashMap() {
  table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
}

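// Positions h on the cell for h->addr_ and records which lock, if any, is
// left held for release(). Fast path (no removal): probe the embedded cells
// lock-free, then the add array under the read lock, which stays held when
// the cell is found there. Slow path: take the bucket's write lock, re-check
// both arrays, and either find the cell (removal), reserve a free one
// (h->created_), or leave h->cell_ == nullptr when the element is absent and
// must not be created.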
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h) {
  uptr addr = h->addr_;
  uptr hash = calcHash(addr);
  Bucket *b = &table_[hash];

  h->created_ = false;
  h->addidx_ = -1U;
  h->bucket_ = b;
  h->cell_ = nullptr;

  // If we want to remove the element, we need exclusive access to the bucket,
  // so skip the lock-free phase.
  if (h->remove_)
    goto locked;

 retry:
  // First try to find an existing element w/o read mutex.
  CHECK(!h->remove_);
  // Check the embed cells.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
    if (addr1 == addr) {
      h->cell_ = c;
      return;
    }
  }

  // Check the add cells with read lock.
  if (atomic_load(&b->add, memory_order_relaxed)) {
    b->mtx.ReadLock();
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        // Found in the add array: keep the read lock held; release() drops it
        // (signalled by addidx_ != -1U).
        h->addidx_ = i;
        h->cell_ = c;
        return;
      }
    }
    b->mtx.ReadUnlock();
  }

 locked:
  // Re-check existence under write lock.
  // Embed cells.
  b->mtx.Lock();
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == addr) {
      if (h->remove_) {
        h->cell_ = c;
        return;
      }
      b->mtx.Unlock();
      goto retry;
    }
  }

  // Add cells.
  AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
  if (add) {
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        if (h->remove_) {
          h->addidx_ = i;
          h->cell_ = c;
          return;
        }
        b->mtx.Unlock();
        goto retry;
      }
    }
  }

  // The element does not exist; nothing to create if we came to remove it
  // or were asked not to create it.
  if (h->remove_ || !h->create_) {
    b->mtx.Unlock();
    return;
  }

  // Now try to create it under the mutex.
  h->created_ = true;
  // See if we have a free embed cell.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == 0) {
      h->cell_ = c;
      return;
    }
  }

  // Store in the add cells.
  if (!add) {
    // Allocate a new add array.
    const uptr kInitSize = 64;
    add = (AddBucket*)InternalAlloc(kInitSize);
    internal_memset(add, 0, kInitSize);
    // +1 accounts for the single cell embedded in AddBucket itself.
    add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add->size = 0;
    atomic_store(&b->add, (uptr)add, memory_order_relaxed);
  }
  if (add->size == add->cap) {
    // Grow existing add array.
    uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
    uptr newsize = oldsize * 2;
    AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
    internal_memset(add1, 0, newsize);
    add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add1->size = add->size;
    internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
    InternalFree(add);
    atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
    add = add1;
  }
  // Store.
  uptr i = add->size++;
  Cell *c = &add->cells[i];
  CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
  h->addidx_ = i;
  h->cell_ = c;
}

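// Undoes acquire(): publishes the address of a newly created cell (making it
// visible to lock-free readers), clears and compacts the bucket when the
// handle was opened for removal, and drops whichever lock acquire() left
// held.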
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) {
  if (!h->cell_)
    return;
  Bucket *b = h->bucket_;
  Cell *c = h->cell_;
  uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
  if (h->created_) {
    // Denote completion of insertion.
    CHECK_EQ(addr1, 0);
    // After the following store, the element becomes available
    // for lock-free reads.
    atomic_store(&c->addr, h->addr_, memory_order_release);
    b->mtx.Unlock();
  } else if (h->remove_) {
    // Denote that the cell is empty now.
    CHECK_EQ(addr1, h->addr_);
    atomic_store(&c->addr, 0, memory_order_release);
    // See if we need to compact the bucket.
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    if (h->addidx_ == -1U) {
      // Removed from embed array, move an add element into the freed cell.
      if (add && add->size != 0) {
        uptr last = --add->size;
        Cell *c1 = &add->cells[last];
        c->val = c1->val;
        uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
        atomic_store(&c->addr, addr1, memory_order_release);
        atomic_store(&c1->addr, 0, memory_order_release);
      }
    } else {
      // Removed from add array, compact it.
      uptr last = --add->size;
      Cell *c1 = &add->cells[last];
      if (c != c1) {
        *c = *c1;
        atomic_store(&c1->addr, 0, memory_order_relaxed);
      }
    }
    if (add && add->size == 0) {
      // FIXME(dvyukov): free add?
    }
    b->mtx.Unlock();
  } else {
    CHECK_EQ(addr1, h->addr_);
    if (h->addidx_ != -1U)
      b->mtx.ReadUnlock();
  }
}

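// Cheap shift/xor mixing of the address bits, reduced modulo kSize.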
template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
  addr += addr << 10;
  addr ^= addr >> 6;
  return addr % kSize;
}

} // namespace __sanitizer

#endif // SANITIZER_ADDRHASHMAP_H