#ifndef JEMALLOC_INTERNAL_BIN_H
#define JEMALLOC_INTERNAL_BIN_H

#include "jemalloc/internal/bin_stats.h"
#include "jemalloc/internal/bin_types.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sc.h"

/*
 * A bin contains a set of extents that are currently being used for slab
 * allocations.
 */
typedef struct bin_s bin_t;
struct bin_s {
	/* All operations on bin_t fields require lock ownership. */
	malloc_mutex_t		lock;

	/*
	 * Bin statistics.  These get touched every time the lock is acquired,
	 * so put them close by in the hopes of getting some cache locality.
	 */
	bin_stats_t	stats;

	/*
	 * Current slab being used to service allocations of this bin's size
	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
	 * slabcur is reassigned, the previous slab must be deallocated or
	 * inserted into slabs_{nonfull,full}.
	 */
	edata_t			*slabcur;

	/*
	 * Heap of non-full slabs.  This heap is used to ensure that new
	 * allocations come from the non-full slab that is oldest/lowest in
	 * memory.
	 */
	edata_heap_t		slabs_nonfull;

	/* List used to track full slabs. */
	edata_list_active_t	slabs_full;
};

/* A set of sharded bins of the same size class. */
typedef struct bins_s bins_t;
struct bins_s {
	/* Sharded bins.  Dynamically sized. */
	bin_t *bin_shards;
};

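/*
 * Fill bin_shards[] with the default shard count for each small size class.
 */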
void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]);
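
/*
 * Set the shard count to nshards for every bin whose size class falls within
 * [start_size, end_size].  Returns true on error (e.g. an invalid shard
 * count).
 */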
bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
    size_t end_size, size_t nshards);

/* Initializes a bin to empty.  Returns true on error. */
bool bin_init(bin_t *bin);

/* Forking. */
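/*
 * bin_prefork() acquires the bin's lock before fork() so the bin is in a
 * consistent state at the moment of the fork; bin_postfork_parent() releases
 * the lock in the parent, and bin_postfork_child() resets it in the child.
 */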
void bin_prefork(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);

/* Stats. */
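/*
 * Accumulate bin's counters (and its mutex profiling data) into
 * dst_bin_stats while holding bin's lock, so that the merged values form a
 * consistent snapshot.
 */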
static inline void
bin_stats_merge(tsdn_t *tsdn, bin_stats_data_t *dst_bin_stats, bin_t *bin) {
	malloc_mutex_lock(tsdn, &bin->lock);
	malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
	bin_stats_t *stats = &dst_bin_stats->stats_data;
	stats->nmalloc += bin->stats.nmalloc;
	stats->ndalloc += bin->stats.ndalloc;
	stats->nrequests += bin->stats.nrequests;
	stats->curregs += bin->stats.curregs;
	stats->nfills += bin->stats.nfills;
	stats->nflushes += bin->stats.nflushes;
	stats->nslabs += bin->stats.nslabs;
	stats->reslabs += bin->stats.reslabs;
	stats->curslabs += bin->stats.curslabs;
	stats->nonfull_slabs += bin->stats.nonfull_slabs;
	malloc_mutex_unlock(tsdn, &bin->lock);
}

#endif /* JEMALLOC_INTERNAL_BIN_H */