#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
#define JEMALLOC_INTERNAL_ARENA_STATS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/lockedint.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/pa.h"
#include "jemalloc/internal/sc.h"

JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS

typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	locked_u64_t	nmalloc;
	locked_u64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	locked_u64_t	nrequests; /* Partially derived. */
	/*
	 * Number of tcache fills / flushes for large size classes (similarly,
	 * periodically merged).  Note that there is currently no batch fill
	 * for large size classes (i.e. they are filled one at a time);
	 * flushes, however, may be batched.
	 */
	locked_u64_t	nfills; /* Partially derived. */
	locked_u64_t	nflushes; /* Partially derived. */

	/* Current number of allocations of this size class. */
	size_t		curlextents; /* Derived. */
};
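
/*
 * Illustrative sketch (not part of the upstream header): one plausible
 * reading of the "Derived." annotation on curlextents is that, at
 * stats-merge time, the current count is computed as the difference of the
 * two monotonic counters above, once both have been read under the
 * LOCKEDINT mutex.  The helper name below is hypothetical.
 */
static inline size_t
arena_stats_large_curlextents_sketch(uint64_t nmalloc, uint64_t ndalloc) {
	/* Deallocations can never outnumber allocations. */
	assert(nmalloc >= ndalloc);
	return (size_t)(nmalloc - ndalloc);
}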

/*
 * Arena stats.  Note that fields marked "derived" are not directly maintained
 * within the arena code; rather their values are derived during stats merge
 * requests.
 */
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
	LOCKEDINT_MTX_DECLARE(mtx)

	/*
	 * resident includes the base stats -- that's why it lives here and not
	 * in pa_shard_stats_t.
	 */
	size_t			base; /* Derived. */
	size_t			resident; /* Derived. */
	size_t			metadata_thp; /* Derived. */
	size_t			mapped; /* Derived. */

	atomic_zu_t		internal;

	size_t			allocated_large; /* Derived. */
	uint64_t		nmalloc_large; /* Derived. */
	uint64_t		ndalloc_large; /* Derived. */
	uint64_t		nfills_large; /* Derived. */
	uint64_t		nflushes_large; /* Derived. */
	uint64_t		nrequests_large; /* Derived. */

	/*
	 * The stats logically owned by the pa_shard in the same arena.  This
	 * lives here only because it's convenient for the purposes of the ctl
	 * module -- it only knows about the single arena_stats.
	 */
	pa_shard_stats_t	pa_shard_stats;

	/* Number of bytes cached in tcache associated with this arena. */
	size_t			tcache_bytes; /* Derived. */
	size_t			tcache_stashed_bytes; /* Derived. */

	mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];

	/* One element for each large size class. */
	arena_stats_large_t	lstats[SC_NSIZES - SC_NBINS];

	/* Arena uptime. */
	nstime_t		uptime;
};
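
/*
 * Illustrative sketch (not part of the upstream header): large size classes
 * occupy szind values in [SC_NBINS, SC_NSIZES), so their per-class stats
 * live at lstats[szind - SC_NBINS], exactly as indexed in
 * arena_stats_large_flush_nrequests_add() below.  The helper name is
 * hypothetical.
 */
static inline arena_stats_large_t *
arena_stats_lstats_get_sketch(arena_stats_t *arena_stats, szind_t szind) {
	assert(szind >= SC_NBINS && szind < SC_NSIZES);
	return &arena_stats->lstats[szind - SC_NBINS];
}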

static inline bool
arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
			assert(((char *)arena_stats)[i] == 0);
		}
	}
	if (LOCKEDINT_MTX_INIT(arena_stats->mtx, "arena_stats",
	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
		return true;
	}
	/* Memory is zeroed, so there is no need to clear stats. */
	return false;
}
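
/*
 * Illustrative sketch (not part of the upstream header): arena_stats_init()
 * requires its argument to be pre-zeroed (the config_debug loop above
 * verifies this), which normally holds because the stats are carved out of
 * zeroed base-allocator memory.  A hypothetical caller supplying its own
 * storage would zero it first; this assumes <string.h> is visible, as it is
 * in jemalloc's internal build.
 */
static inline bool
arena_stats_init_zeroed_sketch(tsdn_t *tsdn, arena_stats_t *arena_stats) {
	memset(arena_stats, 0, sizeof(*arena_stats));
	return arena_stats_init(tsdn, arena_stats);
}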

static inline void
arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind, uint64_t nrequests) {
	LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx);
	arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
	    &lstats->nrequests, nrequests);
	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
	    &lstats->nflushes, 1);
	LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
}
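
/*
 * Illustrative sketch (not part of the upstream header): the matching read
 * side of the counters updated above, following the same LOCKEDINT pattern.
 * locked_read_u64() is the lockedint.h read primitive; the helper name is
 * hypothetical.
 */
static inline void
arena_stats_large_flush_nrequests_read_sketch(tsdn_t *tsdn,
    arena_stats_t *arena_stats, szind_t szind, uint64_t *nrequests,
    uint64_t *nflushes) {
	arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
	LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx);
	*nrequests = locked_read_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
	    &lstats->nrequests);
	*nflushes = locked_read_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
	    &lstats->nflushes);
	LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
}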

#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */