#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H

#include "jemalloc/internal/mutex.h"

static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum,
    uint64_t accumbytes) {
	cassert(config_prof);

	bool overflow;
	uint64_t a0, a1;

	/*
	 * If the application allocates fast enough (and/or if idump is slow
	 * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
	 * idump trigger coalescing.  This is an intentional mechanism that
	 * avoids rate-limiting allocation.
	 */
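	/*
	 * Accumulate along one of two paths: when 64-bit atomics are
	 * available, a relaxed CAS loop adds accumbytes and, on overflow,
	 * folds the running total back into [0, prof_interval); otherwise the
	 * same update is serialized under prof_accum->mtx.
	 */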
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
	do {
		a1 = a0 + accumbytes;
		assert(a1 >= a0);
		overflow = (a1 >= prof_interval);
		if (overflow) {
			a1 %= prof_interval;
		}
	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = a0 + accumbytes;
	overflow = (a1 >= prof_interval);
	if (overflow) {
		a1 %= prof_interval;
	}
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
	return overflow;
}
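
/*
 * Usage sketch (illustrative, not part of this header): callers treat a true
 * return value as "at least prof_interval bytes have accumulated since the
 * last interval dump" and trigger one, e.g.:
 *
 *	if (prof_accum_add(tsdn, &prof_accum, accumbytes)) {
 *		prof_idump(tsdn);
 *	}
 *
 * prof_idump() here stands for the interval-dump entry point declared in the
 * prof externs; the surrounding call pattern is an assumption, not a quote of
 * any particular call site.
 */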

static inline void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum,
    size_t usize) {
	cassert(config_prof);

	/*
	 * Cancel out as much of the excessive prof_accumbytes increase as
	 * possible without underflowing.  Interval-triggered dumps occur
	 * slightly more often than intended as a result of incomplete
	 * canceling.
	 */
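	/*
	 * The amount canceled is (SC_LARGE_MINCLASS - usize), clamped so that
	 * accumbytes never goes below zero.  This corresponds to the
	 * over-count incurred when a sampled allocation of usable size usize
	 * was charged at the promoted large-minclass size; e.g., with a
	 * 16384-byte SC_LARGE_MINCLASS (configuration-dependent) and
	 * usize == 80, at most 16304 bytes are subtracted.
	 */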
	uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
	do {
		a1 = (a0 >= SC_LARGE_MINCLASS - usize)
		    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = (a0 >= SC_LARGE_MINCLASS - usize)
	    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
}
73 
74 JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void)75 prof_active_get_unlocked(void) {
76 	/*
77 	 * Even if opt_prof is true, sampling can be temporarily disabled by
78 	 * setting prof_active to false.  No locking is used when reading
79 	 * prof_active in the fast path, so there are no guarantees regarding
80 	 * how long it will take for all threads to notice state changes.
81 	 */
82 	return prof_active;
83 }
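
/*
 * Usage sketch (illustrative): allocation fast paths read this flag once and
 * skip all sampling work when it is false, typically by falling back to the
 * "not sampled" tctx sentinel used elsewhere in the prof code:
 *
 *	if (!prof_active_get_unlocked()) {
 *		tctx = (prof_tctx_t *)(uintptr_t)1U;
 *	}
 *
 * Because the read is unlocked, a prof.active toggle may take a short time to
 * become visible to all threads, as noted above.
 */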

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */