1b7eaed25SJason Evans #define JEMALLOC_BACKGROUND_THREAD_C_
2b7eaed25SJason Evans #include "jemalloc/internal/jemalloc_preamble.h"
3b7eaed25SJason Evans #include "jemalloc/internal/jemalloc_internal_includes.h"
4b7eaed25SJason Evans
5b7eaed25SJason Evans #include "jemalloc/internal/assert.h"
6b7eaed25SJason Evans
7*c5ad8142SEric van Gyzen JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
8*c5ad8142SEric van Gyzen
9b7eaed25SJason Evans /******************************************************************************/
10b7eaed25SJason Evans /* Data. */
11b7eaed25SJason Evans
/* This option should be opt-in only. */
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT + 1;

/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state.  Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
/* Number of background threads currently running; see assertions below. */
size_t n_background_threads;
/* Upper bound on background threads; presumably fixed after boot -- confirm. */
size_t max_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;
26b7eaed25SJason Evans
27b7eaed25SJason Evans /******************************************************************************/
28b7eaed25SJason Evans
29b7eaed25SJason Evans #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
30b7eaed25SJason Evans
31b7eaed25SJason Evans static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
32b7eaed25SJason Evans void *(*)(void *), void *__restrict);
33b7eaed25SJason Evans
/*
 * One-time work done before forwarding to the real pthread_create: with
 * lazy locking, flip isthreaded so jemalloc starts taking locks.
 */
static void
pthread_create_wrapper_init(void) {
#ifdef JEMALLOC_LAZY_LOCK
	if (!isthreaded) {
		isthreaded = true;
	}
#endif
}
42b7eaed25SJason Evans
/*
 * Wrapper around the underlying pthread_create.  Ensures jemalloc's
 * threaded-mode bookkeeping is done before any thread is spawned.
 *
 * NOTE(review): assumes pthread_create_fptr has been resolved (e.g. via
 * dlsym) before the first call -- it is dereferenced unconditionally here;
 * confirm initialization happens elsewhere in this file/translation unit.
 */
int
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *__restrict arg) {
	pthread_create_wrapper_init();

	return pthread_create_fptr(thread, attr, start_routine, arg);
}
50b7eaed25SJason Evans #endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */
51b7eaed25SJason Evans
#ifndef JEMALLOC_BACKGROUND_THREAD
/*
 * Builds without background-thread support still need these symbols to
 * link; every entry point aborts via not_reached() if ever invoked.
 */
#define NOT_REACHED { not_reached(); }
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
bool background_thread_stats_read(tsdn_t *tsdn,
    background_thread_stats_t *stats) NOT_REACHED
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
#undef NOT_REACHED
67b7eaed25SJason Evans #else
68b7eaed25SJason Evans
69b7eaed25SJason Evans static bool background_thread_enabled_at_fork;
70b7eaed25SJason Evans
/* Reset a per-thread info slot: clear wakeup time, pending work, and stats. */
static void
background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
	background_thread_wakeup_time_set(tsdn, info, 0);
	info->npages_to_purge_new = 0;
	if (config_stats) {
		info->tot_n_runs = 0;
		nstime_init(&info->tot_sleep_time, 0);
	}
}
80b7eaed25SJason Evans
81b7eaed25SJason Evans static inline bool
82*c5ad8142SEric van Gyzen set_current_thread_affinity(int cpu) {
83b7eaed25SJason Evans #if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
84b7eaed25SJason Evans cpu_set_t cpuset;
85b7eaed25SJason Evans CPU_ZERO(&cpuset);
86b7eaed25SJason Evans CPU_SET(cpu, &cpuset);
87b7eaed25SJason Evans int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
88b7eaed25SJason Evans
89b7eaed25SJason Evans return (ret != 0);
90b7eaed25SJason Evans #else
91b7eaed25SJason Evans return false;
92b7eaed25SJason Evans #endif
93b7eaed25SJason Evans }
94b7eaed25SJason Evans
95b7eaed25SJason Evans /* Threshold for determining when to wake up the background thread. */
96b7eaed25SJason Evans #define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
97b7eaed25SJason Evans #define BILLION UINT64_C(1000000000)
98b7eaed25SJason Evans /* Minimal sleep interval 100 ms. */
99b7eaed25SJason Evans #define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
100b7eaed25SJason Evans
101b7eaed25SJason Evans static inline size_t
102b7eaed25SJason Evans decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
103b7eaed25SJason Evans size_t i;
104b7eaed25SJason Evans uint64_t sum = 0;
105b7eaed25SJason Evans for (i = 0; i < interval; i++) {
106b7eaed25SJason Evans sum += decay->backlog[i] * h_steps[i];
107b7eaed25SJason Evans }
108b7eaed25SJason Evans for (; i < SMOOTHSTEP_NSTEPS; i++) {
109b7eaed25SJason Evans sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
110b7eaed25SJason Evans }
111b7eaed25SJason Evans
112b7eaed25SJason Evans return (size_t)(sum >> SMOOTHSTEP_BFP);
113b7eaed25SJason Evans }
114b7eaed25SJason Evans
/*
 * Compute how long the background thread may sleep before the next purge
 * pass for one decay state (dirty or muzzy).  Returns a nanosecond
 * interval, clamped to BACKGROUND_THREAD_MIN_INTERVAL_NS on the low end.
 */
static uint64_t
arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
    extents_t *extents) {
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* Use minimal interval if decay is contended. */
		return BACKGROUND_THREAD_MIN_INTERVAL_NS;
	}

	uint64_t interval;
	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is eagerly done or disabled currently. */
		interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
		goto label_done;
	}

	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);
	size_t npages = extents_npages_get(extents);
	if (npages == 0) {
		/* No pages now; check whether any backlog remains. */
		unsigned i;
		for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
			if (decay->backlog[i] > 0) {
				break;
			}
		}
		if (i == SMOOTHSTEP_NSTEPS) {
			/* No dirty pages recorded.  Sleep indefinitely. */
			interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
			goto label_done;
		}
	}
	if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		/* Use max interval. */
		interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
		goto label_done;
	}

	/* Search bounds for the sleep interval, in decay-epoch units. */
	size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
	size_t ub = SMOOTHSTEP_NSTEPS;
	/* Minimal 2 intervals to ensure reaching next epoch deadline. */
	lb = (lb < 2) ? 2 : lb;
	if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
	    (lb + 2 > ub)) {
		/* Bounds too tight to search; fall back to the minimum. */
		interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
		goto label_done;
	}

	assert(lb + 2 <= ub);
	size_t npurge_lb, npurge_ub;
	npurge_lb = decay_npurge_after_interval(decay, lb);
	if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		/* Even the shortest interval accumulates enough work. */
		interval = decay_interval_ns * lb;
		goto label_done;
	}
	npurge_ub = decay_npurge_after_interval(decay, ub);
	if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		/* Even the longest interval stays under the threshold. */
		interval = decay_interval_ns * ub;
		goto label_done;
	}

	/*
	 * Binary search between lb and ub for an interval whose projected
	 * purge count crosses the threshold; bounded by lg(NSTEPS)+1 steps.
	 */
	unsigned n_search = 0;
	size_t target, npurge;
	while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
	    && (lb + 2 < ub)) {
		target = (lb + ub) / 2;
		npurge = decay_npurge_after_interval(decay, target);
		if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
			ub = target;
			npurge_ub = npurge;
		} else {
			lb = target;
			npurge_lb = npurge;
		}
		assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
	}
	interval = decay_interval_ns * (ub + lb) / 2;
label_done:
	/* Never sleep for less than the minimal interval. */
	interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
	    BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return interval;
}
199b7eaed25SJason Evans
200b7eaed25SJason Evans /* Compute purge interval for background threads. */
201b7eaed25SJason Evans static uint64_t
202b7eaed25SJason Evans arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
203b7eaed25SJason Evans uint64_t i1, i2;
204b7eaed25SJason Evans i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
205b7eaed25SJason Evans &arena->extents_dirty);
206b7eaed25SJason Evans if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
207b7eaed25SJason Evans return i1;
208b7eaed25SJason Evans }
209b7eaed25SJason Evans i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
210b7eaed25SJason Evans &arena->extents_muzzy);
211b7eaed25SJason Evans
212b7eaed25SJason Evans return i1 < i2 ? i1 : i2;
213b7eaed25SJason Evans }
214b7eaed25SJason Evans
/*
 * Sleep on info->cond for `interval` ns (or indefinitely).  Caller holds
 * info->mtx; the condvar wait releases and reacquires it.
 */
static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
    uint64_t interval) {
	if (config_stats) {
		info->tot_n_runs++;
	}
	info->npages_to_purge_new = 0;

	struct timeval tv;
	/* Specific clock required by timedwait. */
	gettimeofday(&tv, NULL);
	nstime_t before_sleep;
	nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);

	int ret;
	if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
		assert(background_thread_indefinite_sleep(info));
		ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
		assert(ret == 0);
	} else {
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
		    interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
		/* We need malloc clock (can be different from tv). */
		nstime_t next_wakeup;
		nstime_init(&next_wakeup, 0);
		nstime_update(&next_wakeup);
		nstime_iadd(&next_wakeup, interval);
		assert(nstime_ns(&next_wakeup) <
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
		/* Publish the planned wakeup time for observers. */
		background_thread_wakeup_time_set(tsdn, info,
		    nstime_ns(&next_wakeup));

		/* Absolute timedwait deadline on the gettimeofday clock. */
		nstime_t ts_wakeup;
		nstime_copy(&ts_wakeup, &before_sleep);
		nstime_iadd(&ts_wakeup, interval);
		struct timespec ts;
		ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
		ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);

		assert(!background_thread_indefinite_sleep(info));
		ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
		assert(ret == ETIMEDOUT || ret == 0);
		/* Back to "indefinite" until the next sleep is scheduled. */
		background_thread_wakeup_time_set(tsdn, info,
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	}
	if (config_stats) {
		/* Accumulate actual slept time; skip if the clock went back. */
		gettimeofday(&tv, NULL);
		nstime_t after_sleep;
		nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
		if (nstime_compare(&after_sleep, &before_sleep) > 0) {
			nstime_subtract(&after_sleep, &before_sleep);
			nstime_add(&info->tot_sleep_time, &after_sleep);
		}
	}
}
270b7eaed25SJason Evans
271b7eaed25SJason Evans static bool
272b7eaed25SJason Evans background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
273b7eaed25SJason Evans if (unlikely(info->state == background_thread_paused)) {
274b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &info->mtx);
275b7eaed25SJason Evans /* Wait on global lock to update status. */
276b7eaed25SJason Evans malloc_mutex_lock(tsdn, &background_thread_lock);
277b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &background_thread_lock);
278b7eaed25SJason Evans malloc_mutex_lock(tsdn, &info->mtx);
279b7eaed25SJason Evans return true;
280b7eaed25SJason Evans }
281b7eaed25SJason Evans
282b7eaed25SJason Evans return false;
283b7eaed25SJason Evans }
284b7eaed25SJason Evans
/*
 * One work-then-sleep cycle for background thread `ind`: decay every arena
 * this thread is responsible for, then sleep for the shortest computed
 * purge interval.
 */
static inline void
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) {
	uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
	unsigned narenas = narenas_total_get();

	/* Thread ind covers arenas ind, ind + max, ind + 2*max, ... */
	for (unsigned i = ind; i < narenas; i += max_background_threads) {
		arena_t *arena = arena_get(tsdn, i, false);
		if (!arena) {
			continue;
		}
		arena_decay(tsdn, arena, true, false);
		if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
			/* Min interval will be used. */
			continue;
		}
		uint64_t interval = arena_decay_compute_purge_interval(tsdn,
		    arena);
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
		if (min_interval > interval) {
			min_interval = interval;
		}
	}
	background_thread_sleep(tsdn, info, min_interval);
}
309b7eaed25SJason Evans
/*
 * Stop and join one background thread.  Returns true on failure (join
 * error).  The global lock must be held only when disabling thread 0; for
 * other threads it must NOT be held (thread 0 calls this at shutdown).
 */
static bool
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
	if (info == &background_thread_info[0]) {
		malloc_mutex_assert_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	} else {
		malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	}

	/* Avoid re-entering allocator internals while joining. */
	pre_reentrancy(tsd, NULL);
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	bool has_thread;
	assert(info->state != background_thread_paused);
	if (info->state == background_thread_started) {
		has_thread = true;
		info->state = background_thread_stopped;
		/* Wake the thread so it observes the stopped state. */
		pthread_cond_signal(&info->cond);
	} else {
		has_thread = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

	if (!has_thread) {
		post_reentrancy(tsd);
		return false;
	}
	void *ret;
	if (pthread_join(info->thread, &ret)) {
		post_reentrancy(tsd);
		return true;
	}
	assert(ret == NULL);
	n_background_threads--;
	post_reentrancy(tsd);

	return false;
}
348b7eaed25SJason Evans
349b7eaed25SJason Evans static void *background_thread_entry(void *ind_arg);
350b7eaed25SJason Evans
3518b2f5aafSJason Evans static int
3528b2f5aafSJason Evans background_thread_create_signals_masked(pthread_t *thread,
3538b2f5aafSJason Evans const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
3548b2f5aafSJason Evans /*
3558b2f5aafSJason Evans * Mask signals during thread creation so that the thread inherits
3568b2f5aafSJason Evans * an empty signal set.
3578b2f5aafSJason Evans */
3588b2f5aafSJason Evans sigset_t set;
3598b2f5aafSJason Evans sigfillset(&set);
3608b2f5aafSJason Evans sigset_t oldset;
3618b2f5aafSJason Evans int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
3628b2f5aafSJason Evans if (mask_err != 0) {
3638b2f5aafSJason Evans return mask_err;
3648b2f5aafSJason Evans }
3658b2f5aafSJason Evans int create_err = pthread_create_wrapper(thread, attr, start_routine,
3668b2f5aafSJason Evans arg);
3678b2f5aafSJason Evans /*
3688b2f5aafSJason Evans * Restore the signal mask. Failure to restore the signal mask here
3698b2f5aafSJason Evans * changes program behavior.
3708b2f5aafSJason Evans */
3718b2f5aafSJason Evans int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
3728b2f5aafSJason Evans if (restore_err != 0) {
3738b2f5aafSJason Evans malloc_printf("<jemalloc>: background thread creation "
3748b2f5aafSJason Evans "failed (%d), and signal mask restoration failed "
3758b2f5aafSJason Evans "(%d)\n", create_err, restore_err);
3768b2f5aafSJason Evans if (opt_abort) {
3778b2f5aafSJason Evans abort();
3788b2f5aafSJason Evans }
3798b2f5aafSJason Evans }
3808b2f5aafSJason Evans return create_err;
3818b2f5aafSJason Evans }
3828b2f5aafSJason Evans
3830ef50b4eSJason Evans static bool
384b7eaed25SJason Evans check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
385b7eaed25SJason Evans bool *created_threads) {
3860ef50b4eSJason Evans bool ret = false;
387b7eaed25SJason Evans if (likely(*n_created == n_background_threads)) {
3880ef50b4eSJason Evans return ret;
389b7eaed25SJason Evans }
390b7eaed25SJason Evans
3910ef50b4eSJason Evans tsdn_t *tsdn = tsd_tsdn(tsd);
3920ef50b4eSJason Evans malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
3930ef50b4eSJason Evans for (unsigned i = 1; i < max_background_threads; i++) {
394b7eaed25SJason Evans if (created_threads[i]) {
395b7eaed25SJason Evans continue;
396b7eaed25SJason Evans }
397b7eaed25SJason Evans background_thread_info_t *info = &background_thread_info[i];
3980ef50b4eSJason Evans malloc_mutex_lock(tsdn, &info->mtx);
3990ef50b4eSJason Evans /*
4000ef50b4eSJason Evans * In case of the background_thread_paused state because of
4010ef50b4eSJason Evans * arena reset, delay the creation.
4020ef50b4eSJason Evans */
403b7eaed25SJason Evans bool create = (info->state == background_thread_started);
4040ef50b4eSJason Evans malloc_mutex_unlock(tsdn, &info->mtx);
405b7eaed25SJason Evans if (!create) {
406b7eaed25SJason Evans continue;
407b7eaed25SJason Evans }
408b7eaed25SJason Evans
4098b2f5aafSJason Evans pre_reentrancy(tsd, NULL);
4108b2f5aafSJason Evans int err = background_thread_create_signals_masked(&info->thread,
4118b2f5aafSJason Evans NULL, background_thread_entry, (void *)(uintptr_t)i);
412b7eaed25SJason Evans post_reentrancy(tsd);
413b7eaed25SJason Evans
414b7eaed25SJason Evans if (err == 0) {
415b7eaed25SJason Evans (*n_created)++;
416b7eaed25SJason Evans created_threads[i] = true;
417b7eaed25SJason Evans } else {
418b7eaed25SJason Evans malloc_printf("<jemalloc>: background thread "
419b7eaed25SJason Evans "creation failed (%d)\n", err);
420b7eaed25SJason Evans if (opt_abort) {
421b7eaed25SJason Evans abort();
422b7eaed25SJason Evans }
423b7eaed25SJason Evans }
4240ef50b4eSJason Evans /* Return to restart the loop since we unlocked. */
4250ef50b4eSJason Evans ret = true;
4260ef50b4eSJason Evans break;
427b7eaed25SJason Evans }
4280ef50b4eSJason Evans malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);
4290ef50b4eSJason Evans
4300ef50b4eSJason Evans return ret;
431b7eaed25SJason Evans }
432b7eaed25SJason Evans
/*
 * Main loop for thread 0, which additionally launches requested threads
 * and tears all of them down when background threads are disabled.
 * Caller holds background_thread_info[0].mtx.
 */
static void
background_thread0_work(tsd_t *tsd) {
	/* Thread0 is also responsible for launching / terminating threads. */
	VARIABLE_ARRAY(bool, created_threads, max_background_threads);
	unsigned i;
	for (i = 1; i < max_background_threads; i++) {
		created_threads[i] = false;
	}
	/* Start working, and create more threads when asked. */
	unsigned n_created = 1;
	while (background_thread_info[0].state != background_thread_stopped) {
		if (background_thread_pause_check(tsd_tsdn(tsd),
		    &background_thread_info[0])) {
			continue;
		}
		/* Both checks may drop the mutex; restart the loop if so. */
		if (check_background_thread_creation(tsd, &n_created,
		    (bool *)&created_threads)) {
			continue;
		}
		background_work_sleep_once(tsd_tsdn(tsd),
		    &background_thread_info[0], 0);
	}

	/*
	 * Shut down other threads at exit.  Note that the ctl thread is holding
	 * the global background_thread mutex (and is waiting) for us.
	 */
	assert(!background_thread_enabled());
	for (i = 1; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		assert(info->state != background_thread_paused);
		if (created_threads[i]) {
			background_threads_disable_single(tsd, info);
		} else {
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			if (info->state != background_thread_stopped) {
				/* The thread was not created. */
				assert(info->state ==
				    background_thread_started);
				n_background_threads--;
				info->state = background_thread_stopped;
			}
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
	}
	background_thread_info[0].state = background_thread_stopped;
	assert(n_background_threads == 1);
}
481b7eaed25SJason Evans
/*
 * Top-level body for every background thread: loop doing decay work and
 * sleeping until told to stop.  Thread 0 runs the extended variant.
 */
static void
background_work(tsd_t *tsd, unsigned ind) {
	background_thread_info_t *info = &background_thread_info[ind];

	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
	    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	if (ind == 0) {
		background_thread0_work(tsd);
	} else {
		while (info->state != background_thread_stopped) {
			if (background_thread_pause_check(tsd_tsdn(tsd),
			    info)) {
				continue;
			}
			background_work_sleep_once(tsd_tsdn(tsd), info, ind);
		}
	}
	assert(info->state == background_thread_stopped);
	/* Wakeup time 0 marks the thread as no longer running. */
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
504b7eaed25SJason Evans
/* pthread entry point; the argument encodes the thread index. */
static void *
background_thread_entry(void *ind_arg) {
	unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
	assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
	pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#elif defined(__FreeBSD__)
	pthread_set_name_np(pthread_self(), "jemalloc_bg_thd");
#endif
	if (opt_percpu_arena != percpu_arena_disabled) {
		/* Best effort; affinity failure is ignored. */
		set_current_thread_affinity((int)thread_ind);
	}
	/*
	 * Start periodic background work.  We use internal tsd which avoids
	 * side effects, for example triggering new arena creation (which in
	 * turn triggers another background thread creation).
	 */
	background_work(tsd_internal_fetch(), thread_ind);
	assert(pthread_equal(pthread_self(),
	    background_thread_info[thread_ind].thread));

	return NULL;
}
528b7eaed25SJason Evans
/*
 * Mark a thread slot started and account for it.  Requires the global
 * background_thread_lock (protects n_background_threads).
 */
static void
background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
	info->state = background_thread_started;
	background_thread_info_init(tsd_tsdn(tsd), info);
	n_background_threads++;
}
536b7eaed25SJason Evans
/*
 * Create (or request creation of) the background thread responsible for
 * `arena_ind`.  Returns true on failure.  Requires the global lock.  Only
 * thread 0 is spawned directly here; other indices are signaled to thread
 * 0, which creates them asynchronously.
 */
static bool
background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) {
	assert(have_background_thread);
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	/* We create at most NCPUs threads. */
	size_t thread_ind = arena_ind % max_background_threads;
	background_thread_info_t *info = &background_thread_info[thread_ind];

	bool need_new_thread;
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	need_new_thread = background_thread_enabled() &&
	    (info->state == background_thread_stopped);
	if (need_new_thread) {
		background_thread_init(tsd, info);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	if (!need_new_thread) {
		return false;
	}
	if (arena_ind != 0) {
		/* Threads are created asynchronously by Thread 0. */
		background_thread_info_t *t0 = &background_thread_info[0];
		malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
		assert(t0->state == background_thread_started);
		pthread_cond_signal(&t0->cond);
		malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);

		return false;
	}

	pre_reentrancy(tsd, NULL);
	/*
	 * To avoid complications (besides reentrancy), create internal
	 * background threads with the underlying pthread_create.
	 */
	int err = background_thread_create_signals_masked(&info->thread, NULL,
	    background_thread_entry, (void *)thread_ind);
	post_reentrancy(tsd);

	if (err != 0) {
		malloc_printf("<jemalloc>: arena 0 background thread creation "
		    "failed (%d)\n", err);
		/* Roll back the init done above. */
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		info->state = background_thread_stopped;
		n_background_threads--;
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

		return true;
	}

	return false;
}
590b7eaed25SJason Evans
591*c5ad8142SEric van Gyzen /* Create a new background thread if needed. */
592*c5ad8142SEric van Gyzen bool
593*c5ad8142SEric van Gyzen background_thread_create(tsd_t *tsd, unsigned arena_ind) {
594*c5ad8142SEric van Gyzen assert(have_background_thread);
595*c5ad8142SEric van Gyzen
596*c5ad8142SEric van Gyzen bool ret;
597*c5ad8142SEric van Gyzen malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
598*c5ad8142SEric van Gyzen ret = background_thread_create_locked(tsd, arena_ind);
599*c5ad8142SEric van Gyzen malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
600*c5ad8142SEric van Gyzen
601*c5ad8142SEric van Gyzen return ret;
602*c5ad8142SEric van Gyzen }
603*c5ad8142SEric van Gyzen
/*
 * Enable background threads: mark the needed per-slot states so thread 0
 * will create the rest, then create thread 0 itself.  Returns true on
 * failure.  Requires the global lock.
 */
bool
background_threads_enable(tsd_t *tsd) {
	assert(n_background_threads == 0);
	assert(background_thread_enabled());
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	VARIABLE_ARRAY(bool, marked, max_background_threads);
	unsigned i, nmarked;
	for (i = 0; i < max_background_threads; i++) {
		marked[i] = false;
	}
	nmarked = 0;
	/* Thread 0 is required and created at the end. */
	marked[0] = true;
	/* Mark the threads we need to create for thread 0. */
	unsigned n = narenas_total_get();
	for (i = 1; i < n; i++) {
		/* Skip slots already marked, and arenas that do not exist. */
		if (marked[i % max_background_threads] ||
		    arena_get(tsd_tsdn(tsd), i, false) == NULL) {
			continue;
		}
		background_thread_info_t *info = &background_thread_info[
		    i % max_background_threads];
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		assert(info->state == background_thread_stopped);
		background_thread_init(tsd, info);
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		marked[i % max_background_threads] = true;
		if (++nmarked == max_background_threads) {
			break;
		}
	}

	return background_thread_create_locked(tsd, 0);
}
639b7eaed25SJason Evans
640b7eaed25SJason Evans bool
641b7eaed25SJason Evans background_threads_disable(tsd_t *tsd) {
642b7eaed25SJason Evans assert(!background_thread_enabled());
643b7eaed25SJason Evans malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
644b7eaed25SJason Evans
645b7eaed25SJason Evans /* Thread 0 will be responsible for terminating other threads. */
646b7eaed25SJason Evans if (background_threads_disable_single(tsd,
647b7eaed25SJason Evans &background_thread_info[0])) {
648b7eaed25SJason Evans return true;
649b7eaed25SJason Evans }
650b7eaed25SJason Evans assert(n_background_threads == 0);
651b7eaed25SJason Evans
652b7eaed25SJason Evans return false;
653b7eaed25SJason Evans }
654b7eaed25SJason Evans
/*
 * Check if we need to signal the background thread early, i.e. wake it before
 * its scheduled sleep expires because new dirty pages have accumulated.
 * Called from application threads on the allocation path, so every lock
 * acquisition here is a non-blocking trylock.
 */
void
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) {
	background_thread_info_t *info = arena_background_thread_info_get(
	    arena);
	if (malloc_mutex_trylock(tsdn, &info->mtx)) {
		/*
		 * Background thread may hold the mutex for a long period of
		 * time.  We'd like to avoid the variance on application
		 * threads.  So keep this non-blocking, and leave the work to a
		 * future epoch.
		 */
		return;
	}

	/* Only signal a thread that is actually running. */
	if (info->state != background_thread_started) {
		goto label_done;
	}
	/* Same non-blocking policy for the decay mutex. */
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		goto label_done;
	}

	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is eagerly done or disabled currently. */
		goto label_done_unlock2;
	}
	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);

	/*
	 * diff = scheduled wakeup time - decay epoch.  If the thread would
	 * already wake at or before the epoch, there is nothing to hurry.
	 */
	nstime_t diff;
	nstime_init(&diff, background_thread_wakeup_time_get(info));
	if (nstime_compare(&diff, &decay->epoch) <= 0) {
		goto label_done_unlock2;
	}
	nstime_subtract(&diff, &decay->epoch);
	/* Skip signaling when the wakeup is imminent anyway. */
	if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
		goto label_done_unlock2;
	}

	if (npages_new > 0) {
		/* Number of decay epochs elapsed before the next wakeup. */
		size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
		/*
		 * Compute how many new pages we would need to purge by the next
		 * wakeup, which is used to determine if we should signal the
		 * background thread.
		 */
		uint64_t npurge_new;
		if (n_epoch >= SMOOTHSTEP_NSTEPS) {
			/* Past the full smoothstep window: purge all of them. */
			npurge_new = npages_new;
		} else {
			/*
			 * Scale npages_new by the fraction of the smoothstep
			 * curve covered by n_epoch (fixed-point arithmetic,
			 * shifted back by SMOOTHSTEP_BFP).
			 */
			uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
			assert(h_steps_max >=
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new = npages_new * (h_steps_max -
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new >>= SMOOTHSTEP_BFP;
		}
		info->npages_to_purge_new += npurge_new;
	}

	/*
	 * Signal when the accumulated backlog crosses the threshold, or when
	 * the thread sleeps indefinitely but there is (or will be) work.
	 */
	bool should_signal;
	if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		should_signal = true;
	} else if (unlikely(background_thread_indefinite_sleep(info)) &&
	    (extents_npages_get(&arena->extents_dirty) > 0 ||
	    extents_npages_get(&arena->extents_muzzy) > 0 ||
	    info->npages_to_purge_new > 0)) {
		should_signal = true;
	} else {
		should_signal = false;
	}

	if (should_signal) {
		info->npages_to_purge_new = 0;
		pthread_cond_signal(&info->cond);
	}
label_done_unlock2:
	malloc_mutex_unlock(tsdn, &decay->mtx);
label_done:
	malloc_mutex_unlock(tsdn, &info->mtx);
}
738b7eaed25SJason Evans
/*
 * Prefork stage 0: acquire the global lock first (it ranks above the
 * per-thread mutexes taken in prefork1), and snapshot the enabled state so
 * the postfork-child handler knows whether state must be reset.
 */
void
background_thread_prefork0(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &background_thread_lock);
	background_thread_enabled_at_fork = background_thread_enabled();
}
744b7eaed25SJason Evans
745b7eaed25SJason Evans void
746b7eaed25SJason Evans background_thread_prefork1(tsdn_t *tsdn) {
7470ef50b4eSJason Evans for (unsigned i = 0; i < max_background_threads; i++) {
748b7eaed25SJason Evans malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
749b7eaed25SJason Evans }
750b7eaed25SJason Evans }
751b7eaed25SJason Evans
752b7eaed25SJason Evans void
753b7eaed25SJason Evans background_thread_postfork_parent(tsdn_t *tsdn) {
7540ef50b4eSJason Evans for (unsigned i = 0; i < max_background_threads; i++) {
755b7eaed25SJason Evans malloc_mutex_postfork_parent(tsdn,
756b7eaed25SJason Evans &background_thread_info[i].mtx);
757b7eaed25SJason Evans }
758b7eaed25SJason Evans malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
759b7eaed25SJason Evans }
760b7eaed25SJason Evans
/*
 * Postfork (child): release the forked locks, then — if background threads
 * were enabled in the parent — reset all background-thread state to
 * "disabled/stopped", since the child process inherits no threads.
 */
void
background_thread_postfork_child(tsdn_t *tsdn) {
	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_postfork_child(tsdn,
		    &background_thread_info[i].mtx);
	}
	malloc_mutex_postfork_child(tsdn, &background_thread_lock);
	if (!background_thread_enabled_at_fork) {
		return;
	}

	/* Clear background_thread state (reset to disabled for child). */
	malloc_mutex_lock(tsdn, &background_thread_lock);
	n_background_threads = 0;
	background_thread_enabled_set(tsdn, false);
	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		info->state = background_thread_stopped;
		/* Re-create the condvar; the parent's may be unusable here. */
		int ret = pthread_cond_init(&info->cond, NULL);
		assert(ret == 0);
		background_thread_info_init(tsdn, info);
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
	malloc_mutex_unlock(tsdn, &background_thread_lock);
}
787b7eaed25SJason Evans
/*
 * Aggregate background-thread stats into *stats.  Returns true (failure)
 * when background threads are disabled.  Threads whose mutex cannot be
 * acquired without blocking are skipped, so the numbers are best-effort.
 */
bool
background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
	assert(config_stats);
	malloc_mutex_lock(tsdn, &background_thread_lock);
	if (!background_thread_enabled()) {
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		return true;
	}

	stats->num_threads = n_background_threads;
	uint64_t num_runs = 0;
	nstime_init(&stats->run_interval, 0);
	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		if (malloc_mutex_trylock(tsdn, &info->mtx)) {
			/*
			 * Each background thread run may take a long time;
			 * avoid waiting on the stats if the thread is active.
			 */
			continue;
		}
		/* Stopped threads carry no meaningful run counters. */
		if (info->state != background_thread_stopped) {
			num_runs += info->tot_n_runs;
			nstime_add(&stats->run_interval, &info->tot_sleep_time);
		}
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
	stats->num_runs = num_runs;
	/* Convert total sleep time into an average interval per run. */
	if (num_runs > 0) {
		nstime_idivide(&stats->run_interval, num_runs);
	}
	malloc_mutex_unlock(tsdn, &background_thread_lock);

	return false;
}
823b7eaed25SJason Evans
824b7eaed25SJason Evans #undef BACKGROUND_THREAD_NPAGES_THRESHOLD
825b7eaed25SJason Evans #undef BILLION
826b7eaed25SJason Evans #undef BACKGROUND_THREAD_MIN_INTERVAL_NS
827b7eaed25SJason Evans
828*c5ad8142SEric van Gyzen #ifdef JEMALLOC_HAVE_DLSYM
829*c5ad8142SEric van Gyzen #include <dlfcn.h>
830*c5ad8142SEric van Gyzen #endif
831*c5ad8142SEric van Gyzen
8320ef50b4eSJason Evans static bool
8330ef50b4eSJason Evans pthread_create_fptr_init(void) {
8340ef50b4eSJason Evans if (pthread_create_fptr != NULL) {
8350ef50b4eSJason Evans return false;
8360ef50b4eSJason Evans }
837*c5ad8142SEric van Gyzen /*
838*c5ad8142SEric van Gyzen * Try the next symbol first, because 1) when use lazy_lock we have a
839*c5ad8142SEric van Gyzen * wrapper for pthread_create; and 2) application may define its own
840*c5ad8142SEric van Gyzen * wrapper as well (and can call malloc within the wrapper).
841*c5ad8142SEric van Gyzen */
842*c5ad8142SEric van Gyzen #ifdef JEMALLOC_HAVE_DLSYM
8430ef50b4eSJason Evans pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
844*c5ad8142SEric van Gyzen #else
845*c5ad8142SEric van Gyzen pthread_create_fptr = NULL;
846*c5ad8142SEric van Gyzen #endif
8470ef50b4eSJason Evans if (pthread_create_fptr == NULL) {
848*c5ad8142SEric van Gyzen if (config_lazy_lock) {
8490ef50b4eSJason Evans malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
8500ef50b4eSJason Evans "\"pthread_create\")\n");
8510ef50b4eSJason Evans abort();
852f2cb2907SJason Evans } else {
853*c5ad8142SEric van Gyzen /* Fall back to the default symbol. */
854*c5ad8142SEric van Gyzen pthread_create_fptr = pthread_create;
855*c5ad8142SEric van Gyzen }
8560ef50b4eSJason Evans }
8570ef50b4eSJason Evans
8580ef50b4eSJason Evans return false;
8590ef50b4eSJason Evans }
8600ef50b4eSJason Evans
/*
 * When lazy lock is enabled, we need to make sure setting isthreaded before
 * taking any background_thread locks. This is called early in ctl (instead of
 * wait for the pthread_create calls to trigger) because the mutex is required
 * before creating background threads.
 */
void
background_thread_ctl_init(tsdn_t *tsdn) {
	malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
	/* Resolve the real pthread_create, then initialize the wrapper. */
	pthread_create_fptr_init();
	pthread_create_wrapper_init();
#endif
}
875b7eaed25SJason Evans
876b7eaed25SJason Evans #endif /* defined(JEMALLOC_BACKGROUND_THREAD) */
877b7eaed25SJason Evans
878b7eaed25SJason Evans bool
879b7eaed25SJason Evans background_thread_boot0(void) {
880b7eaed25SJason Evans if (!have_background_thread && opt_background_thread) {
881b7eaed25SJason Evans malloc_printf("<jemalloc>: option background_thread currently "
882b7eaed25SJason Evans "supports pthread only\n");
883b7eaed25SJason Evans return true;
884b7eaed25SJason Evans }
885b7eaed25SJason Evans #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
8860ef50b4eSJason Evans if ((config_lazy_lock || opt_background_thread) &&
8870ef50b4eSJason Evans pthread_create_fptr_init()) {
8880ef50b4eSJason Evans return true;
889b7eaed25SJason Evans }
890b7eaed25SJason Evans #endif
891b7eaed25SJason Evans return false;
892b7eaed25SJason Evans }
893b7eaed25SJason Evans
894b7eaed25SJason Evans bool
background_thread_boot1(tsdn_t * tsdn)895b7eaed25SJason Evans background_thread_boot1(tsdn_t *tsdn) {
896b7eaed25SJason Evans #ifdef JEMALLOC_BACKGROUND_THREAD
897b7eaed25SJason Evans assert(have_background_thread);
898b7eaed25SJason Evans assert(narenas_total_get() > 0);
899b7eaed25SJason Evans
900*c5ad8142SEric van Gyzen if (opt_max_background_threads > MAX_BACKGROUND_THREAD_LIMIT) {
901*c5ad8142SEric van Gyzen opt_max_background_threads = DEFAULT_NUM_BACKGROUND_THREAD;
9020ef50b4eSJason Evans }
9030ef50b4eSJason Evans max_background_threads = opt_max_background_threads;
9040ef50b4eSJason Evans
905b7eaed25SJason Evans background_thread_enabled_set(tsdn, opt_background_thread);
906b7eaed25SJason Evans if (malloc_mutex_init(&background_thread_lock,
907b7eaed25SJason Evans "background_thread_global",
908b7eaed25SJason Evans WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
909b7eaed25SJason Evans malloc_mutex_rank_exclusive)) {
910b7eaed25SJason Evans return true;
911b7eaed25SJason Evans }
912b7eaed25SJason Evans
913b7eaed25SJason Evans background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
9140ef50b4eSJason Evans b0get(), opt_max_background_threads *
9150ef50b4eSJason Evans sizeof(background_thread_info_t), CACHELINE);
916b7eaed25SJason Evans if (background_thread_info == NULL) {
917b7eaed25SJason Evans return true;
918b7eaed25SJason Evans }
919b7eaed25SJason Evans
9200ef50b4eSJason Evans for (unsigned i = 0; i < max_background_threads; i++) {
921b7eaed25SJason Evans background_thread_info_t *info = &background_thread_info[i];
922b7eaed25SJason Evans /* Thread mutex is rank_inclusive because of thread0. */
923b7eaed25SJason Evans if (malloc_mutex_init(&info->mtx, "background_thread",
924b7eaed25SJason Evans WITNESS_RANK_BACKGROUND_THREAD,
925b7eaed25SJason Evans malloc_mutex_address_ordered)) {
926b7eaed25SJason Evans return true;
927b7eaed25SJason Evans }
928b7eaed25SJason Evans if (pthread_cond_init(&info->cond, NULL)) {
929b7eaed25SJason Evans return true;
930b7eaed25SJason Evans }
931b7eaed25SJason Evans malloc_mutex_lock(tsdn, &info->mtx);
932b7eaed25SJason Evans info->state = background_thread_stopped;
933b7eaed25SJason Evans background_thread_info_init(tsdn, info);
934b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &info->mtx);
935b7eaed25SJason Evans }
936b7eaed25SJason Evans #endif
937b7eaed25SJason Evans
938b7eaed25SJason Evans return false;
939b7eaed25SJason Evans }
940