#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"

/******************************************************************************/
/* Data. */

/* This option should be opt-in only. */
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;

/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state.  Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;

/* False if no necessary runtime support. */
bool can_enable_background_thread;

/******************************************************************************/

#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
#include <dlfcn.h>

/*
 * Real pthread_create.  Not assigned in this chunk; presumably resolved via
 * dlsym(RTLD_NEXT, "pthread_create") during boot elsewhere in the file --
 * TODO(review): confirm it is initialized before the wrapper can be called.
 */
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);
static pthread_once_t once_control = PTHREAD_ONCE_INIT;

/* One-time hook: flip to threaded mode when lazy locking is configured. */
static void
pthread_create_wrapper_once(void) {
#ifdef JEMALLOC_LAZY_LOCK
	isthreaded = true;
#endif
}

/*
 * Interposed pthread_create: notes that the process is now threaded (once),
 * then forwards to the real pthread_create.
 */
int
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *__restrict arg) {
	pthread_once(&once_control, pthread_create_wrapper_once);

	return pthread_create_fptr(thread, attr, start_routine, arg);
}
#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */

#ifndef JEMALLOC_BACKGROUND_THREAD
/* Without runtime support, every entry point is a hard programming error. */
#define NOT_REACHED { not_reached(); }
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
bool background_thread_stats_read(tsdn_t *tsdn,
    background_thread_stats_t *stats) NOT_REACHED
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
#undef NOT_REACHED
#else

static bool background_thread_enabled_at_fork;

/* Reset per-thread bookkeeping (wakeup time, pending pages, stats). */
static void
background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
	background_thread_wakeup_time_set(tsdn, info, 0);
	info->npages_to_purge_new = 0;
	if (config_stats) {
		info->tot_n_runs = 0;
		nstime_init(&info->tot_sleep_time, 0);
	}
}

/*
 * Pin the calling thread to `cpu`.  Returns true on failure.  No-op
 * (reports success) on platforms without sched_setaffinity.
 */
static inline bool
set_current_thread_affinity(UNUSED int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
	cpu_set_t cpuset;
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);

	return (ret != 0);
#else
	return false;
#endif
}

/* Threshold for determining when to wake up the background thread. */
#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
#define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)

/*
 * Estimate how many pages would be purged after sleeping `interval` decay
 * epochs: a fixed-point (SMOOTHSTEP_BFP) weighted sum of the decay backlog
 * against the smoothstep table.  Caller must hold decay->mtx.
 */
static inline size_t
decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
	size_t i;
	uint64_t sum = 0;
	for (i = 0; i < interval; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	/* Epochs beyond `interval` contribute only the weight they gain. */
	for (; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
	}

	return (size_t)(sum >> SMOOTHSTEP_BFP);
}

/*
 * Compute how long the background thread should sleep before the given
 * decay state needs purging again, in ns.  Never blocks on decay->mtx:
 * contention falls back to the minimal interval.
 */
static uint64_t
arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
    extents_t *extents) {
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* Use minimal interval if decay is contended. */
		return BACKGROUND_THREAD_MIN_INTERVAL_NS;
	}

	uint64_t interval;
	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is eagerly done or disabled currently. */
		interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
		goto label_done;
	}

	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);
	size_t npages = extents_npages_get(extents);
	if (npages == 0) {
		unsigned i;
		for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
			if (decay->backlog[i] > 0) {
				break;
			}
		}
		if (i == SMOOTHSTEP_NSTEPS) {
			/* No dirty pages recorded.  Sleep indefinitely. */
			interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
			goto label_done;
		}
	}
	if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		/* Use max interval. */
		interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
		goto label_done;
	}

	size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
	size_t ub = SMOOTHSTEP_NSTEPS;
	/* Minimal 2 intervals to ensure reaching next epoch deadline. */
	lb = (lb < 2) ? 2 : lb;
	if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
	    (lb + 2 > ub)) {
		interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
		goto label_done;
	}

	/*
	 * Binary search in [lb, ub] for the interval at which the projected
	 * purge count crosses BACKGROUND_THREAD_NPAGES_THRESHOLD.
	 */
	assert(lb + 2 <= ub);
	size_t npurge_lb, npurge_ub;
	npurge_lb = decay_npurge_after_interval(decay, lb);
	if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		interval = decay_interval_ns * lb;
		goto label_done;
	}
	npurge_ub = decay_npurge_after_interval(decay, ub);
	if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		interval = decay_interval_ns * ub;
		goto label_done;
	}

	unsigned n_search = 0;
	size_t target, npurge;
	while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
	    && (lb + 2 < ub)) {
		target = (lb + ub) / 2;
		npurge = decay_npurge_after_interval(decay, target);
		if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
			ub = target;
			npurge_ub = npurge;
		} else {
			lb = target;
			npurge_lb = npurge;
		}
		/* The search space halves each step; bound the iterations. */
		assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
	}
	interval = decay_interval_ns * (ub + lb) / 2;
label_done:
	interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
	    BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return interval;
}

/* Compute purge interval for background threads. */
static uint64_t
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
	uint64_t i1, i2;
	i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
	    &arena->extents_dirty);
	if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
		/* Already at the floor; muzzy cannot shorten it further. */
		return i1;
	}
	i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
	    &arena->extents_muzzy);

	return i1 < i2 ? i1 : i2;
}

/*
 * Sleep on info->cond for `interval` ns (or indefinitely).  Caller holds
 * info->mtx, which the condvar wait releases while sleeping.
 */
static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
    uint64_t interval) {
	if (config_stats) {
		info->tot_n_runs++;
	}
	info->npages_to_purge_new = 0;

	struct timeval tv;
	/* Specific clock required by timedwait. */
	gettimeofday(&tv, NULL);
	nstime_t before_sleep;
	nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);

	int ret;
	if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
		assert(background_thread_indefinite_sleep(info));
		ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
		assert(ret == 0);
	} else {
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
		    interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
		/* We need malloc clock (can be different from tv). */
		nstime_t next_wakeup;
		nstime_init(&next_wakeup, 0);
		nstime_update(&next_wakeup);
		nstime_iadd(&next_wakeup, interval);
		assert(nstime_ns(&next_wakeup) <
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
		background_thread_wakeup_time_set(tsdn, info,
		    nstime_ns(&next_wakeup));

		nstime_t ts_wakeup;
		nstime_copy(&ts_wakeup, &before_sleep);
		nstime_iadd(&ts_wakeup, interval);
		struct timespec ts;
		ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
		ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);

		assert(!background_thread_indefinite_sleep(info));
		ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
		assert(ret == ETIMEDOUT || ret == 0);
		/* Mark indefinite so interval_check won't signal us again. */
		background_thread_wakeup_time_set(tsdn, info,
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	}
	if (config_stats) {
		gettimeofday(&tv, NULL);
		nstime_t after_sleep;
		nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
		/* gettimeofday is not monotonic; guard against time warps. */
		if (nstime_compare(&after_sleep, &before_sleep) > 0) {
			nstime_subtract(&after_sleep, &before_sleep);
			nstime_add(&info->tot_sleep_time, &after_sleep);
		}
	}
}

/*
 * If this thread has been paused (presumably around fork -- see the prefork
 * handlers; confirm), block until the pauser releases the global lock.
 * Returns true when a pause was observed, so the caller re-checks state.
 */
static bool
background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
	if (unlikely(info->state == background_thread_paused)) {
		malloc_mutex_unlock(tsdn, &info->mtx);
		/* Wait on global lock to update status. */
		malloc_mutex_lock(tsdn, &background_thread_lock);
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		malloc_mutex_lock(tsdn, &info->mtx);
		return true;
	}

	return false;
}

/*
 * Decay every arena assigned to this thread (arena index congruent to `ind`
 * mod ncpus), then sleep for the shortest purge interval any arena needs.
 */
static inline void
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) {
	uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
	unsigned narenas = narenas_total_get();

	for (unsigned i = ind; i < narenas; i += ncpus) {
		arena_t *arena = arena_get(tsdn, i, false);
		if (!arena) {
			continue;
		}
		arena_decay(tsdn, arena, true, false);
		if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
			/* Min interval will be used. */
			continue;
		}
		uint64_t interval = arena_decay_compute_purge_interval(tsdn,
		    arena);
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
		if (min_interval > interval) {
			min_interval = interval;
		}
	}
	background_thread_sleep(tsdn, info, min_interval);
}

/*
 * Stop one background thread and join it.  Returns true on failure.  For
 * thread 0 the caller must hold background_thread_lock; for any other
 * thread it must not (both asserted).
 */
static bool
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
	if (info == &background_thread_info[0]) {
		malloc_mutex_assert_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	} else {
		malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	}

	pre_reentrancy(tsd, NULL);
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	bool has_thread;
	assert(info->state != background_thread_paused);
	if (info->state == background_thread_started) {
		has_thread = true;
		info->state = background_thread_stopped;
		/* Wake the thread so it notices the state change and exits. */
		pthread_cond_signal(&info->cond);
	} else {
		has_thread = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

	if (!has_thread) {
		post_reentrancy(tsd);
		return false;
	}
	void *ret;
	if (pthread_join(info->thread, &ret)) {
		post_reentrancy(tsd);
		return true;
	}
	assert(ret == NULL);
	n_background_threads--;
	post_reentrancy(tsd);

	return false;
}

static void *background_thread_entry(void *ind_arg);

/*
 * pthread_create with all signals blocked around the call, so the new thread
 * inherits an empty signal mask and never steals application signals.
 * Returns the pthread_create / pthread_sigmask error code (0 on success).
 */
static int
background_thread_create_signals_masked(pthread_t *thread,
    const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
	/*
	 * Mask signals during thread creation so that the thread inherits
	 * an empty signal set.
	 */
	sigset_t set;
	sigfillset(&set);
	sigset_t oldset;
	int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
	if (mask_err != 0) {
		return mask_err;
	}
	int create_err = pthread_create_wrapper(thread, attr, start_routine,
	    arg);
	/*
	 * Restore the signal mask.  Failure to restore the signal mask here
	 * changes program behavior.
	 */
	int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
	if (restore_err != 0) {
		malloc_printf("<jemalloc>: background thread creation "
		    "failed (%d), and signal mask restoration failed "
		    "(%d)\n", create_err, restore_err);
		if (opt_abort) {
			abort();
		}
	}
	return create_err;
}

/*
 * Thread 0's helper: launch any threads whose slot was marked started but
 * not yet created.  Entered with background_thread_info[0].mtx held; that
 * lock is dropped while creating and reacquired before returning.
 */
static void
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
    bool *created_threads) {
	if (likely(*n_created == n_background_threads)) {
		return;
	}

	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
label_restart:
	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
	for (unsigned i = 1; i < ncpus; i++) {
		if (created_threads[i]) {
			continue;
		}
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		assert(info->state != background_thread_paused);
		bool create = (info->state == background_thread_started);
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		if (!create) {
			continue;
		}

		/*
		 * To avoid deadlock with prefork handlers (which waits for the
		 * mutex held here), unlock before calling pthread_create().
		 */
		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);

		pre_reentrancy(tsd, NULL);
		int err = background_thread_create_signals_masked(&info->thread,
		    NULL, background_thread_entry, (void *)(uintptr_t)i);
		post_reentrancy(tsd);

		if (err == 0) {
			(*n_created)++;
			created_threads[i] = true;
		} else {
			malloc_printf("<jemalloc>: background thread "
			    "creation failed (%d)\n", err);
			if (opt_abort) {
				abort();
			}
		}
		/* Restart since we unlocked. */
		goto label_restart;
	}
	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
}

static void
background_thread0_work(tsd_t *tsd) {
	/* Thread0 is also responsible for launching / terminating threads. */
	VARIABLE_ARRAY(bool, created_threads, ncpus);
	unsigned i;
	for (i = 1; i < ncpus; i++) {
		created_threads[i] = false;
	}
	/* Start working, and create more threads when asked. */
	unsigned n_created = 1;
	while (background_thread_info[0].state != background_thread_stopped) {
		if (background_thread_pause_check(tsd_tsdn(tsd),
		    &background_thread_info[0])) {
			continue;
		}
		check_background_thread_creation(tsd, &n_created,
		    (bool *)&created_threads);
		background_work_sleep_once(tsd_tsdn(tsd),
		    &background_thread_info[0], 0);
	}

	/*
	 * Shut down other threads at exit.  Note that the ctl thread is
	 * holding the global background_thread mutex (and is waiting) for us.
	 */
	assert(!background_thread_enabled());
	for (i = 1; i < ncpus; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		assert(info->state != background_thread_paused);
		if (created_threads[i]) {
			background_threads_disable_single(tsd, info);
		} else {
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			/* Clear in case the thread wasn't created. */
			info->state = background_thread_stopped;
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
	}
	background_thread_info[0].state = background_thread_stopped;
	assert(n_background_threads == 1);
}

/*
 * Main loop of a background thread; `ind` selects both its info slot and
 * the arenas it services.  Holds info->mtx for the thread's lifetime
 * (released only inside the condvar waits and pause check).
 */
static void
background_work(tsd_t *tsd, unsigned ind) {
	background_thread_info_t *info = &background_thread_info[ind];

	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
	    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	if (ind == 0) {
		background_thread0_work(tsd);
	} else {
		while (info->state != background_thread_stopped) {
			if (background_thread_pause_check(tsd_tsdn(tsd),
			    info)) {
				continue;
			}
			background_work_sleep_once(tsd_tsdn(tsd), info, ind);
		}
	}
	assert(info->state == background_thread_stopped);
	/* Wakeup time 0 indicates the thread is no longer running. */
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}

/* Entry point handed to pthread_create for every background thread. */
static void *
background_thread_entry(void *ind_arg) {
	unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
	assert(thread_ind < ncpus);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
	pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#endif
	if (opt_percpu_arena != percpu_arena_disabled) {
		set_current_thread_affinity((int)thread_ind);
	}
	/*
	 * Start periodic background work.  We use internal tsd which avoids
	 * side effects, for example triggering new arena creation (which in
	 * turn triggers another background thread creation).
	 */
	background_work(tsd_internal_fetch(), thread_ind);
	assert(pthread_equal(pthread_self(),
	    background_thread_info[thread_ind].thread));

	return NULL;
}

/*
 * Mark the slot started and bump the count.  Caller holds the global lock
 * (asserted) and info->mtx.
 */
static void
background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
	info->state = background_thread_started;
	background_thread_info_init(tsd_tsdn(tsd), info);
	n_background_threads++;
}

/* Create a new background thread if needed. */
bool
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
	assert(have_background_thread);
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	/* We create at most NCPUs threads. */
	size_t thread_ind = arena_ind % ncpus;
	background_thread_info_t *info = &background_thread_info[thread_ind];

	bool need_new_thread;
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	need_new_thread = background_thread_enabled() &&
	    (info->state == background_thread_stopped);
	if (need_new_thread) {
		background_thread_init(tsd, info);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	if (!need_new_thread) {
		return false;
	}
	if (arena_ind != 0) {
		/* Threads are created asynchronously by Thread 0. */
		background_thread_info_t *t0 = &background_thread_info[0];
		malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
		assert(t0->state == background_thread_started);
		pthread_cond_signal(&t0->cond);
		malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);

		return false;
	}

	pre_reentrancy(tsd, NULL);
	/*
	 * To avoid complications (besides reentrancy), create internal
	 * background threads with the underlying pthread_create.
	 */
	int err = background_thread_create_signals_masked(&info->thread, NULL,
	    background_thread_entry, (void *)thread_ind);
	post_reentrancy(tsd);

	if (err != 0) {
		malloc_printf("<jemalloc>: arena 0 background thread creation "
		    "failed (%d)\n", err);
		/* Roll back the state set by background_thread_init above. */
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		info->state = background_thread_stopped;
		n_background_threads--;
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

		return true;
	}

	return false;
}

bool
background_threads_enable(tsd_t *tsd) {
	assert(n_background_threads == 0);
	assert(background_thread_enabled());
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	VARIABLE_ARRAY(bool, marked, ncpus);
	unsigned i, nmarked;
	for (i = 0; i < ncpus; i++) {
		marked[i] = false;
	}
	nmarked = 0;
	/* Mark the threads we need to create for thread 0. */
*/ 596b7eaed25SJason Evans unsigned n = narenas_total_get(); 597b7eaed25SJason Evans for (i = 1; i < n; i++) { 598b7eaed25SJason Evans if (marked[i % ncpus] || 599b7eaed25SJason Evans arena_get(tsd_tsdn(tsd), i, false) == NULL) { 600b7eaed25SJason Evans continue; 601b7eaed25SJason Evans } 602b7eaed25SJason Evans background_thread_info_t *info = &background_thread_info[i]; 603b7eaed25SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); 604b7eaed25SJason Evans assert(info->state == background_thread_stopped); 605b7eaed25SJason Evans background_thread_init(tsd, info); 606b7eaed25SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); 607b7eaed25SJason Evans marked[i % ncpus] = true; 608b7eaed25SJason Evans if (++nmarked == ncpus) { 609b7eaed25SJason Evans break; 610b7eaed25SJason Evans } 611b7eaed25SJason Evans } 612b7eaed25SJason Evans 613b7eaed25SJason Evans return background_thread_create(tsd, 0); 614b7eaed25SJason Evans } 615b7eaed25SJason Evans 616b7eaed25SJason Evans bool 617b7eaed25SJason Evans background_threads_disable(tsd_t *tsd) { 618b7eaed25SJason Evans assert(!background_thread_enabled()); 619b7eaed25SJason Evans malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); 620b7eaed25SJason Evans 621b7eaed25SJason Evans /* Thread 0 will be responsible for terminating other threads. */ 622b7eaed25SJason Evans if (background_threads_disable_single(tsd, 623b7eaed25SJason Evans &background_thread_info[0])) { 624b7eaed25SJason Evans return true; 625b7eaed25SJason Evans } 626b7eaed25SJason Evans assert(n_background_threads == 0); 627b7eaed25SJason Evans 628b7eaed25SJason Evans return false; 629b7eaed25SJason Evans } 630b7eaed25SJason Evans 631b7eaed25SJason Evans /* Check if we need to signal the background thread early. 
 */
void
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) {
	background_thread_info_t *info = arena_background_thread_info_get(
	    arena);
	if (malloc_mutex_trylock(tsdn, &info->mtx)) {
		/*
		 * Background thread may hold the mutex for a long period of
		 * time.  We'd like to avoid the variance on application
		 * threads.  So keep this non-blocking, and leave the work to a
		 * future epoch.
		 */
		return;
	}

	/* No running thread for this info slot: nothing to signal. */
	if (info->state != background_thread_started) {
		goto label_done;
	}
	/* Same non-blocking policy for the decay mutex. */
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		goto label_done;
	}

	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is eagerly done or disabled currently. */
		goto label_done_unlock2;
	}
	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);

	/*
	 * diff = scheduled wakeup time - decay epoch.  Skip signaling when the
	 * wakeup is not after the epoch, or follows it too closely.
	 */
	nstime_t diff;
	nstime_init(&diff, background_thread_wakeup_time_get(info));
	if (nstime_compare(&diff, &decay->epoch) <= 0) {
		goto label_done_unlock2;
	}
	nstime_subtract(&diff, &decay->epoch);
	if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
		goto label_done_unlock2;
	}

	if (npages_new > 0) {
		size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
		/*
		 * Compute how many new pages we would need to purge by the next
		 * wakeup, which is used to determine if we should signal the
		 * background thread.
		 */
		uint64_t npurge_new;
		if (n_epoch >= SMOOTHSTEP_NSTEPS) {
			/* The whole smoothstep horizon elapses first. */
			npurge_new = npages_new;
		} else {
			/*
			 * Fixed-point (SMOOTHSTEP_BFP) fraction of npages_new,
			 * taken from the h_steps table for n_epoch steps.
			 */
			uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
			assert(h_steps_max >=
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new = npages_new * (h_steps_max -
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new >>= SMOOTHSTEP_BFP;
		}
		info->npages_to_purge_new += npurge_new;
	}

	/*
	 * Signal early when enough deferred pages have accumulated, or when
	 * the thread sleeps indefinitely while dirty/muzzy pages (or deferred
	 * work) exist.
	 */
	bool should_signal;
	if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		should_signal = true;
	} else if (unlikely(background_thread_indefinite_sleep(info)) &&
	    (extents_npages_get(&arena->extents_dirty) > 0 ||
	    extents_npages_get(&arena->extents_muzzy) > 0 ||
	    info->npages_to_purge_new > 0)) {
		should_signal = true;
	} else {
		should_signal = false;
	}

	if (should_signal) {
		info->npages_to_purge_new = 0;
		pthread_cond_signal(&info->cond);
	}
label_done_unlock2:
	malloc_mutex_unlock(tsdn, &decay->mtx);
label_done:
	malloc_mutex_unlock(tsdn, &info->mtx);
}

/*
 * Fork handlers.  prefork0 takes the global lock before the per-thread
 * mutexes (prefork1), and latches the enabled state for postfork_child.
 */
void
background_thread_prefork0(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &background_thread_lock);
	background_thread_enabled_at_fork = background_thread_enabled();
}

void
background_thread_prefork1(tsdn_t *tsdn) {
	for (unsigned i = 0; i < ncpus; i++) {
		malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
	}
}

void
background_thread_postfork_parent(tsdn_t *tsdn) {
	/* Release in reverse prefork order: per-thread mutexes, then global. */
	for (unsigned i = 0; i < ncpus; i++) {
		malloc_mutex_postfork_parent(tsdn,
		    &background_thread_info[i].mtx);
	}
	malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
}

void
background_thread_postfork_child(tsdn_t *tsdn) {
	for (unsigned i = 0; i < ncpus; i++) {
		malloc_mutex_postfork_child(tsdn,
		    &background_thread_info[i].mtx);
	}
	malloc_mutex_postfork_child(tsdn, &background_thread_lock);
	/* enabled_at_fork was latched in prefork0. */
	if (!background_thread_enabled_at_fork) {
		return;
	}

	/* Clear background_thread state (reset to disabled for child).
*/ 749b7eaed25SJason Evans malloc_mutex_lock(tsdn, &background_thread_lock); 750b7eaed25SJason Evans n_background_threads = 0; 751b7eaed25SJason Evans background_thread_enabled_set(tsdn, false); 752b7eaed25SJason Evans for (unsigned i = 0; i < ncpus; i++) { 753b7eaed25SJason Evans background_thread_info_t *info = &background_thread_info[i]; 754b7eaed25SJason Evans malloc_mutex_lock(tsdn, &info->mtx); 755b7eaed25SJason Evans info->state = background_thread_stopped; 756b7eaed25SJason Evans int ret = pthread_cond_init(&info->cond, NULL); 757b7eaed25SJason Evans assert(ret == 0); 758b7eaed25SJason Evans background_thread_info_init(tsdn, info); 759b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &info->mtx); 760b7eaed25SJason Evans } 761b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &background_thread_lock); 762b7eaed25SJason Evans } 763b7eaed25SJason Evans 764b7eaed25SJason Evans bool 765b7eaed25SJason Evans background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) { 766b7eaed25SJason Evans assert(config_stats); 767b7eaed25SJason Evans malloc_mutex_lock(tsdn, &background_thread_lock); 768b7eaed25SJason Evans if (!background_thread_enabled()) { 769b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &background_thread_lock); 770b7eaed25SJason Evans return true; 771b7eaed25SJason Evans } 772b7eaed25SJason Evans 773b7eaed25SJason Evans stats->num_threads = n_background_threads; 774b7eaed25SJason Evans uint64_t num_runs = 0; 775b7eaed25SJason Evans nstime_init(&stats->run_interval, 0); 776b7eaed25SJason Evans for (unsigned i = 0; i < ncpus; i++) { 777b7eaed25SJason Evans background_thread_info_t *info = &background_thread_info[i]; 778b7eaed25SJason Evans malloc_mutex_lock(tsdn, &info->mtx); 779b7eaed25SJason Evans if (info->state != background_thread_stopped) { 780b7eaed25SJason Evans num_runs += info->tot_n_runs; 781b7eaed25SJason Evans nstime_add(&stats->run_interval, &info->tot_sleep_time); 782b7eaed25SJason Evans } 783b7eaed25SJason Evans 
malloc_mutex_unlock(tsdn, &info->mtx); 784b7eaed25SJason Evans } 785b7eaed25SJason Evans stats->num_runs = num_runs; 786b7eaed25SJason Evans if (num_runs > 0) { 787b7eaed25SJason Evans nstime_idivide(&stats->run_interval, num_runs); 788b7eaed25SJason Evans } 789b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &background_thread_lock); 790b7eaed25SJason Evans 791b7eaed25SJason Evans return false; 792b7eaed25SJason Evans } 793b7eaed25SJason Evans 794b7eaed25SJason Evans #undef BACKGROUND_THREAD_NPAGES_THRESHOLD 795b7eaed25SJason Evans #undef BILLION 796b7eaed25SJason Evans #undef BACKGROUND_THREAD_MIN_INTERVAL_NS 797b7eaed25SJason Evans 798b7eaed25SJason Evans /* 799b7eaed25SJason Evans * When lazy lock is enabled, we need to make sure setting isthreaded before 800b7eaed25SJason Evans * taking any background_thread locks. This is called early in ctl (instead of 801b7eaed25SJason Evans * wait for the pthread_create calls to trigger) because the mutex is required 802b7eaed25SJason Evans * before creating background threads. 
803b7eaed25SJason Evans */ 804b7eaed25SJason Evans void 805b7eaed25SJason Evans background_thread_ctl_init(tsdn_t *tsdn) { 806b7eaed25SJason Evans malloc_mutex_assert_not_owner(tsdn, &background_thread_lock); 807b7eaed25SJason Evans #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER 808b7eaed25SJason Evans pthread_once(&once_control, pthread_create_wrapper_once); 809b7eaed25SJason Evans #endif 810b7eaed25SJason Evans } 811b7eaed25SJason Evans 812b7eaed25SJason Evans #endif /* defined(JEMALLOC_BACKGROUND_THREAD) */ 813b7eaed25SJason Evans 814b7eaed25SJason Evans bool 815b7eaed25SJason Evans background_thread_boot0(void) { 816b7eaed25SJason Evans if (!have_background_thread && opt_background_thread) { 817b7eaed25SJason Evans malloc_printf("<jemalloc>: option background_thread currently " 818b7eaed25SJason Evans "supports pthread only\n"); 819b7eaed25SJason Evans return true; 820b7eaed25SJason Evans } 821b7eaed25SJason Evans 822b7eaed25SJason Evans #ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER 823b7eaed25SJason Evans pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); 824b7eaed25SJason Evans if (pthread_create_fptr == NULL) { 825b7eaed25SJason Evans can_enable_background_thread = false; 826b7eaed25SJason Evans if (config_lazy_lock || opt_background_thread) { 827b7eaed25SJason Evans malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " 828b7eaed25SJason Evans "\"pthread_create\")\n"); 829b7eaed25SJason Evans abort(); 830b7eaed25SJason Evans } 831b7eaed25SJason Evans } else { 832b7eaed25SJason Evans can_enable_background_thread = true; 833b7eaed25SJason Evans } 834b7eaed25SJason Evans #endif 835b7eaed25SJason Evans return false; 836b7eaed25SJason Evans } 837b7eaed25SJason Evans 838b7eaed25SJason Evans bool 839b7eaed25SJason Evans background_thread_boot1(tsdn_t *tsdn) { 840b7eaed25SJason Evans #ifdef JEMALLOC_BACKGROUND_THREAD 841b7eaed25SJason Evans assert(have_background_thread); 842b7eaed25SJason Evans assert(narenas_total_get() > 0); 843b7eaed25SJason Evans 844b7eaed25SJason 
Evans background_thread_enabled_set(tsdn, opt_background_thread); 845b7eaed25SJason Evans if (malloc_mutex_init(&background_thread_lock, 846b7eaed25SJason Evans "background_thread_global", 847b7eaed25SJason Evans WITNESS_RANK_BACKGROUND_THREAD_GLOBAL, 848b7eaed25SJason Evans malloc_mutex_rank_exclusive)) { 849b7eaed25SJason Evans return true; 850b7eaed25SJason Evans } 851b7eaed25SJason Evans if (opt_background_thread) { 852b7eaed25SJason Evans background_thread_ctl_init(tsdn); 853b7eaed25SJason Evans } 854b7eaed25SJason Evans 855b7eaed25SJason Evans background_thread_info = (background_thread_info_t *)base_alloc(tsdn, 856b7eaed25SJason Evans b0get(), ncpus * sizeof(background_thread_info_t), CACHELINE); 857b7eaed25SJason Evans if (background_thread_info == NULL) { 858b7eaed25SJason Evans return true; 859b7eaed25SJason Evans } 860b7eaed25SJason Evans 861b7eaed25SJason Evans for (unsigned i = 0; i < ncpus; i++) { 862b7eaed25SJason Evans background_thread_info_t *info = &background_thread_info[i]; 863b7eaed25SJason Evans /* Thread mutex is rank_inclusive because of thread0. */ 864b7eaed25SJason Evans if (malloc_mutex_init(&info->mtx, "background_thread", 865b7eaed25SJason Evans WITNESS_RANK_BACKGROUND_THREAD, 866b7eaed25SJason Evans malloc_mutex_address_ordered)) { 867b7eaed25SJason Evans return true; 868b7eaed25SJason Evans } 869b7eaed25SJason Evans if (pthread_cond_init(&info->cond, NULL)) { 870b7eaed25SJason Evans return true; 871b7eaed25SJason Evans } 872b7eaed25SJason Evans malloc_mutex_lock(tsdn, &info->mtx); 873b7eaed25SJason Evans info->state = background_thread_stopped; 874b7eaed25SJason Evans background_thread_info_init(tsdn, info); 875b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &info->mtx); 876b7eaed25SJason Evans } 877b7eaed25SJason Evans #endif 878b7eaed25SJason Evans 879b7eaed25SJason Evans return false; 880b7eaed25SJason Evans } 881