#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"

/******************************************************************************/
/* Data. */

/* This option should be opt-in only. */
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT;

/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state.  Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
size_t max_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;

/* False if no necessary runtime support. */
bool can_enable_background_thread;

/******************************************************************************/

#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
#include <dlfcn.h>

static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);

static void
pthread_create_wrapper_init(void) {
#ifdef JEMALLOC_LAZY_LOCK
	if (!isthreaded) {
		isthreaded = true;
	}
#endif
}

int
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *__restrict arg) {
	pthread_create_wrapper_init();

	return pthread_create_fptr(thread, attr, start_routine, arg);
}
#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */

#ifndef JEMALLOC_BACKGROUND_THREAD
#define NOT_REACHED { not_reached(); }
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
bool background_thread_stats_read(tsdn_t *tsdn,
    background_thread_stats_t *stats) NOT_REACHED
void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
#undef NOT_REACHED
#else

static bool background_thread_enabled_at_fork;

static void
background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
	background_thread_wakeup_time_set(tsdn, info, 0);
	info->npages_to_purge_new = 0;
	if (config_stats) {
		info->tot_n_runs = 0;
		nstime_init(&info->tot_sleep_time, 0);
	}
}

static inline bool
set_current_thread_affinity(UNUSED int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
	cpu_set_t cpuset;
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);

	return (ret != 0);
#else
	return false;
#endif
}

/* Threshold for determining when to wake up the background thread. */
#define BACKGROUND_THREAD_NPAGES_THRESHOLD	UINT64_C(1024)
#define BILLION	UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS	(BILLION / 10)

static inline size_t
decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
	size_t i;
	uint64_t sum = 0;
	for (i = 0; i < interval; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	for (; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
	}

	return (size_t)(sum >> SMOOTHSTEP_BFP);
}

static uint64_t
arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
    extents_t *extents) {
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* Use minimal interval if decay is contended. */
		return BACKGROUND_THREAD_MIN_INTERVAL_NS;
	}

	uint64_t interval;
	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is eagerly done or disabled currently. */
		interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
		goto label_done;
	}

	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);
	size_t npages = extents_npages_get(extents);
	if (npages == 0) {
		unsigned i;
		for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
			if (decay->backlog[i] > 0) {
				break;
			}
		}
		if (i == SMOOTHSTEP_NSTEPS) {
			/* No dirty pages recorded.  Sleep indefinitely. */
			interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
			goto label_done;
		}
	}
	if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		/* Use max interval. */
		interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
		goto label_done;
	}

	size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
	size_t ub = SMOOTHSTEP_NSTEPS;
	/* Minimal 2 intervals to ensure reaching next epoch deadline. */
	lb = (lb < 2) ? 2 : lb;
	if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
	    (lb + 2 > ub)) {
		interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
		goto label_done;
	}

	assert(lb + 2 <= ub);
	size_t npurge_lb, npurge_ub;
	npurge_lb = decay_npurge_after_interval(decay, lb);
	if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		interval = decay_interval_ns * lb;
		goto label_done;
	}
	npurge_ub = decay_npurge_after_interval(decay, ub);
	if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		interval = decay_interval_ns * ub;
		goto label_done;
	}

	unsigned n_search = 0;
	size_t target, npurge;
	while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
	    && (lb + 2 < ub)) {
		target = (lb + ub) / 2;
		npurge = decay_npurge_after_interval(decay, target);
		if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
			ub = target;
			npurge_ub = npurge;
		} else {
			lb = target;
			npurge_lb = npurge;
		}
		assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
	}
	interval = decay_interval_ns * (ub + lb) / 2;
label_done:
	interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
	    BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return interval;
}

/* Compute purge interval for background threads. */
static uint64_t
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
	uint64_t i1, i2;
	i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
	    &arena->extents_dirty);
	if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
		return i1;
	}
	i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
	    &arena->extents_muzzy);

	return i1 < i2 ? i1 : i2;
}

static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
    uint64_t interval) {
	if (config_stats) {
		info->tot_n_runs++;
	}
	info->npages_to_purge_new = 0;

	struct timeval tv;
	/* Specific clock required by timedwait. */
	gettimeofday(&tv, NULL);
	nstime_t before_sleep;
	nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);

	int ret;
	if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
		assert(background_thread_indefinite_sleep(info));
		ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
		assert(ret == 0);
	} else {
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
		    interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
		/* We need malloc clock (can be different from tv). */
		nstime_t next_wakeup;
		nstime_init(&next_wakeup, 0);
		nstime_update(&next_wakeup);
		nstime_iadd(&next_wakeup, interval);
		assert(nstime_ns(&next_wakeup) <
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
		background_thread_wakeup_time_set(tsdn, info,
		    nstime_ns(&next_wakeup));

		nstime_t ts_wakeup;
		nstime_copy(&ts_wakeup, &before_sleep);
		nstime_iadd(&ts_wakeup, interval);
		struct timespec ts;
		ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
		ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);

		assert(!background_thread_indefinite_sleep(info));
		ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
		assert(ret == ETIMEDOUT || ret == 0);
		background_thread_wakeup_time_set(tsdn, info,
		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	}
	if (config_stats) {
		gettimeofday(&tv, NULL);
		nstime_t after_sleep;
		nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
		if (nstime_compare(&after_sleep, &before_sleep) > 0) {
			nstime_subtract(&after_sleep, &before_sleep);
			nstime_add(&info->tot_sleep_time, &after_sleep);
		}
	}
}

static bool
background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
	if (unlikely(info->state == background_thread_paused)) {
		malloc_mutex_unlock(tsdn, &info->mtx);
		/* Wait on global lock to update status. */
		malloc_mutex_lock(tsdn, &background_thread_lock);
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		malloc_mutex_lock(tsdn, &info->mtx);
		return true;
	}

	return false;
}

static inline void
background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) {
	uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
	unsigned narenas = narenas_total_get();

	for (unsigned i = ind; i < narenas; i += max_background_threads) {
		arena_t *arena = arena_get(tsdn, i, false);
		if (!arena) {
			continue;
		}
		arena_decay(tsdn, arena, true, false);
		if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
			/* Min interval will be used. */
			continue;
		}
		uint64_t interval = arena_decay_compute_purge_interval(tsdn,
		    arena);
		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
		if (min_interval > interval) {
			min_interval = interval;
		}
	}
	background_thread_sleep(tsdn, info, min_interval);
}

static bool
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
	if (info == &background_thread_info[0]) {
		malloc_mutex_assert_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	} else {
		malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
		    &background_thread_lock);
	}

	pre_reentrancy(tsd, NULL);
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	bool has_thread;
	assert(info->state != background_thread_paused);
	if (info->state == background_thread_started) {
		has_thread = true;
		info->state = background_thread_stopped;
		pthread_cond_signal(&info->cond);
	} else {
		has_thread = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

	if (!has_thread) {
		post_reentrancy(tsd);
		return false;
	}
	void *ret;
	if (pthread_join(info->thread, &ret)) {
		post_reentrancy(tsd);
		return true;
	}
	assert(ret == NULL);
	n_background_threads--;
	post_reentrancy(tsd);

	return false;
}

static void *background_thread_entry(void *ind_arg);

static int
background_thread_create_signals_masked(pthread_t *thread,
    const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
	/*
	 * Mask signals during thread creation so that the thread inherits
	 * an empty signal set.
	 */
	sigset_t set;
	sigfillset(&set);
	sigset_t oldset;
	int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
	if (mask_err != 0) {
		return mask_err;
	}
	int create_err = pthread_create_wrapper(thread, attr, start_routine,
	    arg);
	/*
	 * Restore the signal mask.  Failure to restore the signal mask here
	 * changes program behavior.
	 */
	int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
	if (restore_err != 0) {
		malloc_printf("<jemalloc>: background thread creation "
		    "failed (%d), and signal mask restoration failed "
		    "(%d)\n", create_err, restore_err);
		if (opt_abort) {
			abort();
		}
	}
	return create_err;
}

static bool
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
    bool *created_threads) {
	bool ret = false;
	if (likely(*n_created == n_background_threads)) {
		return ret;
	}

	tsdn_t *tsdn = tsd_tsdn(tsd);
	malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
	for (unsigned i = 1; i < max_background_threads; i++) {
		if (created_threads[i]) {
			continue;
		}
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		/*
		 * In case of the background_thread_paused state because of
		 * arena reset, delay the creation.
		 */
		bool create = (info->state == background_thread_started);
		malloc_mutex_unlock(tsdn, &info->mtx);
		if (!create) {
			continue;
		}

		pre_reentrancy(tsd, NULL);
		int err = background_thread_create_signals_masked(&info->thread,
		    NULL, background_thread_entry, (void *)(uintptr_t)i);
		post_reentrancy(tsd);

		if (err == 0) {
			(*n_created)++;
			created_threads[i] = true;
		} else {
			malloc_printf("<jemalloc>: background thread "
			    "creation failed (%d)\n", err);
			if (opt_abort) {
				abort();
			}
		}
		/* Return to restart the loop since we unlocked. */
		ret = true;
		break;
	}
	malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);

	return ret;
}

static void
background_thread0_work(tsd_t *tsd) {
	/* Thread0 is also responsible for launching / terminating threads. */
	VARIABLE_ARRAY(bool, created_threads, max_background_threads);
	unsigned i;
	for (i = 1; i < max_background_threads; i++) {
		created_threads[i] = false;
	}
	/* Start working, and create more threads when asked. */
	unsigned n_created = 1;
	while (background_thread_info[0].state != background_thread_stopped) {
		if (background_thread_pause_check(tsd_tsdn(tsd),
		    &background_thread_info[0])) {
			continue;
		}
		if (check_background_thread_creation(tsd, &n_created,
		    (bool *)&created_threads)) {
			continue;
		}
		background_work_sleep_once(tsd_tsdn(tsd),
		    &background_thread_info[0], 0);
	}

	/*
	 * Shut down other threads at exit.  Note that the ctl thread is holding
	 * the global background_thread mutex (and is waiting) for us.
	 */
	assert(!background_thread_enabled());
	for (i = 1; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		assert(info->state != background_thread_paused);
		if (created_threads[i]) {
			background_threads_disable_single(tsd, info);
		} else {
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
			if (info->state != background_thread_stopped) {
				/* The thread was not created. */
				assert(info->state ==
				    background_thread_started);
				n_background_threads--;
				info->state = background_thread_stopped;
			}
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
	}
	background_thread_info[0].state = background_thread_stopped;
	assert(n_background_threads == 1);
}

static void
background_work(tsd_t *tsd, unsigned ind) {
	background_thread_info_t *info = &background_thread_info[ind];

	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
	    BACKGROUND_THREAD_INDEFINITE_SLEEP);
	if (ind == 0) {
		background_thread0_work(tsd);
	} else {
		while (info->state != background_thread_stopped) {
			if (background_thread_pause_check(tsd_tsdn(tsd),
			    info)) {
				continue;
			}
			background_work_sleep_once(tsd_tsdn(tsd), info, ind);
		}
	}
	assert(info->state == background_thread_stopped);
	background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}

static void *
background_thread_entry(void *ind_arg) {
	unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
	assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
	pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#endif
	if (opt_percpu_arena != percpu_arena_disabled) {
		set_current_thread_affinity((int)thread_ind);
	}
	/*
	 * Start periodic background work.  We use internal tsd which avoids
	 * side effects, for example triggering new arena creation (which in
	 * turn triggers another background thread creation).
	 */
	background_work(tsd_internal_fetch(), thread_ind);
	assert(pthread_equal(pthread_self(),
	    background_thread_info[thread_ind].thread));

	return NULL;
}

static void
background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
	info->state = background_thread_started;
	background_thread_info_init(tsd_tsdn(tsd), info);
	n_background_threads++;
}

/* Create a new background thread if needed. */
bool
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
	assert(have_background_thread);
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	/* We create at most NCPUs threads. */
	size_t thread_ind = arena_ind % max_background_threads;
	background_thread_info_t *info = &background_thread_info[thread_ind];

	bool need_new_thread;
	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	need_new_thread = background_thread_enabled() &&
	    (info->state == background_thread_stopped);
	if (need_new_thread) {
		background_thread_init(tsd, info);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	if (!need_new_thread) {
		return false;
	}
	if (arena_ind != 0) {
		/* Threads are created asynchronously by Thread 0. */
		background_thread_info_t *t0 = &background_thread_info[0];
		malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
		assert(t0->state == background_thread_started);
		pthread_cond_signal(&t0->cond);
		malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);

		return false;
	}

	pre_reentrancy(tsd, NULL);
	/*
	 * To avoid complications (besides reentrancy), create internal
	 * background threads with the underlying pthread_create.
	 */
	int err = background_thread_create_signals_masked(&info->thread, NULL,
	    background_thread_entry, (void *)thread_ind);
	post_reentrancy(tsd);

	if (err != 0) {
		malloc_printf("<jemalloc>: arena 0 background thread creation "
		    "failed (%d)\n", err);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		info->state = background_thread_stopped;
		n_background_threads--;
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

		return true;
	}

	return false;
}

bool
background_threads_enable(tsd_t *tsd) {
	assert(n_background_threads == 0);
	assert(background_thread_enabled());
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	VARIABLE_ARRAY(bool, marked, max_background_threads);
	unsigned i, nmarked;
	for (i = 0; i < max_background_threads; i++) {
		marked[i] = false;
	}
	nmarked = 0;
	/* Thread 0 is required and created at the end. */
	marked[0] = true;
	/* Mark the threads we need to create for thread 0. */
	unsigned n = narenas_total_get();
	for (i = 1; i < n; i++) {
		if (marked[i % max_background_threads] ||
		    arena_get(tsd_tsdn(tsd), i, false) == NULL) {
			continue;
		}
		background_thread_info_t *info = &background_thread_info[
		    i % max_background_threads];
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		assert(info->state == background_thread_stopped);
		background_thread_init(tsd, info);
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		marked[i % max_background_threads] = true;
		if (++nmarked == max_background_threads) {
			break;
		}
	}

	return background_thread_create(tsd, 0);
}

bool
background_threads_disable(tsd_t *tsd) {
	assert(!background_thread_enabled());
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	/* Thread 0 will be responsible for terminating other threads. */
	if (background_threads_disable_single(tsd,
	    &background_thread_info[0])) {
		return true;
	}
	assert(n_background_threads == 0);

	return false;
}

/* Check if we need to signal the background thread early. */
void
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new) {
	background_thread_info_t *info = arena_background_thread_info_get(
	    arena);
	if (malloc_mutex_trylock(tsdn, &info->mtx)) {
		/*
		 * Background thread may hold the mutex for a long period of
		 * time.  We'd like to avoid the variance on application
		 * threads.  So keep this non-blocking, and leave the work to a
		 * future epoch.
		 */
		return;
	}

	if (info->state != background_thread_started) {
		goto label_done;
	}
	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		goto label_done;
	}

	ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
	if (decay_time <= 0) {
		/* Purging is eagerly done or disabled currently. */
		goto label_done_unlock2;
	}
	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
	assert(decay_interval_ns > 0);

	nstime_t diff;
	nstime_init(&diff, background_thread_wakeup_time_get(info));
	if (nstime_compare(&diff, &decay->epoch) <= 0) {
		goto label_done_unlock2;
	}
	nstime_subtract(&diff, &decay->epoch);
	if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
		goto label_done_unlock2;
	}

	if (npages_new > 0) {
		size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
		/*
		 * Compute how many new pages we would need to purge by the next
		 * wakeup, which is used to determine if we should signal the
		 * background thread.
		 */
		uint64_t npurge_new;
		if (n_epoch >= SMOOTHSTEP_NSTEPS) {
			npurge_new = npages_new;
		} else {
			uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
			assert(h_steps_max >=
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new = npages_new * (h_steps_max -
			    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
			npurge_new >>= SMOOTHSTEP_BFP;
		}
		info->npages_to_purge_new += npurge_new;
	}

	bool should_signal;
	if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
		should_signal = true;
	} else if (unlikely(background_thread_indefinite_sleep(info)) &&
	    (extents_npages_get(&arena->extents_dirty) > 0 ||
	    extents_npages_get(&arena->extents_muzzy) > 0 ||
	    info->npages_to_purge_new > 0)) {
		should_signal = true;
	} else {
		should_signal = false;
	}

	if (should_signal) {
		info->npages_to_purge_new = 0;
		pthread_cond_signal(&info->cond);
	}
label_done_unlock2:
	malloc_mutex_unlock(tsdn, &decay->mtx);
label_done:
	malloc_mutex_unlock(tsdn, &info->mtx);
}

void
background_thread_prefork0(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &background_thread_lock);
	background_thread_enabled_at_fork = background_thread_enabled();
}

void
background_thread_prefork1(tsdn_t *tsdn) {
	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
	}
}

void
background_thread_postfork_parent(tsdn_t *tsdn) {
	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_postfork_parent(tsdn,
		    &background_thread_info[i].mtx);
	}
	malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
}

void
background_thread_postfork_child(tsdn_t *tsdn) {
	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_postfork_child(tsdn,
		    &background_thread_info[i].mtx);
	}
	malloc_mutex_postfork_child(tsdn, &background_thread_lock);
	if (!background_thread_enabled_at_fork) {
		return;
	}

	/* Clear background_thread state (reset to disabled for child). */
	malloc_mutex_lock(tsdn, &background_thread_lock);
	n_background_threads = 0;
	background_thread_enabled_set(tsdn, false);
	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		info->state = background_thread_stopped;
		int ret = pthread_cond_init(&info->cond, NULL);
		assert(ret == 0);
		background_thread_info_init(tsdn, info);
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
	malloc_mutex_unlock(tsdn, &background_thread_lock);
}

bool
background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
	assert(config_stats);
	malloc_mutex_lock(tsdn, &background_thread_lock);
	if (!background_thread_enabled()) {
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		return true;
	}

	stats->num_threads = n_background_threads;
	uint64_t num_runs = 0;
	nstime_init(&stats->run_interval, 0);
	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		if (info->state != background_thread_stopped) {
			num_runs += info->tot_n_runs;
			nstime_add(&stats->run_interval, &info->tot_sleep_time);
		}
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
	stats->num_runs = num_runs;
	if (num_runs > 0) {
		nstime_idivide(&stats->run_interval, num_runs);
	}
	malloc_mutex_unlock(tsdn, &background_thread_lock);

	return false;
}

#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
#undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS

static bool
pthread_create_fptr_init(void) {
	if (pthread_create_fptr != NULL) {
		return false;
	}
	pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
	if (pthread_create_fptr == NULL) {
		can_enable_background_thread = false;
		if (config_lazy_lock || opt_background_thread) {
			malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
			    "\"pthread_create\")\n");
			abort();
		}
	} else {
		can_enable_background_thread = true;
	}

	return false;
}

/*
 * When lazy lock is enabled, we need to make sure setting isthreaded before
 * taking any background_thread locks.  This is called early in ctl (instead of
 * wait for the pthread_create calls to trigger) because the mutex is required
 * before creating background threads.
 */
void
background_thread_ctl_init(tsdn_t *tsdn) {
	malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
	pthread_create_fptr_init();
	pthread_create_wrapper_init();
#endif
}

#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */

bool
background_thread_boot0(void) {
	if (!have_background_thread && opt_background_thread) {
		malloc_printf("<jemalloc>: option background_thread currently "
		    "supports pthread only\n");
		return true;
	}
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
	if ((config_lazy_lock || opt_background_thread) &&
	    pthread_create_fptr_init()) {
		return true;
	}
#endif
	return false;
}

bool
background_thread_boot1(tsdn_t *tsdn) {
#ifdef JEMALLOC_BACKGROUND_THREAD
	assert(have_background_thread);
	assert(narenas_total_get() > 0);

	if (opt_max_background_threads == MAX_BACKGROUND_THREAD_LIMIT &&
	    ncpus < MAX_BACKGROUND_THREAD_LIMIT) {
		opt_max_background_threads = ncpus;
	}
	max_background_threads = opt_max_background_threads;

	background_thread_enabled_set(tsdn, opt_background_thread);
	if (malloc_mutex_init(&background_thread_lock,
	    "background_thread_global",
	    WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
	    b0get(), opt_max_background_threads *
	    sizeof(background_thread_info_t), CACHELINE);
	if (background_thread_info == NULL) {
		return true;
	}

	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		/* Thread mutex is rank_inclusive because of thread0. */
		if (malloc_mutex_init(&info->mtx, "background_thread",
		    WITNESS_RANK_BACKGROUND_THREAD,
		    malloc_mutex_address_ordered)) {
			return true;
		}
		if (pthread_cond_init(&info->cond, NULL)) {
			return true;
		}
		malloc_mutex_lock(tsdn, &info->mtx);
		info->state = background_thread_stopped;
		background_thread_info_init(tsdn, info);
		malloc_mutex_unlock(tsdn, &info->mtx);
	}
#endif

	return false;
}