/freebsd/contrib/jemalloc/src/ |
H A D | tsd.c |
   35   * have a dependency on tsd. So we define the struct here, and only refer to it
   69  tsd_in_nominal_list(tsd_t *tsd) {
   73       * We don't know that tsd is nominal; it might not be safe to get data
   78      if (tsd == tsd_list) {
   88  tsd_add_nominal(tsd_t *tsd) {
   89      assert(!tsd_in_nominal_list(tsd));
   90      assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
   91      ql_elm_new(tsd, TSD_MANGLE(tcache).tsd_link);
   92      malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
   93      ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link);
  [all …]
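
The snippet above shows the nominal-list bookkeeping: every live tsd is registered on a global linked list under a dedicated mutex, so fork handlers and state changes can walk all threads. A minimal sketch of that pattern in portable C follows; it is an illustration only, with a plain doubly-linked list and a pthread mutex standing in for jemalloc's ql_ macros, TSD_MANGLE field naming, and malloc_mutex machinery, and with a hypothetical tsd_t layout.

    #include <assert.h>
    #include <pthread.h>
    #include <stddef.h>

    /* Hypothetical stand-in for jemalloc's tsd_t; only the link fields matter. */
    typedef struct tsd_s tsd_t;
    struct tsd_s {
        tsd_t *link_prev;
        tsd_t *link_next;
    };

    static tsd_t *tsd_nominal_head = NULL;
    static pthread_mutex_t tsd_nominal_lock = PTHREAD_MUTEX_INITIALIZER;

    /* A tsd is "nominal" iff it is currently registered on the list. */
    static int
    tsd_in_nominal_list(tsd_t *tsd) {
        int found = 0;
        pthread_mutex_lock(&tsd_nominal_lock);
        for (tsd_t *it = tsd_nominal_head; it != NULL; it = it->link_next) {
            if (it == tsd) {
                found = 1;
                break;
            }
        }
        pthread_mutex_unlock(&tsd_nominal_lock);
        return found;
    }

    static void
    tsd_add_nominal(tsd_t *tsd) {
        assert(!tsd_in_nominal_list(tsd));
        pthread_mutex_lock(&tsd_nominal_lock);
        tsd->link_prev = NULL;
        tsd->link_next = tsd_nominal_head;
        if (tsd_nominal_head != NULL) {
            tsd_nominal_head->link_prev = tsd;
        }
        tsd_nominal_head = tsd;
        pthread_mutex_unlock(&tsd_nominal_lock);
    }

Note that this inserts at the head for brevity; the real code tail-inserts, which matters only for iteration order.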
|
H A D | tcache.c |
   41  tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
   55          tcache_bin_flush_small(tsd, tcache, tbin, binind,
   68          tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
  134  tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
  148      tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
  152          item_extent[i] = iealloc(tsd_tsdn(tsd),
  160      arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind,
  167          if (arena_prof_accum(tsd_tsdn(tsd), arena,
  169              prof_idump(tsd_tsdn(tsd));
  174      malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
  [all …]
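
tcache_bin_flush_small() walks a thread-local bin and returns a batch of cached objects to the owning arena bin inside a single lock acquisition. The sketch below reproduces only that batching shape, under the assumption of a fixed-size LIFO bin; the extent lookups, prof accumulation, and arena stats of the real function are omitted, and free() stands in for handing objects back to the central bin.

    #include <assert.h>
    #include <pthread.h>
    #include <stdlib.h>

    /* Hypothetical, simplified cache bin: a LIFO stack of cached pointers. */
    typedef struct {
        void *items[64];            /* cached objects; items[0] is the oldest */
        unsigned ncached;
        pthread_mutex_t *bin_lock;  /* lock of the central bin we flush into */
    } cache_bin_t;

    /*
     * Flush all but `rem` entries back in one critical section, echoing the
     * batch structure of tcache_bin_flush_small().
     */
    static void
    cache_bin_flush(cache_bin_t *tbin, unsigned rem) {
        assert(rem <= tbin->ncached);
        pthread_mutex_lock(tbin->bin_lock);
        for (unsigned i = 0; i + rem < tbin->ncached; i++) {
            free(tbin->items[i]);   /* stand-in for central-bin dalloc */
        }
        pthread_mutex_unlock(tbin->bin_lock);
        /* Slide the surviving (newest) entries down to the bottom. */
        for (unsigned i = 0; i < rem; i++) {
            tbin->items[i] = tbin->items[tbin->ncached - rem + i];
        }
        tbin->ncached = rem;
    }

Batching the whole flush under one lock acquisition is the point: taking the bin lock per object would put a mutex round-trip on every cached free.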
|
H A D | jemalloc.c |
  381  arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
  382      arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
  386          tsd_iarena_set(tsd, arena);
  388          tsd_arena_set(tsd, arena);
  391      tsd_binshards_t *bins = tsd_binshardsp_get(tsd);
  401  arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
  404      oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
  405      newarena = arena_get(tsd_tsdn(tsd), newind, false);
  408      tsd_arena_set(tsd, newarena);
  412  arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
  [all …]
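
arena_bind()/arena_migrate() maintain two pieces of state: the per-thread arena pointer stored in tsd, and a per-arena thread count used for load decisions. A compact C11 sketch of that invariant, with a thread-local pointer standing in for the tsd slot and only the nthreads field of the arena modeled:

    #include <stdatomic.h>
    #include <stddef.h>

    /* Hypothetical minimal arena: only the thread count matters here. */
    typedef struct {
        atomic_uint nthreads;
        /* ... extents, bins, stats ... */
    } arena_t;

    /* Stand-in for the per-thread slot jemalloc keeps in tsd. */
    static _Thread_local arena_t *tsd_arena = NULL;

    static void
    arena_bind(arena_t *arena) {
        atomic_fetch_add_explicit(&arena->nthreads, 1, memory_order_relaxed);
        tsd_arena = arena;
    }

    /* Mirror of arena_migrate(): move this thread's binding between arenas. */
    static void
    arena_migrate(arena_t *oldarena, arena_t *newarena) {
        atomic_fetch_sub_explicit(&oldarena->nthreads, 1, memory_order_relaxed);
        atomic_fetch_add_explicit(&newarena->nthreads, 1, memory_order_relaxed);
        tsd_arena = newarena;
    }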
|
H A D | prof.c |
  237  static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
  240  static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
  313  prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
  325      tdata = prof_tdata_get(tsd, true);
  332      malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
  334      if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
  335          prof_tctx_destroy(tsd, tctx);
  337      malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
  365  prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
  367      malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
  [all …]
|
H A D | background_thread.c |
   54  bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
   55  bool background_threads_enable(tsd_t *tsd) NOT_REACHED
   56  bool background_threads_disable(tsd_t *tsd) NOT_REACHED
  311  background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
  313      malloc_mutex_assert_owner(tsd_tsdn(tsd),
  316      malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
  320      pre_reentrancy(tsd, NULL);
  321      malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
  331      malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
  334      post_reentrancy(tsd);
  [all …]
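
background_threads_disable_single() brackets its work with pre_reentrancy()/post_reentrancy() because operations such as pthread_create() can call back into malloc; the level tells nested allocations to take a minimal, recursion-safe path. A hedged sketch of that guard (the function names mirror jemalloc's, but the thread-local counter is a stand-in for the tsd slot):

    #include <pthread.h>
    #include <stddef.h>

    static _Thread_local unsigned reentrancy_level = 0;

    static void pre_reentrancy(void)  { reentrancy_level++; }
    static void post_reentrancy(void) { reentrancy_level--; }

    /* Nested mallocs consult this to pick a minimal, recursion-safe path. */
    static int in_reentrant_region(void) { return reentrancy_level > 0; }

    static int
    spawn_background_thread(pthread_t *thd, void *(*body)(void *)) {
        int err;
        /* pthread_create may call malloc internally; guard against recursion. */
        pre_reentrancy();
        err = pthread_create(thd, NULL, body, NULL);
        post_reentrancy();
        return err;
    }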
|
H A D | ctl.c |
    50  static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
   726  arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
   739          (struct container_s *)base_alloc(tsd_tsdn(tsd),
   747      ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
  1004  ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
  1017      if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
  1022      if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
  1118  ctl_init(tsd_t *tsd) {
  1120      tsdn_t *tsdn = tsd_tsdn(tsd);
  1154      if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
  [all …]
|
H A D | ckh.c |
   53  static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
   54  static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
  257  ckh_grow(tsd_t *tsd, ckh_t *ckh) {
  283      tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
  284          true, NULL, true, arena_ichoose(tsd, NULL));
  296          idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
  301          idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
  312  ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
  327      tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
  328          true, arena_ichoose(tsd, NULL));
  [all …]
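
ckh_grow() builds a larger table on the side, rehashes every item into it, and frees the old table only once everything fits, retrying at the next power of two on failure. The sketch below reproduces that grow-and-rehash shape with a toy open-addressing pointer set rather than real cuckoo hashing, and with calloc/free in place of ipallocztm/idalloctm:

    #include <stdlib.h>

    /* Hypothetical hash set of pointers; stands in for ckh_t. */
    typedef struct {
        const void **tab;
        size_t nslots;      /* always a power of two */
        size_t count;
    } hset_t;

    static size_t
    slot_of(const void *key, size_t nslots) {
        return ((size_t)key >> 4) & (nslots - 1); /* toy hash, illustration only */
    }

    static int
    hset_insert_noresize(hset_t *h, const void *key) {
        size_t i = slot_of(key, h->nslots);
        for (size_t probe = 0; probe < h->nslots; probe++) {
            size_t j = (i + probe) & (h->nslots - 1);
            if (h->tab[j] == NULL) {
                h->tab[j] = key;
                h->count++;
                return 0;
            }
        }
        return 1; /* table full */
    }

    /*
     * Mirror of ckh_grow()'s shape: build the bigger table on the side, rehash
     * every item, and only swap + free the old table once everything fits.
     */
    static int
    hset_grow(hset_t *h) {
        size_t new_nslots = h->nslots * 2;
        for (;;) {
            const void **tab = calloc(new_nslots, sizeof(*tab));
            if (tab == NULL) {
                return 1;
            }
            hset_t bigger = { tab, new_nslots, 0 };
            int fail = 0;
            for (size_t i = 0; i < h->nslots && !fail; i++) {
                if (h->tab[i] != NULL) {
                    fail = hset_insert_noresize(&bigger, h->tab[i]);
                }
            }
            if (!fail) {
                free(h->tab);
                *h = bigger;
                return 0;
            }
            free(tab);
            new_nslots *= 2; /* ckh likewise retries at the next power of two */
        }
    }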
|
/freebsd/contrib/jemalloc/include/jemalloc/internal/ |
H A D | tsd.h |
   33   * Loading TSD data is on the critical path of basically all malloc operations.
   44   * Note: the entire tcache is embedded into TSD and spans multiple cachelines.
  110  tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
  111  void tsd_state_set(tsd_t *tsd, uint8_t new_state);
  112  void tsd_slow_update(tsd_t *tsd);
  113  void tsd_prefork(tsd_t *tsd);
  114  void tsd_postfork_parent(tsd_t *tsd);
  115  void tsd_postfork_child(tsd_t *tsd);
  149   * in such scenarios, we need tsd, but set up in such a way that no
  157   * What it says on the tin; tsd that hasn't been initialized. Note
  [all …]
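
The header's point about the critical path is that fetching TSD must cost one predictable branch in the common case: a one-byte state field is compared against "nominal", and anything else (uninitialized, post-fork, reincarnated) diverts to tsd_fetch_slow(). A simplified sketch, with invented state values and a trivial slow path:

    #include <stdint.h>

    /* Hypothetical state encoding: low values are "nominal" (fast path OK). */
    enum {
        tsd_state_nominal       = 0,
        tsd_state_nominal_slow  = 1, /* valid, but take the slow path */
        tsd_state_uninitialized = 5,
    };

    typedef struct {
        uint8_t state;
        /* ... tcache, arena pointer, prof data, ... */
    } tsd_t;

    static _Thread_local tsd_t tsd_tls = { tsd_state_uninitialized };

    static tsd_t *
    tsd_fetch_slow(tsd_t *tsd) {
        /* Real code initializes fields, registers the tsd, handles fork. */
        tsd->state = tsd_state_nominal;
        return tsd;
    }

    static inline tsd_t *
    tsd_fetch(void) {
        tsd_t *tsd = &tsd_tls;
        /* One predictable byte compare guards the entire malloc fast path. */
        if (tsd->state != tsd_state_nominal) {
            return tsd_fetch_slow(tsd);
        }
        return tsd;
    }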
|
H A D | prof_inlines_b.h |
   18  prof_tdata_get(tsd_t *tsd, bool create) {
   23      tdata = tsd_prof_tdata_get(tsd);
   26          if (tsd_nominal(tsd)) {
   27              tdata = prof_tdata_init(tsd);
   28              tsd_prof_tdata_set(tsd, tdata);
   31          tdata = prof_tdata_reinit(tsd, tdata);
   32          tsd_prof_tdata_set(tsd, tdata);
   83  prof_sample_check(tsd_t *tsd, size_t usize, bool update) {
   86      int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
   89      if (tsd_nominal(tsd)) {
  [all …]
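
prof_sample_check() implements byte-based sampling: each allocation's size is charged against a per-thread countdown, and the allocation is sampled when the countdown goes negative. A self-contained sketch, assuming a fixed re-arm interval where jemalloc draws a randomized one per sample:

    #include <stddef.h>
    #include <stdint.h>

    #define SAMPLE_INTERVAL ((int64_t)1 << 19) /* assumed fixed; real code
                                                  randomizes the interval */

    /* Hypothetical per-thread countdown, standing in for the tsd slot. */
    static _Thread_local int64_t bytes_until_sample = 0;

    /*
     * Decide whether this allocation should be sampled, in the spirit of
     * prof_sample_check(). `update` mirrors the real parameter: only commit
     * the countdown when the allocation will actually happen.
     */
    static int
    should_sample(size_t usize, int update) {
        int64_t remaining = bytes_until_sample - (int64_t)usize;
        if (update) {
            bytes_until_sample = remaining;
        }
        if (remaining >= 0) {
            return 0;        /* common case: not sampled */
        }
        if (update) {
            bytes_until_sample = SAMPLE_INTERVAL; /* re-arm for next sample */
        }
        return 1;
    }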
|
H A D | tcache_inlines.h |
   12  tcache_enabled_get(tsd_t *tsd) {
   13      return tsd_tcache_enabled_get(tsd);
   17  tcache_enabled_set(tsd_t *tsd, bool enabled) {
   18      bool was_enabled = tsd_tcache_enabled_get(tsd);
   21          tsd_tcache_data_init(tsd);
   23          tcache_cleanup(tsd);
   26      tsd_tcache_enabled_set(tsd, enabled);
   27      tsd_slow_update(tsd);
   31  tcache_event(tsd_t *tsd, tcache_t *tcache) {
   37          tcache_event_hard(tsd, tcache);
  [all …]
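
tcache_event() is the periodic-GC hook on the cache fast path: a per-thread ticker counts allocations down and calls tcache_event_hard() when it expires. A sketch of that ticker pattern, with invented field names and the event body left empty:

    /* Hypothetical ticker, standing in for jemalloc's ticker_t. */
    typedef struct {
        int tick;   /* counts down to the next event */
        int nticks; /* reload value */
    } ticker_t;

    static int
    ticker_tick(ticker_t *t) {
        if (--t->tick <= 0) {
            t->tick = t->nticks;
            return 1; /* time to do periodic work */
        }
        return 0;
    }

    typedef struct {
        ticker_t gc_ticker;
        /* ... cache bins ... */
    } tcache_t;

    static void
    tcache_event_hard(tcache_t *tcache) {
        /* Real code flushes part of one bin and advances to the next. */
        (void)tcache;
    }

    /* Inline, branch-light wrapper in the spirit of tcache_event(). */
    static inline void
    tcache_event(tcache_t *tcache) {
        if (ticker_tick(&tcache->gc_ticker)) {
            tcache_event_hard(tcache);
        }
    }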
|
H A D | jemalloc_internal_inlines_a.h |
   60  arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
   62      arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
   66          return arena_tdata_get_hard(tsd, ind);
   68      if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
   73      return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
   81      return arena_tdata_get_hard(tsd, ind);
  101  decay_ticker_get(tsd_t *tsd, unsigned ind) {
  104      tdata = arena_tdata_get(tsd, ind, true);
  124  tcache_available(tsd_t *tsd) {
  130      if (likely(tsd_tcache_enabled_get(tsd))) {
  [all …]
|
H A D | jemalloc_internal_inlines_b.h |
    8  arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
   16      if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
   17          return arena_get(tsd_tsdn(tsd), 0, true);
   20      ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
   22          ret = arena_choose_hard(tsd, internal);
   24          if (tcache_available(tsd)) {
   25              tcache_t *tcache = tcache_get(tsd);
   29                  arena_get(tsd_tsdn(tsd), 0, false));
   31                  tcache_arena_reassociate(tsd_tsdn(tsd),
   35                  tcache_arena_associate(tsd_tsdn(tsd), tcache,
  [all …]
|
H A D | tcache_externs.h |
   29  void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
   32  void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
   34  void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
   38  tcache_t *tcache_create_explicit(tsd_t *tsd);
   39  void tcache_cleanup(tsd_t *tsd);
   41  bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
   42  void tcaches_flush(tsd_t *tsd, unsigned ind);
   43  void tcaches_destroy(tsd_t *tsd, unsigned ind);
   49  void tcache_flush(tsd_t *tsd);
   50  bool tsd_tcache_data_init(tsd_t *tsd);
  [all …]
|
H A D | prof_externs.h |
   46  void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
   49  void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
   53  prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
   69  bool prof_mdump(tsd_t *tsd, const char *filename);
   71  prof_tdata_t *prof_tdata_init(tsd_t *tsd);
   72  prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
   73  void prof_reset(tsd_t *tsd, size_t lg_sample);
   74  void prof_tdata_cleanup(tsd_t *tsd);
   77  const char *prof_thread_name_get(tsd_t *tsd);
   78  int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
  [all …]
|
H A D | jemalloc_internal_externs.h |
    7  /* TSD checks this to set thread local slow state accordingly. */
   45  arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
   46  arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
   47  void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
   48  void iarena_cleanup(tsd_t *tsd);
   49  void arena_cleanup(tsd_t *tsd);
   50  void arenas_tdata_cleanup(tsd_t *tsd);
|
H A D | jemalloc_internal_inlines_c.h |
   61  ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
   62      return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
   93  ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
   94      return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
   95          tcache_get(tsd), false, NULL);
  122  idalloc(tsd_t *tsd, void *ptr) {
  123      idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
  197  iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
  199      return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
  200          tcache_get(tsd), NULL, hook_args);
|
H A D | arena_inlines_a.h |
   36  percpu_arena_update(tsd_t *tsd, unsigned cpu) {
   38      arena_t *oldarena = tsd_arena_get(tsd);
   44          arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
   48          arena_migrate(tsd, oldind, newind);
   49          tcache_t *tcache = tcache_get(tsd);
   51              tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
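
percpu_arena_update() checks which CPU the thread is currently running on and rebinds it to that CPU's arena if it has migrated. The sketch below assumes Linux's sched_getcpu() (FreeBSD needs a different primitive) and collapses the migrate step into inline, non-atomic counter updates, which the real code does properly via arena_migrate() and tcache reassociation:

    #define _GNU_SOURCE
    #include <sched.h>    /* sched_getcpu(); Linux-specific, an assumption */
    #include <stddef.h>

    #define NCPUS 64      /* assumed upper bound for the sketch */

    typedef struct { unsigned nthreads; } arena_t;
    static arena_t arenas[NCPUS];            /* one arena per CPU */
    static _Thread_local arena_t *tsd_arena = NULL;

    /* In the spirit of percpu_arena_update(): rebind when the thread moves. */
    static void
    percpu_arena_update(void) {
        int cpu = sched_getcpu();
        if (cpu < 0 || cpu >= NCPUS) {
            return;                           /* unsupported or out of range */
        }
        arena_t *newarena = &arenas[cpu];
        if (tsd_arena != newarena) {
            /* Real code migrates nthreads counts atomically and also
             * reassociates the tcache with the new arena. */
            if (tsd_arena != NULL) {
                tsd_arena->nthreads--;
            }
            newarena->nthreads++;
            tsd_arena = newarena;
        }
    }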
|
H A D | tsd_generic.h |
    2  #error This file should be included only once, by tsd.h.
   13  /* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */
   42          malloc_write("<jemalloc>: Error setting TSD\n");
   56          malloc_write("<jemalloc>: Error setting TSD\n");
   76          malloc_write("<jemalloc>: Error allocating TSD\n");
  107          malloc_write("<jemalloc>: Error allocating TSD\n");
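
tsd_generic.h is the fallback backend when no fast TLS is available: the tsd lives in a heap-allocated wrapper stored under a pthread key, and the "Error setting/allocating TSD" messages in the snippet are its failure paths. A sketch of that wrapper scheme, using plain malloc where jemalloc uses its internal base allocator:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical payload; jemalloc stores the whole tsd_t in the wrapper. */
    typedef struct {
        int initialized;
        /* ... per-thread allocator state ... */
    } tsd_t;

    typedef struct {
        tsd_t val;
    } tsd_wrapper_t;

    static pthread_key_t tsd_key;

    static void
    tsd_cleanup_wrapper(void *arg) {
        tsd_wrapper_t *wrapper = arg;
        /* Real code runs per-field cleanup and may re-arm the destructor. */
        free(wrapper);
    }

    static void
    tsd_boot(void) {
        if (pthread_key_create(&tsd_key, tsd_cleanup_wrapper) != 0) {
            fputs("<sketch>: Error allocating TSD\n", stderr);
            abort();
        }
    }

    static tsd_t *
    tsd_get(void) {
        tsd_wrapper_t *wrapper = pthread_getspecific(tsd_key);
        if (wrapper == NULL) {
            wrapper = malloc(sizeof(*wrapper));
            if (wrapper == NULL ||
                pthread_setspecific(tsd_key, wrapper) != 0) {
                fputs("<sketch>: Error setting TSD\n", stderr);
                abort();
            }
            wrapper->val.initialized = 0;
        }
        return &wrapper->val;
    }

The indirection through a malloc'd wrapper is what lets the destructor see a stable pointer even on platforms where TLS itself may be torn down before key destructors run.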
|
H A D | ckh.h |
    4  #include "jemalloc/internal/tsd.h"
   70  bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
   72  void ckh_delete(tsd_t *tsd, ckh_t *ckh);
   90  bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
   91  bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
|
/freebsd/contrib/llvm-project/compiler-rt/lib/scudo/standalone/ |
H A D | tsd_shared.h |
   12  #include "tsd.h"
   39    TSD<Allocator> &operator*() { return *CurrentTSD; }
   41    TSD<Allocator> *operator->() {
   47    TSD<Allocator> *CurrentTSD;
  111      // Not supported by the TSD Registry, but not an error either.
  124      // Theoretically, we want to mark TSD::lock()/TSD::unlock() with proper
  125      // thread annotations. However, given the TSD is only locked on shared
  129        Str->append(" Shared TSD[%zu]:\n", I);
  136    ALWAYS_INLINE TSD<Allocator> *getTSDAndLock() NO_THREAD_SAFETY_ANALYSIS {
  137      TSD<Allocator> *TSD = getCurrentTSD();
  [all …]
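
The shared registry keeps a small pool of TSDs rather than one per thread; getTSDAndLock() first trylocks the thread's cached slot and, on contention, steals whichever slot it can trylock, blocking only as a last resort. A C rendering of that strategy (scudo's version is a C++ template with slot-precedence tracking; the pool size here is an assumption):

    #include <pthread.h>
    #include <stddef.h>

    #define NTSDS 4 /* assumed pool size; scudo sizes this from thread count */

    typedef struct {
        pthread_mutex_t mtx;
        /* ... per-TSD allocation cache ... */
    } tsd_t;

    static tsd_t tsd_pool[NTSDS] = {
        { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
        { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    };
    static _Thread_local tsd_t *current_tsd = NULL;

    /*
     * In the spirit of getTSDAndLock(): prefer the cached slot, otherwise
     * scan for any uncontended slot, and only block as a last resort.
     */
    static tsd_t *
    get_tsd_and_lock(void) {
        if (current_tsd != NULL &&
            pthread_mutex_trylock(&current_tsd->mtx) == 0) {
            return current_tsd;
        }
        for (int i = 0; i < NTSDS; i++) {
            if (pthread_mutex_trylock(&tsd_pool[i].mtx) == 0) {
                current_tsd = &tsd_pool[i];
                return current_tsd;
            }
        }
        /* Everything is contended: fall back to blocking on a slot. */
        if (current_tsd == NULL) {
            current_tsd = &tsd_pool[0];
        }
        pthread_mutex_lock(&current_tsd->mtx);
        return current_tsd;
    }

The trade-off versus the exclusive registry below is memory for contention: a fixed pool bounds cache footprint, at the price of occasional lock stealing.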
|
H A D | tsd_exclusive.h |
   12  #include "tsd.h"
   43    TSD<Allocator> &operator*() { return *CurrentTSD; }
   45    TSD<Allocator> *operator->() {
   51    TSD<Allocator> *CurrentTSD;
  102  // To disable the exclusive TSD registry, we effectively lock the fallback TSD
  130      Str->append("Exclusive TSD don't support iterating each TSD\n");
  134    ALWAYS_INLINE TSD<Allocator> *
  163    TSD<Allocator> FallbackTSD;
  166    static thread_local TSD<Allocator> ThreadTSD;
  172  thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
|
H A D | tsd.h |
    1  //===-- tsd.h ---------------------------------------------------*- C++ -*-===//
   27  template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
   28    using ThisT = TSD<Allocator>;
   58    // As the comments attached to `getCache()`, the TSD doesn't always need to be
   68    // current architecture of accessing TSD is not easy to cooperate with the
   73    // TSD doesn't always require holding the lock. Add this assertion while the
|
/freebsd/contrib/llvm-project/compiler-rt/lib/memprof/ |
H A D | memprof_posix.cpp |
   26  // ---------------------- TSD ---------------- {{{1
   30  void TSDInit(void (*destructor)(void *tsd)) {
   41  void TSDSet(void *tsd) {
   43    pthread_setspecific(tsd_key, tsd);
   46  void PlatformTSDDtor(void *tsd) {
   47    MemprofThreadContext *context = (MemprofThreadContext *)tsd;
   50    CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
   53    MemprofThread::TSDDtor(tsd);
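
PlatformTSDDtor() exploits a POSIX detail: if a key's destructor re-sets the value, the runtime runs another destructor round, up to PTHREAD_DESTRUCTOR_ITERATIONS. Re-arming keeps the thread context alive until destructors registered on other keys have finished. A sketch with a hypothetical context type and an explicit round counter (the real code CHECKs the pthread_setspecific return value):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_key_t tsd_key;

    /* Hypothetical per-thread context, assumed heap-allocated. */
    typedef struct {
        int destructor_iterations; /* rounds left before really destroying */
    } thread_context_t;

    /*
     * POSIX may call key destructors several times if a destructor re-sets
     * the value. Like PlatformTSDDtor, we use that to delay teardown.
     */
    static void
    platform_tsd_dtor(void *tsd) {
        thread_context_t *context = tsd;
        if (context->destructor_iterations > 1) {
            context->destructor_iterations--;
            /* Re-arm: the runtime will invoke this destructor again. */
            pthread_setspecific(tsd_key, tsd);
            return;
        }
        free(context); /* final round: actually release the context */
    }

    static void
    tsd_init(void) {
        pthread_key_create(&tsd_key, platform_tsd_dtor);
    }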
|
/freebsd/contrib/llvm-project/compiler-rt/lib/msan/ |
H A D | msan_linux.cpp |
  218  // ---------------------- TSD ---------------- {{{1
  222  // Reuse the MSan TSD API for compatibility with existing code
  225  static void (*tsd_destructor)(void *tsd) = nullptr;
  239  void MsanTSDInit(void (*destructor)(void *tsd)) {
  249  void SetCurrentThread(MsanThread *tsd) {
  251    CHECK(tsd);
  253    key.key = tsd;
  256  void MsanTSDDtor(void *tsd) {
  258    CHECK_EQ(key.key, tsd);
  262    MsanThread::TSDDtor(tsd);
  [all …]
|
/freebsd/contrib/llvm-project/compiler-rt/lib/dfsan/ |
H A D | dfsan_thread.cpp |
   51  void DFsanThread::TSDDtor(void *tsd) {
   52    DFsanThread *t = (DFsanThread *)tsd;
   59    // some code may still be executing in later TSD destructors
  102  void DFsanTSDInit(void (*destructor)(void *tsd)) {
  121  void DFsanTSDDtor(void *tsd) {
  122    DFsanThread *t = (DFsanThread *)tsd;
  125    CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
  131    DFsanThread::TSDDtor(tsd);
|