#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/prof_data.h"

/*
 * This file defines and manages the core profiling data structures.
 *
 * Conceptually, profiling data can be imagined as a table with three columns:
 * thread, stack trace, and current allocation size. (When prof_accum is on,
 * there's one additional column which is the cumulative allocation size.)
 *
 * Implementation-wise, each thread maintains a hash recording the stack trace
 * to allocation size correspondences, which are basically the individual rows
 * in the table. In addition, two global "indices" are built to make data
 * aggregation efficient (for dumping): bt2gctx and tdatas, which are basically
 * the "grouped by stack trace" and "grouped by thread" views of the same table,
 * respectively. Note that the allocation size is only aggregated to the two
 * indices at dumping time, so as to optimize for performance.
 */
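/*
 * For illustration, a tiny instance of that conceptual table (all values
 * hypothetical):
 *
 *	thread	stack trace	cur size	(cum size, if prof_accum)
 *	t0	0xa 0xb		4096		8192
 *	t0	0xc 0xd		0		32768
 *	t1	0xa 0xb		16384		16384
 *
 * bt2gctx groups these rows by the stack trace column; tdatas groups them
 * by the thread column.
 */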
/******************************************************************************/

malloc_mutex_t bt2gctx_mtx;
malloc_mutex_t tdatas_mtx;
malloc_mutex_t prof_dump_mtx;

/*
 * Table of mutexes that are shared among gctx's. These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same time.
 * The primary motivation for this sharing though is that gctx's are ephemeral,
 * and destroying mutexes causes complications for systems that allocate when
 * creating/destroying mutexes.
 */
malloc_mutex_t *gctx_locks;
static atomic_u_t cum_gctxs; /* Atomic counter. */

/*
 * Table of mutexes that are shared among tdata's. No operations require
 * holding multiple tdata locks, so there is no problem with using them for more
 * than one tdata at the same time, even though a gctx lock may be acquired
 * while holding a tdata lock.
 */
malloc_mutex_t *tdata_locks;

/*
 * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t bt2gctx;

/*
 * Tree of all extant prof_tdata_t structures, regardless of state,
 * {attached,detached,expired}.
 */
static prof_tdata_tree_t tdatas;

size_t prof_unbiased_sz[PROF_SC_NSIZES];
size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES];

/******************************************************************************/
/* Red-black trees. */

static int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
	uint64_t a_thr_uid = a->thr_uid;
	uint64_t b_thr_uid = b->thr_uid;
	int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
	if (ret == 0) {
		uint64_t a_thr_discrim = a->thr_discrim;
		uint64_t b_thr_discrim = b->thr_discrim;
		ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
		    b_thr_discrim);
		if (ret == 0) {
			uint64_t a_tctx_uid = a->tctx_uid;
			uint64_t b_tctx_uid = b->tctx_uid;
			ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
			    b_tctx_uid);
		}
	}
	return ret;
}

rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
    tctx_link, prof_tctx_comp)

static int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
	unsigned a_len = a->bt.len;
	unsigned b_len = b->bt.len;
	unsigned comp_len = (a_len < b_len) ? a_len : b_len;
	int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
	if (ret == 0) {
		ret = (a_len > b_len) - (a_len < b_len);
	}
	return ret;
}

rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
    prof_gctx_comp)

static int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
	int ret;
	uint64_t a_uid = a->thr_uid;
	uint64_t b_uid = b->thr_uid;

	ret = ((a_uid > b_uid) - (a_uid < b_uid));
	if (ret == 0) {
		uint64_t a_discrim = a->thr_discrim;
		uint64_t b_discrim = b->thr_discrim;

		ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
	}
	return ret;
}

rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
    prof_tdata_comp)

/******************************************************************************/

static malloc_mutex_t *
prof_gctx_mutex_choose(void) {
	unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);

	return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
}

static malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid) {
	return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
}
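/*
 * Striping example: if PROF_NTDATA_LOCKS is 256, thr_uid 5 and thr_uid 261
 * share tdata_locks[5]. That sharing is harmless because, per the comment
 * above tdata_locks, no operation ever holds two tdata locks at once.
 */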
bool
prof_data_init(tsd_t *tsd) {
	tdata_tree_new(&tdatas);
	return ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS,
	    prof_bt_hash, prof_bt_keycomp);
}

static void
prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	if (tdata != NULL) {
		assert(!tdata->enq);
		tdata->enq = true;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
}

static void
prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

	if (tdata != NULL) {
		bool idump, gdump;

		assert(tdata->enq);
		tdata->enq = false;
		idump = tdata->enq_idump;
		tdata->enq_idump = false;
		gdump = tdata->enq_gdump;
		tdata->enq_gdump = false;

		if (idump) {
			prof_idump(tsd_tsdn(tsd));
		}
		if (gdump) {
			prof_gdump(tsd_tsdn(tsd));
		}
	}
}

static prof_gctx_t *
prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
	/*
	 * Create a single allocation that has space for vec of length bt->len.
	 */
	size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
	prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
	    sz_size2index(size), false, NULL, true,
	    arena_get(TSDN_NULL, 0, true), true);
	if (gctx == NULL) {
		return NULL;
	}
	gctx->lock = prof_gctx_mutex_choose();
	/*
	 * Set nlimbo to 1, in order to avoid a race condition with
	 * prof_tctx_destroy()/prof_gctx_try_destroy().
	 */
	gctx->nlimbo = 1;
	tctx_tree_new(&gctx->tctxs);
	/* Duplicate bt. */
	memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
	gctx->bt.vec = gctx->vec;
	gctx->bt.len = bt->len;
	return gctx;
}

static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self,
    prof_gctx_t *gctx) {
	cassert(config_prof);

	/*
	 * Check that gctx is still unused by any thread cache before destroying
	 * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
	 * condition with this function, as does prof_tctx_destroy() in order to
	 * avoid a race between the main body of prof_tctx_destroy() and entry
	 * into this function.
	 */
	prof_enter(tsd, tdata_self);
	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
	assert(gctx->nlimbo != 0);
	if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
		/* Remove gctx from bt2gctx. */
		if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
			not_reached();
		}
		prof_leave(tsd, tdata_self);
		/* Destroy gctx. */
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
		idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
	} else {
		/*
		 * Compensate for increment in prof_tctx_destroy() or
		 * prof_lookup().
		 */
		gctx->nlimbo--;
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
		prof_leave(tsd, tdata_self);
	}
}

static bool
prof_gctx_should_destroy(prof_gctx_t *gctx) {
	if (opt_prof_accum) {
		return false;
	}
	if (!tctx_tree_empty(&gctx->tctxs)) {
		return false;
	}
	if (gctx->nlimbo != 0) {
		return false;
	}
	return true;
}

static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
	union {
		prof_gctx_t *p;
		void *v;
	} gctx, tgctx;
	union {
		prof_bt_t *p;
		void *v;
	} btkey;
	bool new_gctx;

	prof_enter(tsd, tdata);
	if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
		/* bt has never been seen before. Insert it. */
		prof_leave(tsd, tdata);
		tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
		if (tgctx.v == NULL) {
			return true;
		}
		prof_enter(tsd, tdata);
		if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
			gctx.p = tgctx.p;
			btkey.p = &gctx.p->bt;
			if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
				/* OOM. */
				prof_leave(tsd, tdata);
				idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
				    true, true);
				return true;
			}
			new_gctx = true;
		} else {
			new_gctx = false;
		}
	} else {
		tgctx.v = NULL;
		new_gctx = false;
	}

	if (!new_gctx) {
		/*
		 * Increment nlimbo, in order to avoid a race condition with
		 * prof_tctx_destroy()/prof_gctx_try_destroy().
		 */
		malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
		gctx.p->nlimbo++;
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
		new_gctx = false;

		if (tgctx.v != NULL) {
			/* Lost race to insert. */
			idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
			    true);
		}
	}
	prof_leave(tsd, tdata);

	*p_btkey = btkey.v;
	*p_gctx = gctx.p;
	*p_new_gctx = new_gctx;
	return false;
}
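/*
 * Note the optimistic pattern above: search under bt2gctx_mtx, drop the
 * lock before allocating (so no allocation happens while bt2gctx_mtx is
 * held), then re-search under the lock. If another thread inserted the
 * same backtrace during the unlocked window, the freshly created gctx is
 * simply freed ("lost race to insert").
 */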
prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
	union {
		prof_tctx_t *p;
		void *v;
	} ret;
	prof_tdata_t *tdata;
	bool not_found;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, false);
	assert(tdata != NULL);

	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
	not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
	if (!not_found) { /* Note double negative! */
		ret.p->prepared = true;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
	if (not_found) {
		void *btkey;
		prof_gctx_t *gctx;
		bool new_gctx, error;

		/*
		 * This thread's cache lacks bt. Look for it in the global
		 * cache.
		 */
		if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
		    &new_gctx)) {
			return NULL;
		}

		/* Link a prof_tctx_t into gctx for this thread. */
		ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
		    sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
		    arena_ichoose(tsd, NULL), true);
		if (ret.p == NULL) {
			if (new_gctx) {
				prof_gctx_try_destroy(tsd, tdata, gctx);
			}
			return NULL;
		}
		ret.p->tdata = tdata;
		ret.p->thr_uid = tdata->thr_uid;
		ret.p->thr_discrim = tdata->thr_discrim;
		ret.p->recent_count = 0;
		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
		ret.p->gctx = gctx;
		ret.p->tctx_uid = tdata->tctx_uid_next++;
		ret.p->prepared = true;
		ret.p->state = prof_tctx_state_initializing;
		malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
		error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
		malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
		if (error) {
			if (new_gctx) {
				prof_gctx_try_destroy(tsd, tdata, gctx);
			}
			idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
			return NULL;
		}
		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
		ret.p->state = prof_tctx_state_nominal;
		tctx_tree_insert(&gctx->tctxs, ret.p);
		gctx->nlimbo--;
		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
	}

	return ret.p;
}
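/*
 * Usage sketch (hypothetical caller with invented names pcs/npcs; real
 * callers capture the backtrace first, at allocation sampling time):
 *
 *	prof_bt_t bt;
 *	bt.vec = pcs;	// previously captured return addresses
 *	bt.len = npcs;
 *	prof_tctx_t *tctx = prof_lookup(tsd, &bt);
 *	if (tctx == NULL) {
 *		// OOM; this sampled allocation cannot be tracked.
 *	}
 */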
/* Used in unit tests. */
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
    void *arg) {
	size_t *tdata_count = (size_t *)arg;

	(*tdata_count)++;

	return NULL;
}

/* Used in unit tests. */
size_t
prof_tdata_count(void) {
	size_t tdata_count = 0;
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();
	malloc_mutex_lock(tsdn, &tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
	    (void *)&tdata_count);
	malloc_mutex_unlock(tsdn, &tdatas_mtx);

	return tdata_count;
}

/* Used in unit tests. */
size_t
prof_bt_count(void) {
	size_t bt_count;
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL) {
		return 0;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
	bt_count = ckh_count(&bt2gctx);
	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);

	return bt_count;
}

char *
prof_thread_name_alloc(tsd_t *tsd, const char *thread_name) {
	char *ret;
	size_t size;

	if (thread_name == NULL) {
		return NULL;
	}

	size = strlen(thread_name) + 1;
	if (size == 1) {
		return "";
	}

	ret = iallocztm(tsd_tsdn(tsd), size, sz_size2index(size), false, NULL,
	    true, arena_get(TSDN_NULL, 0, true), true);
	if (ret == NULL) {
		return NULL;
	}
	memcpy(ret, thread_name, size);
	return ret;
}

int
prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name) {
	assert(tsd_reentrancy_level_get(tsd) == 0);

	prof_tdata_t *tdata;
	unsigned i;
	char *s;

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL) {
		return EAGAIN;
	}

	/* Validate input. */
	if (thread_name == NULL) {
		return EFAULT;
	}
	for (i = 0; thread_name[i] != '\0'; i++) {
		char c = thread_name[i];
		if (!isgraph(c) && !isblank(c)) {
			return EFAULT;
		}
	}

	s = prof_thread_name_alloc(tsd, thread_name);
	if (s == NULL) {
		return EAGAIN;
	}

	if (tdata->thread_name != NULL) {
		idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
		    true);
		tdata->thread_name = NULL;
	}
	if (strlen(s) > 0) {
		tdata->thread_name = s;
	}
	return 0;
}

JEMALLOC_FORMAT_PRINTF(3, 4)
static void
prof_dump_printf(write_cb_t *prof_dump_write, void *cbopaque,
    const char *format, ...) {
	va_list ap;
	char buf[PROF_PRINTF_BUFSIZE];

	va_start(ap, format);
	malloc_vsnprintf(buf, sizeof(buf), format, ap);
	va_end(ap);
	prof_dump_write(cbopaque, buf);
}

/*
 * A double being cast to a uint64_t may not be in range; such a cast can be
 * UB. I don't think this is practically possible with the cur counters, but
 * plausibly could be with the accum counters.
 */
#ifdef JEMALLOC_PROF
static uint64_t
prof_double_uint64_cast(double d) {
	/*
	 * Note: UINT64_MAX + 1 is exactly representable as a double on all
	 * reasonable platforms (certainly those we'll support). Writing this
	 * as !(a < b) instead of (a >= b) means that we're NaN-safe.
	 */
	double rounded = round(d);
	if (!(rounded < (double)UINT64_MAX)) {
		return UINT64_MAX;
	}
	return (uint64_t)rounded;
}
#endif
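/*
 * Examples: prof_double_uint64_cast(1.5) == 2, while inputs at or above
 * 2^64, and NaN (for which !(rounded < (double)UINT64_MAX) also holds),
 * clamp to UINT64_MAX instead of hitting the out-of-range UB.
 */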
void
prof_unbias_map_init(void) {
	/* See the comment in prof_sample_new_event_wait */
#ifdef JEMALLOC_PROF
	for (szind_t i = 0; i < SC_NSIZES; i++) {
		double sz = (double)sz_index2size(i);
		double rate = (double)(ZU(1) << lg_prof_sample);
		double div_val = 1.0 - exp(-sz / rate);
		double unbiased_sz = sz / div_val;
		/*
		 * The "true" right value for the unbiased count is
		 * 1.0/(1 - exp(-sz/rate)). The problem is, we keep the counts
		 * as integers (for a variety of reasons -- rounding errors
		 * could trigger asserts, and not all libcs can properly handle
		 * floating point arithmetic during malloc calls inside libc).
		 * Rounding to an integer, though, can lead to rounding errors
		 * of over 30% for sizes close to the sampling rate. So
		 * instead, we multiply by a constant, dividing the maximum
		 * possible roundoff error by that constant. To avoid overflow
		 * in summing up size_t values, the largest safe constant we can
		 * pick is the size of the smallest allocation.
		 */
		double cnt_shift = (double)(ZU(1) << SC_LG_TINY_MIN);
		double shifted_unbiased_cnt = cnt_shift / div_val;
		prof_unbiased_sz[i] = (size_t)round(unbiased_sz);
		prof_shifted_unbiased_cnt[i] = (size_t)round(
		    shifted_unbiased_cnt);
	}
#else
	unreachable();
#endif
}
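/*
 * Worked example, assuming the default lg_prof_sample of 19
 * (rate = 2^19 = 524288): for sz = 4096,
 *	div_val     = 1 - exp(-4096/524288) ~= 0.0077820
 *	unbiased_sz = 4096 / 0.0077820      ~= 526343 (~= rate + sz/2)
 * so each sampled 4096-byte object stands in for roughly 128 objects'
 * worth of allocated bytes (526343/4096 ~= 128.5).
 */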
/*
 * The unbiasing story is long. The jeprof unbiasing logic was copied from
 * pprof. Both shared an issue: they unbiased using the average size of the
 * allocations at a particular stack trace. This can work out OK if allocations
 * are mostly of the same size given some stack, but not otherwise. We now
 * internally track what the unbiased results ought to be. We can't just report
 * them as they are though; they'll still go through the jeprof unbiasing
 * process. Instead, we figure out what values we can feed *into* jeprof's
 * unbiasing mechanism that will lead to getting the right values out.
 *
 * It'll unbias count and aggregate size as:
 *
 *   c_out = c_in * 1/(1-exp(-s_in/c_in/R))
 *   s_out = s_in * 1/(1-exp(-s_in/c_in/R))
 *
 * We want to solve for the values of c_in and s_in that will
 * give the c_out and s_out that we've computed internally.
 *
 * Let's do a change of variables (both to make the math easier and to make it
 * easier to write):
 *   x = s_in / c_in
 *   y = s_in
 *   k = 1/R.
 *
 * Then
 *   c_out = y/x * 1/(1-exp(-k*x))
 *   s_out = y * 1/(1-exp(-k*x))
 *
 * The first equation gives:
 *   y = x * c_out * (1-exp(-k*x))
 * The second gives:
 *   y = s_out * (1-exp(-k*x))
 * So we have
 *   x = s_out / c_out.
 * And all the other values fall out from that.
 *
 * This is all a fair bit of work. The thing we get out of it is that we don't
 * break backwards compatibility with jeprof (and the various tools that have
 * copied its unbiasing logic). Eventually, we anticipate a v3 heap profile
 * dump format based on JSON, at which point I think much of this logic can get
 * cleaned up (since we'll be taking a compatibility break there anyways).
 */
static void
prof_do_unbias(uint64_t c_out_shifted_i, uint64_t s_out_i, uint64_t *r_c_in,
    uint64_t *r_s_in) {
#ifdef JEMALLOC_PROF
	if (c_out_shifted_i == 0 || s_out_i == 0) {
		*r_c_in = 0;
		*r_s_in = 0;
		return;
	}
	/*
	 * See the note in prof_unbias_map_init() to see why we take c_out in a
	 * shifted form.
	 */
	double c_out = (double)c_out_shifted_i
	    / (double)(ZU(1) << SC_LG_TINY_MIN);
	double s_out = (double)s_out_i;
	double R = (double)(ZU(1) << lg_prof_sample);

	double x = s_out / c_out;
	double y = s_out * (1.0 - exp(-x / R));

	double c_in = y / x;
	double s_in = y;

	*r_c_in = prof_double_uint64_cast(c_in);
	*r_s_in = prof_double_uint64_cast(s_in);
#else
	unreachable();
#endif
}
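/*
 * Worked instance of the algebra above (ignoring the SC_LG_TINY_MIN shift,
 * which divides out first): with c_out = 2, s_out = 1048576 and R = 524288,
 * x = 524288 and y = 1048576 * (1 - exp(-1)) ~= 662826, so we report
 * c_in ~= 1.26 and s_in ~= 662826. jeprof then multiplies both by
 * 1/(1 - exp(-s_in/c_in/R)) = 1/(1 - exp(-1)) ~= 1.582, recovering
 * ~2 and ~1048576.
 */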
static void
prof_dump_print_cnts(write_cb_t *prof_dump_write, void *cbopaque,
    const prof_cnt_t *cnts) {
	uint64_t curobjs;
	uint64_t curbytes;
	uint64_t accumobjs;
	uint64_t accumbytes;
	if (opt_prof_unbias) {
		prof_do_unbias(cnts->curobjs_shifted_unbiased,
		    cnts->curbytes_unbiased, &curobjs, &curbytes);
		prof_do_unbias(cnts->accumobjs_shifted_unbiased,
		    cnts->accumbytes_unbiased, &accumobjs, &accumbytes);
	} else {
		curobjs = cnts->curobjs;
		curbytes = cnts->curbytes;
		accumobjs = cnts->accumobjs;
		accumbytes = cnts->accumbytes;
	}
	prof_dump_printf(prof_dump_write, cbopaque,
	    "%"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]",
	    curobjs, curbytes, accumobjs, accumbytes);
}

static void
prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
	malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);

	malloc_mutex_lock(tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_initializing:
		malloc_mutex_unlock(tsdn, tctx->gctx->lock);
		return;
	case prof_tctx_state_nominal:
		tctx->state = prof_tctx_state_dumping;
		malloc_mutex_unlock(tsdn, tctx->gctx->lock);

		memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));

		tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
		tdata->cnt_summed.curobjs_shifted_unbiased
		    += tctx->dump_cnts.curobjs_shifted_unbiased;
		tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
		tdata->cnt_summed.curbytes_unbiased
		    += tctx->dump_cnts.curbytes_unbiased;
		if (opt_prof_accum) {
			tdata->cnt_summed.accumobjs +=
			    tctx->dump_cnts.accumobjs;
			tdata->cnt_summed.accumobjs_shifted_unbiased +=
			    tctx->dump_cnts.accumobjs_shifted_unbiased;
			tdata->cnt_summed.accumbytes +=
			    tctx->dump_cnts.accumbytes;
			tdata->cnt_summed.accumbytes_unbiased +=
			    tctx->dump_cnts.accumbytes_unbiased;
		}
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		not_reached();
	}
}

static void
prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
	malloc_mutex_assert_owner(tsdn, gctx->lock);

	gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
	gctx->cnt_summed.curobjs_shifted_unbiased
	    += tctx->dump_cnts.curobjs_shifted_unbiased;
	gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
	gctx->cnt_summed.curbytes_unbiased += tctx->dump_cnts.curbytes_unbiased;
	if (opt_prof_accum) {
		gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
		gctx->cnt_summed.accumobjs_shifted_unbiased
		    += tctx->dump_cnts.accumobjs_shifted_unbiased;
		gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
		gctx->cnt_summed.accumbytes_unbiased
		    += tctx->dump_cnts.accumbytes_unbiased;
	}
}

static prof_tctx_t *
prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
	tsdn_t *tsdn = (tsdn_t *)arg;

	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
		break;
	default:
		not_reached();
	}

	return NULL;
}

typedef struct prof_dump_iter_arg_s prof_dump_iter_arg_t;
struct prof_dump_iter_arg_s {
	tsdn_t *tsdn;
	write_cb_t *prof_dump_write;
	void *cbopaque;
};

static prof_tctx_t *
prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
	prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque;
	malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_initializing:
	case prof_tctx_state_nominal:
		/* Not captured by this dump. */
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
		    " t%"FMTu64": ", tctx->thr_uid);
		prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
		    &tctx->dump_cnts);
		arg->prof_dump_write(arg->cbopaque, "\n");
		break;
	default:
		not_reached();
	}
	return NULL;
}

static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
	tsdn_t *tsdn = (tsdn_t *)arg;
	prof_tctx_t *ret;

	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
		tctx->state = prof_tctx_state_nominal;
		break;
	case prof_tctx_state_purgatory:
		ret = tctx;
		goto label_return;
	default:
		not_reached();
	}

	ret = NULL;
label_return:
	return ret;
}

static void
prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
	cassert(config_prof);

	malloc_mutex_lock(tsdn, gctx->lock);

	/*
	 * Increment nlimbo so that gctx won't go away before dump.
	 * Additionally, link gctx into the dump list so that it is included in
	 * prof_dump()'s second pass.
	 */
	gctx->nlimbo++;
	gctx_tree_insert(gctxs, gctx);

	memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));

	malloc_mutex_unlock(tsdn, gctx->lock);
}

typedef struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg_t;
struct prof_gctx_merge_iter_arg_s {
	tsdn_t *tsdn;
	size_t *leak_ngctx;
};

static prof_gctx_t *
prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
	prof_gctx_merge_iter_arg_t *arg = (prof_gctx_merge_iter_arg_t *)opaque;

	malloc_mutex_lock(arg->tsdn, gctx->lock);
	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
	    (void *)arg->tsdn);
	if (gctx->cnt_summed.curobjs != 0) {
		(*arg->leak_ngctx)++;
	}
	malloc_mutex_unlock(arg->tsdn, gctx->lock);

	return NULL;
}

static void
prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
	prof_gctx_t *gctx;

	/*
	 * Standard tree iteration won't work here, because as soon as we
	 * decrement gctx->nlimbo and unlock gctx, another thread can
	 * concurrently destroy it, which will corrupt the tree. Therefore,
	 * tear down the tree one node at a time during iteration.
	 */
	while ((gctx = gctx_tree_first(gctxs)) != NULL) {
		gctx_tree_remove(gctxs, gctx);
		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
		{
			prof_tctx_t *next;

			next = NULL;
			do {
				prof_tctx_t *to_destroy =
				    tctx_tree_iter(&gctx->tctxs, next,
				    prof_tctx_finish_iter,
				    (void *)tsd_tsdn(tsd));
				if (to_destroy != NULL) {
					next = tctx_tree_next(&gctx->tctxs,
					    to_destroy);
					tctx_tree_remove(&gctx->tctxs,
					    to_destroy);
					idalloctm(tsd_tsdn(tsd), to_destroy,
					    NULL, NULL, true, true);
				} else {
					next = NULL;
				}
			} while (next != NULL);
		}
		gctx->nlimbo--;
		if (prof_gctx_should_destroy(gctx)) {
			gctx->nlimbo++;
			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
			prof_gctx_try_destroy(tsd, tdata, gctx);
		} else {
			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
		}
	}
}

typedef struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg_t;
struct prof_tdata_merge_iter_arg_s {
	tsdn_t *tsdn;
	prof_cnt_t *cnt_all;
};

static prof_tdata_t *
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
    void *opaque) {
	prof_tdata_merge_iter_arg_t *arg =
	    (prof_tdata_merge_iter_arg_t *)opaque;

	malloc_mutex_lock(arg->tsdn, tdata->lock);
	if (!tdata->expired) {
		size_t tabind;
		union {
			prof_tctx_t *p;
			void *v;
		} tctx;

		tdata->dumping = true;
		memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
		for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
		    &tctx.v);) {
			prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
		}

		arg->cnt_all->curobjs += tdata->cnt_summed.curobjs;
		arg->cnt_all->curobjs_shifted_unbiased
		    += tdata->cnt_summed.curobjs_shifted_unbiased;
		arg->cnt_all->curbytes += tdata->cnt_summed.curbytes;
		arg->cnt_all->curbytes_unbiased
		    += tdata->cnt_summed.curbytes_unbiased;
		if (opt_prof_accum) {
			arg->cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
			arg->cnt_all->accumobjs_shifted_unbiased
			    += tdata->cnt_summed.accumobjs_shifted_unbiased;
			arg->cnt_all->accumbytes +=
			    tdata->cnt_summed.accumbytes;
			arg->cnt_all->accumbytes_unbiased +=
			    tdata->cnt_summed.accumbytes_unbiased;
		}
	} else {
		tdata->dumping = false;
	}
	malloc_mutex_unlock(arg->tsdn, tdata->lock);

	return NULL;
}

static prof_tdata_t *
prof_tdata_dump_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
    void *opaque) {
	if (!tdata->dumping) {
		return NULL;
	}

	prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque;
	prof_dump_printf(arg->prof_dump_write, arg->cbopaque, " t%"FMTu64": ",
	    tdata->thr_uid);
	prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
	    &tdata->cnt_summed);
	if (tdata->thread_name != NULL) {
		arg->prof_dump_write(arg->cbopaque, " ");
		arg->prof_dump_write(arg->cbopaque, tdata->thread_name);
	}
	arg->prof_dump_write(arg->cbopaque, "\n");
	return NULL;
}

static void
prof_dump_header(prof_dump_iter_arg_t *arg, const prof_cnt_t *cnt_all) {
	prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
	    "heap_v2/%"FMTu64"\n t*: ", ((uint64_t)1U << lg_prof_sample));
	prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque, cnt_all);
	arg->prof_dump_write(arg->cbopaque, "\n");

	malloc_mutex_lock(arg->tsdn, &tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, arg);
	malloc_mutex_unlock(arg->tsdn, &tdatas_mtx);
}
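/*
 * With the formats above, a dump therefore begins like (counts
 * hypothetical, lg_prof_sample == 19):
 *
 *	heap_v2/524288
 *	 t*: 5: 20480 [0: 0]
 *	 t0: 3: 12288 [0: 0]
 *	 t1: 2: 8192 [0: 0] worker
 *
 * where each quadruple is "curobjs: curbytes [accumobjs: accumbytes]" as
 * emitted by prof_dump_print_cnts(), and a trailing word is the optional
 * thread name.
 */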
static void
prof_dump_gctx(prof_dump_iter_arg_t *arg, prof_gctx_t *gctx,
    const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
	cassert(config_prof);
	malloc_mutex_assert_owner(arg->tsdn, gctx->lock);

	/* Avoid dumping gctx's that have no useful data. */
	if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
	    (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
		assert(gctx->cnt_summed.curobjs == 0);
		assert(gctx->cnt_summed.curbytes == 0);
		/*
		 * These asserts would not be correct -- see the comment on
		 * races in prof.c
		 * assert(gctx->cnt_summed.curobjs_unbiased == 0);
		 * assert(gctx->cnt_summed.curbytes_unbiased == 0);
		 */
		assert(gctx->cnt_summed.accumobjs == 0);
		assert(gctx->cnt_summed.accumobjs_shifted_unbiased == 0);
		assert(gctx->cnt_summed.accumbytes == 0);
		assert(gctx->cnt_summed.accumbytes_unbiased == 0);
		return;
	}

	arg->prof_dump_write(arg->cbopaque, "@");
	for (unsigned i = 0; i < bt->len; i++) {
		prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
		    " %#"FMTxPTR, (uintptr_t)bt->vec[i]);
	}

	arg->prof_dump_write(arg->cbopaque, "\n t*: ");
	prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
	    &gctx->cnt_summed);
	arg->prof_dump_write(arg->cbopaque, "\n");

	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, arg);
}
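/*
 * A dumped stanza thus looks like (addresses and counts hypothetical):
 *
 *	@ 0x7f3a12345678 0x7f3a123459ab
 *	 t*: 2: 8192 [0: 0]
 *	 t0: 1: 4096 [0: 0]
 *	 t1: 1: 4096 [0: 0]
 */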
"s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : ""); 1038*c43cad87SWarner Losh malloc_printf( 1039*c43cad87SWarner Losh "<jemalloc>: Run jeprof on dump output for leak detail\n"); 1040*c43cad87SWarner Losh if (opt_prof_leak_error) { 1041*c43cad87SWarner Losh malloc_printf( 1042*c43cad87SWarner Losh "<jemalloc>: Exiting with error code because memory" 1043*c43cad87SWarner Losh " leaks were detected\n"); 1044*c43cad87SWarner Losh /* 1045*c43cad87SWarner Losh * Use _exit() with underscore to avoid calling atexit() 1046*c43cad87SWarner Losh * and entering endless cycle. 1047*c43cad87SWarner Losh */ 1048*c43cad87SWarner Losh _exit(1); 1049*c43cad87SWarner Losh } 1050*c43cad87SWarner Losh } 1051*c43cad87SWarner Losh #endif 1052*c43cad87SWarner Losh } 1053*c43cad87SWarner Losh 1054*c43cad87SWarner Losh static prof_gctx_t * 1055*c43cad87SWarner Losh prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { 1056*c43cad87SWarner Losh prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque; 1057*c43cad87SWarner Losh malloc_mutex_lock(arg->tsdn, gctx->lock); 1058*c43cad87SWarner Losh prof_dump_gctx(arg, gctx, &gctx->bt, gctxs); 1059*c43cad87SWarner Losh malloc_mutex_unlock(arg->tsdn, gctx->lock); 1060*c43cad87SWarner Losh return NULL; 1061*c43cad87SWarner Losh } 1062*c43cad87SWarner Losh 1063*c43cad87SWarner Losh static void 1064*c43cad87SWarner Losh prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, prof_cnt_t *cnt_all, 1065*c43cad87SWarner Losh size_t *leak_ngctx, prof_gctx_tree_t *gctxs) { 1066*c43cad87SWarner Losh size_t tabind; 1067*c43cad87SWarner Losh union { 1068*c43cad87SWarner Losh prof_gctx_t *p; 1069*c43cad87SWarner Losh void *v; 1070*c43cad87SWarner Losh } gctx; 1071*c43cad87SWarner Losh 1072*c43cad87SWarner Losh prof_enter(tsd, tdata); 1073*c43cad87SWarner Losh 1074*c43cad87SWarner Losh /* 1075*c43cad87SWarner Losh * Put gctx's in limbo and clear their counters in preparation for 1076*c43cad87SWarner Losh * summing. 1077*c43cad87SWarner Losh */ 1078*c43cad87SWarner Losh gctx_tree_new(gctxs); 1079*c43cad87SWarner Losh for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { 1080*c43cad87SWarner Losh prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); 1081*c43cad87SWarner Losh } 1082*c43cad87SWarner Losh 1083*c43cad87SWarner Losh /* 1084*c43cad87SWarner Losh * Iterate over tdatas, and for the non-expired ones snapshot their tctx 1085*c43cad87SWarner Losh * stats and merge them into the associated gctx's. 1086*c43cad87SWarner Losh */ 1087*c43cad87SWarner Losh memset(cnt_all, 0, sizeof(prof_cnt_t)); 1088*c43cad87SWarner Losh prof_tdata_merge_iter_arg_t prof_tdata_merge_iter_arg = {tsd_tsdn(tsd), 1089*c43cad87SWarner Losh cnt_all}; 1090*c43cad87SWarner Losh malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); 1091*c43cad87SWarner Losh tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, 1092*c43cad87SWarner Losh &prof_tdata_merge_iter_arg); 1093*c43cad87SWarner Losh malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); 1094*c43cad87SWarner Losh 1095*c43cad87SWarner Losh /* Merge tctx stats into gctx's. 
static prof_gctx_t *
prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
	prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque;
	malloc_mutex_lock(arg->tsdn, gctx->lock);
	prof_dump_gctx(arg, gctx, &gctx->bt, gctxs);
	malloc_mutex_unlock(arg->tsdn, gctx->lock);
	return NULL;
}

static void
prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, prof_cnt_t *cnt_all,
    size_t *leak_ngctx, prof_gctx_tree_t *gctxs) {
	size_t tabind;
	union {
		prof_gctx_t *p;
		void *v;
	} gctx;

	prof_enter(tsd, tdata);

	/*
	 * Put gctx's in limbo and clear their counters in preparation for
	 * summing.
	 */
	gctx_tree_new(gctxs);
	for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
		prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
	}

	/*
	 * Iterate over tdatas, and for the non-expired ones snapshot their
	 * tctx stats and merge them into the associated gctx's.
	 */
	memset(cnt_all, 0, sizeof(prof_cnt_t));
	prof_tdata_merge_iter_arg_t prof_tdata_merge_iter_arg = {tsd_tsdn(tsd),
	    cnt_all};
	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
	    &prof_tdata_merge_iter_arg);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);

	/* Merge tctx stats into gctx's. */
	*leak_ngctx = 0;
	prof_gctx_merge_iter_arg_t prof_gctx_merge_iter_arg = {tsd_tsdn(tsd),
	    leak_ngctx};
	gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
	    &prof_gctx_merge_iter_arg);

	prof_leave(tsd, tdata);
}

void
prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
    prof_tdata_t *tdata, bool leakcheck) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_dump_mtx);
	prof_cnt_t cnt_all;
	size_t leak_ngctx;
	prof_gctx_tree_t gctxs;
	prof_dump_prep(tsd, tdata, &cnt_all, &leak_ngctx, &gctxs);
	prof_dump_iter_arg_t prof_dump_iter_arg = {tsd_tsdn(tsd),
	    prof_dump_write, cbopaque};
	prof_dump_header(&prof_dump_iter_arg, &cnt_all);
	gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, &prof_dump_iter_arg);
	prof_gctx_finish(tsd, &gctxs);
	if (leakcheck) {
		prof_leakcheck(&cnt_all, leak_ngctx);
	}
}
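/*
 * Dump flow recap: prof_dump_prep() snapshots per-tctx counters into the
 * gctx and tdata summaries under bt2gctx_mtx (via prof_enter());
 * prof_dump_header() emits the per-thread view, gctx_tree_iter() emits the
 * per-stack-trace view, and prof_gctx_finish() drops the limbo references
 * taken in prof_dump_gctx_prep(), destroying gctx's that became unused
 * along the way.
 */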
prof_tdata_t *
prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
    char *thread_name, bool active) {
	assert(tsd_reentrancy_level_get(tsd) == 0);

	prof_tdata_t *tdata;

	cassert(config_prof);

	/* Initialize an empty cache for this thread. */
	tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
	    sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
	    arena_get(TSDN_NULL, 0, true), true);
	if (tdata == NULL) {
		return NULL;
	}

	tdata->lock = prof_tdata_mutex_choose(thr_uid);
	tdata->thr_uid = thr_uid;
	tdata->thr_discrim = thr_discrim;
	tdata->thread_name = thread_name;
	tdata->attached = true;
	tdata->expired = false;
	tdata->tctx_uid_next = 0;

	if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
	    prof_bt_keycomp)) {
		idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
		return NULL;
	}

	tdata->enq = false;
	tdata->enq_idump = false;
	tdata->enq_gdump = false;

	tdata->dumping = false;
	tdata->active = active;

	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
	tdata_tree_insert(&tdatas, tdata);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);

	return tdata;
}

static bool
prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
	if (tdata->attached && !even_if_attached) {
		return false;
	}
	if (ckh_count(&tdata->bt2tctx) != 0) {
		return false;
	}
	return true;
}

static bool
prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
    bool even_if_attached) {
	malloc_mutex_assert_owner(tsdn, tdata->lock);

	return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
}
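
/*
 * Free a tdata and everything hanging off of it (thread name, bt2tctx hash),
 * first removing it from the tdatas tree.  The caller must hold tdatas_mtx
 * but not tdata->lock, and the tdata must already be eligible per
 * prof_tdata_should_destroy_unlocked().
 */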
static void
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tdata->lock);

	tdata_tree_remove(&tdatas, tdata);

	assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));

	if (tdata->thread_name != NULL) {
		idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
		    true);
	}
	ckh_delete(tsd, &tdata->bt2tctx);
	idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
}

static void
prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
	prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
}

void
prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
	bool destroy_tdata;

	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
	if (tdata->attached) {
		destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
		    true);
		/*
		 * Only detach if !destroy_tdata, because detaching would allow
		 * another thread to win the race to destroy tdata.
		 */
		if (!destroy_tdata) {
			tdata->attached = false;
		}
		tsd_prof_tdata_set(tsd, NULL);
	} else {
		destroy_tdata = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
	if (destroy_tdata) {
		prof_tdata_destroy(tsd, tdata, true);
	}
}

static bool
prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
	bool destroy_tdata;

	malloc_mutex_lock(tsdn, tdata->lock);
	if (!tdata->expired) {
		tdata->expired = true;
		destroy_tdata = prof_tdata_should_destroy(tsdn, tdata, false);
	} else {
		destroy_tdata = false;
	}
	malloc_mutex_unlock(tsdn, tdata->lock);

	return destroy_tdata;
}
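
/*
 * Iterator callback for prof_reset(): expire each tdata, and return it
 * (which stops the iteration) if expiring it made it destroyable, so that
 * the caller can destroy it and resume iterating from its successor.
 */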
static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
    void *arg) {
	tsdn_t *tsdn = (tsdn_t *)arg;

	return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
}

void
prof_reset(tsd_t *tsd, size_t lg_sample) {
	prof_tdata_t *next;

	assert(lg_sample < (sizeof(uint64_t) << 3));

	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);

	lg_prof_sample = lg_sample;
	prof_unbias_map_init();

	next = NULL;
	do {
		prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
		    prof_tdata_reset_iter, (void *)tsd);
		if (to_destroy != NULL) {
			next = tdata_tree_next(&tdatas, to_destroy);
			prof_tdata_destroy_locked(tsd, to_destroy, false);
		} else {
			next = NULL;
		}
	} while (next != NULL);

	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
}

static bool
prof_tctx_should_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);

	if (opt_prof_accum) {
		return false;
	}
	if (tctx->cnts.curobjs != 0) {
		return false;
	}
	if (tctx->prepared) {
		return false;
	}
	if (tctx->recent_count != 0) {
		return false;
	}
	return true;
}
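
/*
 * Final destruction of a tctx whose counters have all dropped to zero.
 * Entered with tctx->tdata->lock held; that lock is released here once the
 * tctx has been unhooked from its tdata.  Destruction can cascade to the
 * owning tdata and gctx when the tctx was the last reference keeping them
 * alive; a tctx that a concurrent dump is still using is instead parked in
 * the purgatory state for the dumping thread to clean up.
 */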
static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);

	assert(tctx->cnts.curobjs == 0);
	assert(tctx->cnts.curbytes == 0);
	/*
	 * These asserts are not correct -- see the comment about races in
	 * prof.c
	 *
	 * assert(tctx->cnts.curobjs_shifted_unbiased == 0);
	 * assert(tctx->cnts.curbytes_unbiased == 0);
	 */
	assert(!opt_prof_accum);
	assert(tctx->cnts.accumobjs == 0);
	assert(tctx->cnts.accumbytes == 0);
	/*
	 * These ones are correct, since accumbyte counts never go down.
	 * Either prof_accum is off (in which case these should never have
	 * changed from their initial value of zero), or it's on (in which
	 * case we shouldn't be destroying this tctx).
	 */
	assert(tctx->cnts.accumobjs_shifted_unbiased == 0);
	assert(tctx->cnts.accumbytes_unbiased == 0);

	prof_gctx_t *gctx = tctx->gctx;

	{
		prof_tdata_t *tdata = tctx->tdata;
		tctx->tdata = NULL;
		ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
		bool destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd),
		    tdata, false);
		malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
		if (destroy_tdata) {
			prof_tdata_destroy(tsd, tdata, false);
		}
	}

	bool destroy_tctx, destroy_gctx;

	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
	switch (tctx->state) {
	case prof_tctx_state_nominal:
		tctx_tree_remove(&gctx->tctxs, tctx);
		destroy_tctx = true;
		if (prof_gctx_should_destroy(gctx)) {
			/*
			 * Increment gctx->nlimbo in order to keep another
			 * thread from winning the race to destroy gctx while
			 * this one has gctx->lock dropped.  Without this, it
			 * would be possible for another thread to:
			 *
			 * 1) Sample an allocation associated with gctx.
			 * 2) Deallocate the sampled object.
			 * 3) Successfully prof_gctx_try_destroy(gctx).
			 *
			 * The result would be that gctx no longer exists by
			 * the time this thread accesses it in
			 * prof_gctx_try_destroy().
			 */
			gctx->nlimbo++;
			destroy_gctx = true;
		} else {
			destroy_gctx = false;
		}
		break;
	case prof_tctx_state_dumping:
		/*
		 * A dumping thread needs tctx to remain valid until dumping
		 * has finished.  Change state such that the dumping thread
		 * will complete destruction during a late dump iteration
		 * phase.
		 */
		tctx->state = prof_tctx_state_purgatory;
		destroy_tctx = false;
		destroy_gctx = false;
		break;
	default:
		not_reached();
		destroy_tctx = false;
		destroy_gctx = false;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
	if (destroy_gctx) {
		prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx);
	}
	if (destroy_tctx) {
		idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
	}
}
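
/*
 * Destroy tctx if it is no longer needed, else just drop its tdata's lock.
 * Either way, tctx->tdata->lock (held on entry) has been released on return.
 */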
void
prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
	if (prof_tctx_should_destroy(tsd, tctx)) {
		/* tctx->tdata->lock will be released in prof_tctx_destroy(). */
		prof_tctx_destroy(tsd, tctx);
	} else {
		malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
	}
}

/******************************************************************************/