/* xref: /freebsd/contrib/jemalloc/src/prof_log.c (revision c43cad87172039ccf38172129c79755ea79e6102) */
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/buf_writer.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/emitter.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prof_data.h"
#include "jemalloc/internal/prof_log.h"
#include "jemalloc/internal/prof_sys.h"

bool opt_prof_log = false;
typedef enum prof_logging_state_e prof_logging_state_t;
enum prof_logging_state_e {
	prof_logging_state_stopped,
	prof_logging_state_started,
	prof_logging_state_dumping
};

/*
 * - stopped: log_start never called, or previous log_stop has completed.
 * - started: log_start called, log_stop not called yet. Allocations are logged.
 * - dumping: log_stop called but not finished; samples are not logged anymore.
 */
prof_logging_state_t prof_logging_state = prof_logging_state_stopped;

/* Used in unit tests. */
static bool prof_log_dummy = false;

/* Incremented for every log file that is output. */
static uint64_t log_seq = 0;
static char log_filename[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/* Timestamp for most recent call to log_start(). */
static nstime_t log_start_timestamp;

/* Increment these when adding to the log_bt and log_thr linked lists. */
static size_t log_bt_index = 0;
static size_t log_thr_index = 0;

/* Linked list node definitions. These are only used in this file. */
typedef struct prof_bt_node_s prof_bt_node_t;

struct prof_bt_node_s {
	prof_bt_node_t *next;
	size_t index;
	prof_bt_t bt;
	/* Variable size backtrace vector pointed to by bt. */
	void *vec[1];
};

typedef struct prof_thr_node_s prof_thr_node_t;

struct prof_thr_node_s {
	prof_thr_node_t *next;
	size_t index;
	uint64_t thr_uid;
	/* Variable size based on thr_name_sz. */
	char name[1];
};

typedef struct prof_alloc_node_s prof_alloc_node_t;

/* This is output when logging sampled allocations. */
struct prof_alloc_node_s {
	prof_alloc_node_t *next;
	/* Indices into an array of thread data. */
	size_t alloc_thr_ind;
	size_t free_thr_ind;

	/* Indices into an array of backtraces. */
	size_t alloc_bt_ind;
	size_t free_bt_ind;

	uint64_t alloc_time_ns;
	uint64_t free_time_ns;

	size_t usize;
};

/*
 * Created on the first call to prof_try_log and deleted on prof_log_stop.
 * These are the backtraces and threads that have already been logged by an
 * allocation.
 */
static bool log_tables_initialized = false;
static ckh_t log_bt_node_set;
static ckh_t log_thr_node_set;

/* Store linked lists for logged data. */
static prof_bt_node_t *log_bt_first = NULL;
static prof_bt_node_t *log_bt_last = NULL;
static prof_thr_node_t *log_thr_first = NULL;
static prof_thr_node_t *log_thr_last = NULL;
static prof_alloc_node_t *log_alloc_first = NULL;
static prof_alloc_node_t *log_alloc_last = NULL;

/* Protects the prof_logging_state and any log_{...} variable. */
malloc_mutex_t log_mtx;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

/* Hashtable functions for log_bt_node_set and log_thr_node_set. */
static void prof_thr_node_hash(const void *key, size_t r_hash[2]);
static bool prof_thr_node_keycomp(const void *k1, const void *k2);
static void prof_bt_node_hash(const void *key, size_t r_hash[2]);
static bool prof_bt_node_keycomp(const void *k1, const void *k2);

/******************************************************************************/

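/*
 * Map a backtrace to its position in the logged-backtrace list, appending a
 * copy of the backtrace (and caching it in log_bt_node_set) the first time it
 * is seen.  Must be called with log_mtx held while logging is started.
 */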
static size_t
prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
	assert(prof_logging_state == prof_logging_state_started);
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);

	prof_bt_node_t dummy_node;
	dummy_node.bt = *bt;
	prof_bt_node_t *node;

	/* See if this backtrace is already cached in the table. */
	if (ckh_search(&log_bt_node_set, (void *)(&dummy_node),
	    (void **)(&node), NULL)) {
		size_t sz = offsetof(prof_bt_node_t, vec) +
		    (bt->len * sizeof(void *));
		prof_bt_node_t *new_node = (prof_bt_node_t *)
		    iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
		    true, arena_get(TSDN_NULL, 0, true), true);
		if (log_bt_first == NULL) {
			log_bt_first = new_node;
			log_bt_last = new_node;
		} else {
			log_bt_last->next = new_node;
			log_bt_last = new_node;
		}

		new_node->next = NULL;
		new_node->index = log_bt_index;
		/*
		 * Copy the backtrace: bt is inside a tdata or gctx, which
		 * might die before prof_log_stop is called.
		 */
		new_node->bt.len = bt->len;
		memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *));
		new_node->bt.vec = new_node->vec;

		log_bt_index++;
		ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL);
		return new_node->index;
	} else {
		return node->index;
	}
}

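/*
 * Analogous to prof_log_bt_index(), but keyed by thread uid: returns the
 * thread's position in the logged-thread list, adding a new node (with a copy
 * of the thread name) on first use.
 */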
static size_t
prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) {
	assert(prof_logging_state == prof_logging_state_started);
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);

	prof_thr_node_t dummy_node;
	dummy_node.thr_uid = thr_uid;
	prof_thr_node_t *node;

	/* See if this thread is already cached in the table. */
	if (ckh_search(&log_thr_node_set, (void *)(&dummy_node),
	    (void **)(&node), NULL)) {
		size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1;
		prof_thr_node_t *new_node = (prof_thr_node_t *)
		    iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
		    true, arena_get(TSDN_NULL, 0, true), true);
		if (log_thr_first == NULL) {
			log_thr_first = new_node;
			log_thr_last = new_node;
		} else {
			log_thr_last->next = new_node;
			log_thr_last = new_node;
		}

		new_node->next = NULL;
		new_node->index = log_thr_index;
		new_node->thr_uid = thr_uid;
		strcpy(new_node->name, name);

		log_thr_index++;
		ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL);
		return new_node->index;
	} else {
		return node->index;
	}
}

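/*
 * Record one sampled allocation/deallocation pair.  The caller (presumably
 * the deallocation path for a sampled object) still holds the allocating
 * tctx's tdata lock; the allocation timestamp comes from prof_info and the
 * free timestamp is taken here.  A no-op unless logging is currently started.
 */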
JEMALLOC_COLD
void
prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info) {
	cassert(config_prof);
	prof_tctx_t *tctx = prof_info->alloc_tctx;
	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);

	prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false);
	if (cons_tdata == NULL) {
		/*
		 * We decide not to log these allocations. cons_tdata will be
		 * NULL only when the current thread is in a weird state (e.g.
		 * it's being destroyed).
		 */
		return;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx);

	if (prof_logging_state != prof_logging_state_started) {
		goto label_done;
	}

	if (!log_tables_initialized) {
		bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
		    prof_bt_node_hash, prof_bt_node_keycomp);
		bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
		    prof_thr_node_hash, prof_thr_node_keycomp);
		if (err1 || err2) {
			goto label_done;
		}
		log_tables_initialized = true;
	}

	nstime_t alloc_time = prof_info->alloc_time;
	nstime_t free_time;
	nstime_prof_init_update(&free_time);

	size_t sz = sizeof(prof_alloc_node_t);
	prof_alloc_node_t *new_node = (prof_alloc_node_t *)
	    iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
	    arena_get(TSDN_NULL, 0, true), true);

	const char *prod_thr_name = (tctx->tdata->thread_name == NULL) ?
	    "" : tctx->tdata->thread_name;
	const char *cons_thr_name = prof_thread_name_get(tsd);

	prof_bt_t bt;
	/* Initialize the backtrace, using the buffer in tdata to store it. */
	bt_init(&bt, cons_tdata->vec);
	prof_backtrace(tsd, &bt);
	prof_bt_t *cons_bt = &bt;

	/* We haven't destroyed tctx yet, so gctx should be good to read. */
	prof_bt_t *prod_bt = &tctx->gctx->bt;

	new_node->next = NULL;
	new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid,
	    prod_thr_name);
	new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid,
	    cons_thr_name);
	new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt);
	new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt);
	new_node->alloc_time_ns = nstime_ns(&alloc_time);
	new_node->free_time_ns = nstime_ns(&free_time);
	new_node->usize = usize;

	if (log_alloc_first == NULL) {
		log_alloc_first = new_node;
		log_alloc_last = new_node;
	} else {
		log_alloc_last->next = new_node;
		log_alloc_last = new_node;
	}

label_done:
	malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx);
}

static void
prof_bt_node_hash(const void *key, size_t r_hash[2]) {
	const prof_bt_node_t *bt_node = (prof_bt_node_t *)key;
	prof_bt_hash((void *)(&bt_node->bt), r_hash);
}

static bool
prof_bt_node_keycomp(const void *k1, const void *k2) {
	const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1;
	const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2;
	return prof_bt_keycomp((void *)(&bt_node1->bt),
	    (void *)(&bt_node2->bt));
}

static void
prof_thr_node_hash(const void *key, size_t r_hash[2]) {
	const prof_thr_node_t *thr_node = (prof_thr_node_t *)key;
	hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash);
}

static bool
prof_thr_node_keycomp(const void *k1, const void *k2) {
	const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1;
	const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2;
	return thr_node1->thr_uid == thr_node2->thr_uid;
}

/* Used in unit tests. */
size_t
prof_log_bt_count(void) {
	cassert(config_prof);
	size_t cnt = 0;
	prof_bt_node_t *node = log_bt_first;
	while (node != NULL) {
		cnt++;
		node = node->next;
	}
	return cnt;
}

/* Used in unit tests. */
size_t
prof_log_alloc_count(void) {
	cassert(config_prof);
	size_t cnt = 0;
	prof_alloc_node_t *node = log_alloc_first;
	while (node != NULL) {
		cnt++;
		node = node->next;
	}
	return cnt;
}

/* Used in unit tests. */
size_t
prof_log_thr_count(void) {
	cassert(config_prof);
	size_t cnt = 0;
	prof_thr_node_t *node = log_thr_first;
	while (node != NULL) {
		cnt++;
		node = node->next;
	}
	return cnt;
}

/* Used in unit tests. */
bool
prof_log_is_logging(void) {
	cassert(config_prof);
	return prof_logging_state == prof_logging_state_started;
}

/* Used in unit tests. */
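/*
 * Representation check: returns true iff some invariant of the logging state
 * is violated (e.g. dangling list tails, out-of-range indices, or a free
 * timestamp earlier than its allocation timestamp).
 */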
bool
prof_log_rep_check(void) {
	cassert(config_prof);
	if (prof_logging_state == prof_logging_state_stopped
	    && log_tables_initialized) {
		return true;
	}

	if (log_bt_last != NULL && log_bt_last->next != NULL) {
		return true;
	}
	if (log_thr_last != NULL && log_thr_last->next != NULL) {
		return true;
	}
	if (log_alloc_last != NULL && log_alloc_last->next != NULL) {
		return true;
	}

	size_t bt_count = prof_log_bt_count();
	size_t thr_count = prof_log_thr_count();
	size_t alloc_count = prof_log_alloc_count();

	if (prof_logging_state == prof_logging_state_stopped) {
		if (bt_count != 0 || thr_count != 0 || alloc_count != 0) {
			return true;
		}
	}

	prof_alloc_node_t *node = log_alloc_first;
	while (node != NULL) {
		if (node->alloc_bt_ind >= bt_count) {
			return true;
		}
		if (node->free_bt_ind >= bt_count) {
			return true;
		}
		if (node->alloc_thr_ind >= thr_count) {
			return true;
		}
		if (node->free_thr_ind >= thr_count) {
			return true;
		}
		if (node->alloc_time_ns > node->free_time_ns) {
			return true;
		}
		node = node->next;
	}

	return false;
}

/* Used in unit tests. */
void
prof_log_dummy_set(bool new_value) {
	cassert(config_prof);
	prof_log_dummy = new_value;
}

/* Used as an atexit function to stop logging on exit. */
static void
prof_log_stop_final(void) {
	tsd_t *tsd = tsd_fetch();
	prof_log_stop(tsd_tsdn(tsd));
}

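/*
 * Begin logging.  With a NULL filename a default, sequence-numbered name is
 * generated; otherwise the given name is used (it must fit in
 * PROF_DUMP_FILENAME_LEN).  Returns true on failure, e.g. if profiling is
 * disabled or logging is already active.
 */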
JEMALLOC_COLD
bool
prof_log_start(tsdn_t *tsdn, const char *filename) {
	cassert(config_prof);

	if (!opt_prof) {
		return true;
	}

	bool ret = false;

	malloc_mutex_lock(tsdn, &log_mtx);

	static bool prof_log_atexit_called = false;
	if (!prof_log_atexit_called) {
		prof_log_atexit_called = true;
		if (atexit(prof_log_stop_final) != 0) {
			malloc_write("<jemalloc>: Error in atexit() "
			    "for logging\n");
			if (opt_abort) {
				abort();
			}
			ret = true;
			goto label_done;
		}
	}

	if (prof_logging_state != prof_logging_state_stopped) {
		ret = true;
	} else if (filename == NULL) {
		/* Make default name. */
		prof_get_default_filename(tsdn, log_filename, log_seq);
		log_seq++;
		prof_logging_state = prof_logging_state_started;
	} else if (strlen(filename) >= PROF_DUMP_FILENAME_LEN) {
		ret = true;
	} else {
		strcpy(log_filename, filename);
		prof_logging_state = prof_logging_state_started;
	}

	if (!ret) {
		nstime_prof_init_update(&log_start_timestamp);
	}
label_done:
	malloc_mutex_unlock(tsdn, &log_mtx);

	return ret;
}

struct prof_emitter_cb_arg_s {
	int fd;
	ssize_t ret;
};

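/*
 * Emitter write callback: forwards emitted text to the output file descriptor,
 * recording the result, and does nothing in the dummy mode used by unit tests.
 */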
static void
prof_emitter_write_cb(void *opaque, const char *to_write) {
	struct prof_emitter_cb_arg_s *arg =
	    (struct prof_emitter_cb_arg_s *)opaque;
	size_t bytes = strlen(to_write);
	if (prof_log_dummy) {
		return;
	}
	arg->ret = malloc_write_fd(arg->fd, to_write, bytes);
}

/*
 * prof_log_emit_{...} goes through the appropriate linked list, emitting each
 * node to the json and deallocating it.
 */
static void
prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) {
	emitter_json_array_kv_begin(emitter, "threads");
	prof_thr_node_t *thr_node = log_thr_first;
	prof_thr_node_t *thr_old_node;
	while (thr_node != NULL) {
		emitter_json_object_begin(emitter);

		emitter_json_kv(emitter, "thr_uid", emitter_type_uint64,
		    &thr_node->thr_uid);

		char *thr_name = thr_node->name;

		emitter_json_kv(emitter, "thr_name", emitter_type_string,
		    &thr_name);

		emitter_json_object_end(emitter);
		thr_old_node = thr_node;
		thr_node = thr_node->next;
		idalloctm(tsd_tsdn(tsd), thr_old_node, NULL, NULL, true, true);
	}
	emitter_json_array_end(emitter);
}

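/*
 * Each stack trace is emitted as a JSON array of "0x..." strings, one per
 * frame, in the same order the backtraces were first logged.
 */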
static void
prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
	emitter_json_array_kv_begin(emitter, "stack_traces");
	prof_bt_node_t *bt_node = log_bt_first;
	prof_bt_node_t *bt_old_node;
	/*
	 * Size the buffer to hold a pointer in hex: two characters per byte,
	 * two for the "0x" prefix, and one more for the terminating '\0'.
	 */
	char buf[2 * sizeof(intptr_t) + 3];
	size_t buf_sz = sizeof(buf);
	while (bt_node != NULL) {
		emitter_json_array_begin(emitter);
		size_t i;
		for (i = 0; i < bt_node->bt.len; i++) {
			malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]);
			char *trace_str = buf;
			emitter_json_value(emitter, emitter_type_string,
			    &trace_str);
		}
		emitter_json_array_end(emitter);

		bt_old_node = bt_node;
		bt_node = bt_node->next;
		idalloctm(tsd_tsdn(tsd), bt_old_node, NULL, NULL, true, true);
	}
	emitter_json_array_end(emitter);
}

static void
prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) {
	emitter_json_array_kv_begin(emitter, "allocations");
	prof_alloc_node_t *alloc_node = log_alloc_first;
	prof_alloc_node_t *alloc_old_node;
	while (alloc_node != NULL) {
		emitter_json_object_begin(emitter);

		emitter_json_kv(emitter, "alloc_thread", emitter_type_size,
		    &alloc_node->alloc_thr_ind);

		emitter_json_kv(emitter, "free_thread", emitter_type_size,
		    &alloc_node->free_thr_ind);

		emitter_json_kv(emitter, "alloc_trace", emitter_type_size,
		    &alloc_node->alloc_bt_ind);

		emitter_json_kv(emitter, "free_trace", emitter_type_size,
		    &alloc_node->free_bt_ind);

		emitter_json_kv(emitter, "alloc_timestamp",
		    emitter_type_uint64, &alloc_node->alloc_time_ns);

		emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64,
		    &alloc_node->free_time_ns);

		emitter_json_kv(emitter, "usize", emitter_type_uint64,
		    &alloc_node->usize);

		emitter_json_object_end(emitter);

		alloc_old_node = alloc_node;
		alloc_node = alloc_node->next;
		idalloctm(tsd_tsdn(tsd), alloc_old_node, NULL, NULL, true,
		    true);
	}
	emitter_json_array_end(emitter);
}

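/*
 * Emit the "info" object: elapsed logging duration in nanoseconds, jemalloc
 * version, lg sampling rate, profiling time resolution, and process id.
 */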
static void
prof_log_emit_metadata(emitter_t *emitter) {
	emitter_json_object_kv_begin(emitter, "info");

	nstime_t now;

	nstime_prof_init_update(&now);
	uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp);
	emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns);

	char *vers = JEMALLOC_VERSION;
	emitter_json_kv(emitter, "version",
	    emitter_type_string, &vers);

	emitter_json_kv(emitter, "lg_sample_rate",
	    emitter_type_int, &lg_prof_sample);

	const char *res_type = prof_time_res_mode_names[opt_prof_time_res];
	emitter_json_kv(emitter, "prof_time_resolution", emitter_type_string,
	    &res_type);

	int pid = prof_getpid();
	emitter_json_kv(emitter, "pid", emitter_type_int, &pid);

	emitter_json_object_end(emitter);
}

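/*
 * Stop logging: dump everything collected since prof_log_start() to the log
 * file as compact JSON, free the log data structures, and reset the global
 * state to stopped.  Returns true on failure.
 */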
#define PROF_LOG_STOP_BUFSIZE PROF_DUMP_BUFSIZE
JEMALLOC_COLD
bool
prof_log_stop(tsdn_t *tsdn) {
	cassert(config_prof);
	if (!opt_prof || !prof_booted) {
		return true;
	}

	tsd_t *tsd = tsdn_tsd(tsdn);
	malloc_mutex_lock(tsdn, &log_mtx);

	if (prof_logging_state != prof_logging_state_started) {
		malloc_mutex_unlock(tsdn, &log_mtx);
		return true;
	}

	/*
	 * Set the state to dumping. We'll set it to stopped when we're done.
	 * Since other threads won't be able to start/stop/log when the state is
	 * dumping, we don't have to hold the lock during the whole method.
	 */
	prof_logging_state = prof_logging_state_dumping;
	malloc_mutex_unlock(tsdn, &log_mtx);

	emitter_t emitter;

	/* Create a file. */

	int fd;
	if (prof_log_dummy) {
		fd = 0;
	} else {
		fd = creat(log_filename, 0644);
	}

	if (fd == -1) {
		malloc_printf("<jemalloc>: creat() for log file \"%s\" "
		    "failed with %d\n", log_filename, errno);
		if (opt_abort) {
			abort();
		}
		return true;
	}

	struct prof_emitter_cb_arg_s arg;
	arg.fd = fd;

	buf_writer_t buf_writer;
	buf_writer_init(tsdn, &buf_writer, prof_emitter_write_cb, &arg, NULL,
	    PROF_LOG_STOP_BUFSIZE);
	emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb,
	    &buf_writer);

	emitter_begin(&emitter);
	prof_log_emit_metadata(&emitter);
	prof_log_emit_threads(tsd, &emitter);
	prof_log_emit_traces(tsd, &emitter);
	prof_log_emit_allocs(tsd, &emitter);
	emitter_end(&emitter);

	buf_writer_terminate(tsdn, &buf_writer);

	/* Reset global state. */
	if (log_tables_initialized) {
		ckh_delete(tsd, &log_bt_node_set);
		ckh_delete(tsd, &log_thr_node_set);
	}
	log_tables_initialized = false;
	log_bt_index = 0;
	log_thr_index = 0;
	log_bt_first = NULL;
	log_bt_last = NULL;
	log_thr_first = NULL;
	log_thr_last = NULL;
	log_alloc_first = NULL;
	log_alloc_last = NULL;

	malloc_mutex_lock(tsdn, &log_mtx);
	prof_logging_state = prof_logging_state_stopped;
	malloc_mutex_unlock(tsdn, &log_mtx);

	if (prof_log_dummy) {
		return false;
	}
	return close(fd) || arg.ret == -1;
}
#undef PROF_LOG_STOP_BUFSIZE

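/*
 * One-time initialization: create log_mtx and, if opt_prof_log was set at
 * startup, begin logging immediately.  Returns true on failure.
 */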
JEMALLOC_COLD
bool
prof_log_init(tsd_t *tsd) {
	cassert(config_prof);
	if (malloc_mutex_init(&log_mtx, "prof_log",
	    WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) {
		return true;
	}

	if (opt_prof_log) {
		prof_log_start(tsd_tsdn(tsd), NULL);
	}

	return false;
}

/******************************************************************************/