xref: /freebsd/sys/dev/hwpmc/hwpmc_logging.c (revision 1e413cf93298b5b97441a21d9a50fdcd0ee9945e)
1 /*-
2  * Copyright (c) 2005-2007 Joseph Koshy
3  * Copyright (c) 2007 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by A. Joseph Koshy under
7  * sponsorship from the FreeBSD Foundation and Google, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  */
31 
32 /*
33  * Logging code for hwpmc(4)
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/file.h>
41 #include <sys/kernel.h>
42 #include <sys/kthread.h>
43 #include <sys/lock.h>
44 #include <sys/module.h>
45 #include <sys/mutex.h>
46 #include <sys/pmc.h>
47 #include <sys/pmclog.h>
48 #include <sys/proc.h>
49 #include <sys/signalvar.h>
50 #include <sys/sysctl.h>
51 #include <sys/systm.h>
52 #include <sys/uio.h>
53 #include <sys/unistd.h>
54 #include <sys/vnode.h>
55 
/*
 * Sysctl tunables
 */

SYSCTL_DECL(_kern_hwpmc);

/*
 * kern.hwpmc.logbuffersize -- size of the per-cpu owner buffers.
 */

/* Size of each log buffer, in kilobytes (multiplied by 1024 at use sites). */
static int pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "logbuffersize", &pmclog_buffer_size);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_TUN|CTLFLAG_RD,
    &pmclog_buffer_size, 0, "size of log buffers in kilobytes");

/*
 * kern.hwpmc.nbuffer -- number of global log buffers
 */

/* Number of buffers allocated into the global free pool at init time. */
static int pmc_nlogbuffers = PMC_NLOGBUFFERS;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nbuffers", &pmc_nlogbuffers);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers, CTLFLAG_TUN|CTLFLAG_RD,
    &pmc_nlogbuffers, 0, "number of global log buffers");

/*
 * Global log buffer list and associated spin lock.
 */

/* Free pool of log buffers, shared by all owners; guarded by
 * pmc_bufferlist_mtx (a spin mutex, so it may be taken from within
 * the owner's spin-locked sections). */
TAILQ_HEAD(, pmclog_buffer) pmc_bufferlist =
	TAILQ_HEAD_INITIALIZER(pmc_bufferlist);
static struct mtx pmc_bufferlist_mtx;	/* spin lock */
static struct mtx pmc_kthread_mtx;	/* sleep lock */

/*
 * (Re)initialize a buffer descriptor.  The descriptor struct is embedded
 * at the front of the 1024*pmclog_buffer_size byte allocation (see
 * pmclog_initialize()): 'plb_fence' marks the end of the allocation, and
 * 'plb_base'/'plb_ptr' start just past the descriptor, rounded up to
 * uint32_t alignment so log records stay word-aligned.
 */
#define	PMCLOG_INIT_BUFFER_DESCRIPTOR(D) do {				\
		const int __roundup = roundup(sizeof(*D),		\
			sizeof(uint32_t));				\
		(D)->plb_fence = ((char *) (D)) +			\
			 1024*pmclog_buffer_size;			\
		(D)->plb_base  = (D)->plb_ptr = ((char *) (D)) +	\
			__roundup;					\
	} while (0)
98 
/*
 * Log file record constructors.
 */

/*
 * Build the 32-bit record header word: magic byte in bits 31-24, the
 * record type in bits 23-16 and the record length (bytes) in bits 15-0.
 */
#define	_PMCLOG_TO_HEADER(T,L)						\
	((PMCLOG_HEADER_MAGIC << 24) |					\
	 (PMCLOG_TYPE_ ## T << 16)   |					\
	 ((L) & 0xFFFF))

/* reserve LEN bytes of space and initialize the entry header */
/*
 * NOTE: this macro intentionally opens a 'do {' block that is closed by
 * PMCLOG_DESPATCH() below -- every RESERVE must be paired with a
 * DESPATCH in the same function.  On success, '_le' is left pointing
 * past the header and timestamp words (3 x uint32_t) with the po mutex
 * held; on failure ACTION runs (e.g. 'return' or an error goto).
 */
#define	_PMCLOG_RESERVE(PO,TYPE,LEN,ACTION) do {			\
		uint32_t *_le;						\
		int _len = roundup((LEN), sizeof(uint32_t));		\
		if ((_le = pmclog_reserve((PO), _len)) == NULL) {	\
			ACTION;						\
		}							\
		*_le = _PMCLOG_TO_HEADER(TYPE,_len);			\
		_le += 3	/* skip over timestamp */

#define	PMCLOG_RESERVE(P,T,L)		_PMCLOG_RESERVE(P,T,L,return)
#define	PMCLOG_RESERVE_WITH_ERROR(P,T,L) _PMCLOG_RESERVE(P,T,L,		\
	error=ENOMEM;goto error)

/* Append one 32-bit word to the record being built. */
#define	PMCLOG_EMIT32(V)	do { *_le++ = (V); } while (0)
/* Append a 64-bit value as two 32-bit words, low word first. */
#define	PMCLOG_EMIT64(V)	do { 					\
		*_le++ = (uint32_t) ((V) & 0xFFFFFFFF);			\
		*_le++ = (uint32_t) (((V) >> 32) & 0xFFFFFFFF);		\
	} while (0)


/* Emit a string.  Caution: does NOT update _le, so needs to be last */
#define	PMCLOG_EMITSTRING(S,L)	do { bcopy((S), _le, (L)); } while (0)

/* Close the 'do {' opened by _PMCLOG_RESERVE and release the buffer. */
#define	PMCLOG_DESPATCH(PO)						\
		pmclog_release((PO));					\
	} while (0)
134 
135 
/*
 * Assertions about the log file format.
 *
 * These compile-time checks pin the on-disk record sizes and field
 * offsets so that userland log readers and the kernel stay in sync.
 */

CTASSERT(sizeof(struct pmclog_callchain) == 6*4 +
    PMC_CALLCHAIN_DEPTH_MAX*sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_closelog) == 3*4);
CTASSERT(sizeof(struct pmclog_dropnotify) == 3*4);
CTASSERT(sizeof(struct pmclog_map_in) == PATH_MAX +
    4*4 + sizeof(uintfptr_t));
CTASSERT(offsetof(struct pmclog_map_in,pl_pathname) ==
    4*4 + sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_map_out) == 4*4 + 2*sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_pcsample) == 6*4 + sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_pmcallocate) == 6*4);
CTASSERT(sizeof(struct pmclog_pmcattach) == 5*4 + PATH_MAX);
CTASSERT(offsetof(struct pmclog_pmcattach,pl_pathname) == 5*4);
CTASSERT(sizeof(struct pmclog_pmcdetach) == 5*4);
CTASSERT(sizeof(struct pmclog_proccsw) == 5*4 + 8);
CTASSERT(sizeof(struct pmclog_procexec) == 5*4 + PATH_MAX +
    sizeof(uintfptr_t));
CTASSERT(offsetof(struct pmclog_procexec,pl_pathname) == 5*4 +
    sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_procexit) == 5*4 + 8);
CTASSERT(sizeof(struct pmclog_procfork) == 5*4);
CTASSERT(sizeof(struct pmclog_sysexit) == 4*4);
CTASSERT(sizeof(struct pmclog_userdata) == 4*4);

/*
 * Log buffer structure
 *
 * The descriptor is embedded at the head of the buffer allocation; see
 * PMCLOG_INIT_BUFFER_DESCRIPTOR for the layout of base/ptr/fence.
 */

struct pmclog_buffer {
	TAILQ_ENTRY(pmclog_buffer) plb_next;	/* free-pool / owner-queue linkage */
	char 		*plb_base;	/* first usable byte */
	char		*plb_ptr;	/* current fill point */
	char 		*plb_fence;	/* one past the last usable byte */
};

/*
 * Prototypes
 */

static int pmclog_get_buffer(struct pmc_owner *po);
static void pmclog_loop(void *arg);
static void pmclog_release(struct pmc_owner *po);
static uint32_t *pmclog_reserve(struct pmc_owner *po, int length);
static void pmclog_schedule_io(struct pmc_owner *po);
static void pmclog_stop_kthread(struct pmc_owner *po);
185 
186 /*
187  * Helper functions
188  */
189 
190 /*
191  * Get a log buffer
192  */
193 
194 static int
195 pmclog_get_buffer(struct pmc_owner *po)
196 {
197 	struct pmclog_buffer *plb;
198 
199 	mtx_assert(&po->po_mtx, MA_OWNED);
200 
201 	KASSERT(po->po_curbuf == NULL,
202 	    ("[pmc,%d] po=%p current buffer still valid", __LINE__, po));
203 
204 	mtx_lock_spin(&pmc_bufferlist_mtx);
205 	if ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL)
206 		TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
207 	mtx_unlock_spin(&pmc_bufferlist_mtx);
208 
209 	PMCDBG(LOG,GTB,1, "po=%p plb=%p", po, plb);
210 
211 #ifdef	DEBUG
212 	if (plb)
213 		KASSERT(plb->plb_ptr == plb->plb_base &&
214 		    plb->plb_base < plb->plb_fence,
215 		    ("[pmc,%d] po=%p buffer invariants: ptr=%p "
216 		    "base=%p fence=%p", __LINE__, po, plb->plb_ptr,
217 		    plb->plb_base, plb->plb_fence));
218 #endif
219 
220 	po->po_curbuf = plb;
221 
222 	/* update stats */
223 	atomic_add_int(&pmc_stats.pm_buffer_requests, 1);
224 	if (plb == NULL)
225 		atomic_add_int(&pmc_stats.pm_buffer_requests_failed, 1);
226 
227 	return plb ? 0 : ENOMEM;
228 }
229 
230 /*
231  * Log handler loop.
232  *
233  * This function is executed by each pmc owner's helper thread.
234  */
235 
/*
 * Body of each owner's helper kthread.  Repeatedly dequeues filled log
 * buffers from the owner's queue, writes them to the configured log
 * file with the owner process' credentials, and recycles the buffers
 * into the global pool.  Exits when PMC_PO_OWNS_LOGFILE is cleared
 * (see pmclog_stop_kthread()) or when a write fails.
 */
static void
pmclog_loop(void *arg)
{
	int error;
	struct pmc_owner *po;
	struct pmclog_buffer *lb;
	struct ucred *ownercred;
	struct ucred *mycred;
	struct thread *td;
	struct uio auio;
	struct iovec aiov;
	size_t nbytes;

	po = (struct pmc_owner *) arg;
	td = curthread;
	mycred = td->td_ucred;

	/* hold a reference on the owner's credentials for the file I/O */
	PROC_LOCK(po->po_owner);
	ownercred = crhold(po->po_owner->p_ucred);
	PROC_UNLOCK(po->po_owner);

	PMCDBG(LOG,INI,1, "po=%p kt=%p", po, po->po_kthread);
	KASSERT(po->po_kthread == curthread->td_proc,
	    ("[pmc,%d] proc mismatch po=%p po/kt=%p curproc=%p", __LINE__,
		po, po->po_kthread, curthread->td_proc));

	lb = NULL;


	/*
	 * Loop waiting for I/O requests to be added to the owner
	 * struct's queue.  The loop is exited when the log file
	 * is deconfigured.
	 */

	mtx_lock(&pmc_kthread_mtx);

	for (;;) {

		/* check if we've been asked to exit */
		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
			break;

		if (lb == NULL) { /* look for a fresh buffer to write */
			mtx_lock_spin(&po->po_mtx);
			if ((lb = TAILQ_FIRST(&po->po_logbuffers)) == NULL) {
				mtx_unlock_spin(&po->po_mtx);

				/* wakeup any processes waiting for a FLUSH */
				if (po->po_flags & PMC_PO_IN_FLUSH) {
					po->po_flags &= ~PMC_PO_IN_FLUSH;
					wakeup_one(po->po_kthread);
				}

				/* sleep until pmclog_schedule_io() wakes us */
				(void) msleep(po, &pmc_kthread_mtx, PWAIT,
				    "pmcloop", 0);
				continue;
			}

			TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
			mtx_unlock_spin(&po->po_mtx);
		}

		/* drop the sleep lock for the duration of the write */
		mtx_unlock(&pmc_kthread_mtx);

		/* process the request */
		PMCDBG(LOG,WRI,2, "po=%p base=%p ptr=%p", po,
		    lb->plb_base, lb->plb_ptr);
		/* change our thread's credentials before issuing the I/O */

		aiov.iov_base = lb->plb_base;
		aiov.iov_len  = nbytes = lb->plb_ptr - lb->plb_base;

		auio.uio_iov    = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = -1;
		auio.uio_resid  = nbytes;
		auio.uio_rw     = UIO_WRITE;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_td     = td;

		/* switch thread credentials -- see kern_ktrace.c */
		td->td_ucred = ownercred;
		error = fo_write(po->po_file, &auio, ownercred, 0, td);
		td->td_ucred = mycred;

		mtx_lock(&pmc_kthread_mtx);

		if (error) {
			/* XXX some errors are recoverable */
			/* XXX also check for SIGPIPE if a socket */

			/* send a SIGIO to the owner and exit */
			PROC_LOCK(po->po_owner);
			psignal(po->po_owner, SIGIO);
			PROC_UNLOCK(po->po_owner);

			po->po_error = error; /* save for flush log */

			PMCDBG(LOG,WRI,2, "po=%p error=%d", po, error);

			break;
		}

		/* put the used buffer back into the global pool */
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);

		lb = NULL;
	}

	/* signal to pmclog_stop_kthread() that we are gone */
	po->po_kthread = NULL;

	mtx_unlock(&pmc_kthread_mtx);

	/* return the current I/O buffer to the global pool */
	if (lb) {
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);
	}

	/*
	 * Exit this thread, signalling the waiter
	 */

	crfree(ownercred);

	kproc_exit(0);
}
371 
372 /*
373  * Release and log entry and schedule an I/O if needed.
374  */
375 
376 static void
377 pmclog_release(struct pmc_owner *po)
378 {
379 	KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base,
380 	    ("[pmc,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
381 		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base));
382 	KASSERT(po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
383 	    ("[pmc,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
384 		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_fence));
385 
386 	/* schedule an I/O if we've filled a buffer */
387 	if (po->po_curbuf->plb_ptr >= po->po_curbuf->plb_fence)
388 		pmclog_schedule_io(po);
389 
390 	mtx_unlock_spin(&po->po_mtx);
391 
392 	PMCDBG(LOG,REL,1, "po=%p", po);
393 }
394 
395 
396 /*
397  * Attempt to reserve 'length' bytes of space in an owner's log
398  * buffer.  The function returns a pointer to 'length' bytes of space
399  * if there was enough space or returns NULL if no space was
400  * available.  Non-null returns do so with the po mutex locked.  The
401  * caller must invoke pmclog_release() on the pmc owner structure
402  * when done.
403  */
404 
405 static uint32_t *
406 pmclog_reserve(struct pmc_owner *po, int length)
407 {
408 	uintptr_t newptr, oldptr;
409 	uint32_t *lh;
410 	struct timespec ts;
411 
412 	PMCDBG(LOG,ALL,1, "po=%p len=%d", po, length);
413 
414 	KASSERT(length % sizeof(uint32_t) == 0,
415 	    ("[pmclog,%d] length not a multiple of word size", __LINE__));
416 
417 	mtx_lock_spin(&po->po_mtx);
418 
419 	if (po->po_curbuf == NULL)
420 		if (pmclog_get_buffer(po) != 0) {
421 			mtx_unlock_spin(&po->po_mtx);
422 			return NULL;
423 		}
424 
425 	KASSERT(po->po_curbuf != NULL,
426 	    ("[pmc,%d] po=%p no current buffer", __LINE__, po));
427 
428 	KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base &&
429 	    po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
430 	    ("[pmc,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
431 		__LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base,
432 		po->po_curbuf->plb_fence));
433 
434 	oldptr = (uintptr_t) po->po_curbuf->plb_ptr;
435 	newptr = oldptr + length;
436 
437 	KASSERT(oldptr != (uintptr_t) NULL,
438 	    ("[pmc,%d] po=%p Null log buffer pointer", __LINE__, po));
439 
440 	/*
441 	 * If we have space in the current buffer, return a pointer to
442 	 * available space with the PO structure locked.
443 	 */
444 	if (newptr <= (uintptr_t) po->po_curbuf->plb_fence) {
445 		po->po_curbuf->plb_ptr = (char *) newptr;
446 		goto done;
447 	}
448 
449 	/*
450 	 * Otherwise, schedule the current buffer for output and get a
451 	 * fresh buffer.
452 	 */
453 	pmclog_schedule_io(po);
454 
455 	if (pmclog_get_buffer(po) != 0) {
456 		mtx_unlock_spin(&po->po_mtx);
457 		return NULL;
458 	}
459 
460 	KASSERT(po->po_curbuf != NULL,
461 	    ("[pmc,%d] po=%p no current buffer", __LINE__, po));
462 
463 	KASSERT(po->po_curbuf->plb_ptr != NULL,
464 	    ("[pmc,%d] null return from pmc_get_log_buffer", __LINE__));
465 
466 	KASSERT(po->po_curbuf->plb_ptr == po->po_curbuf->plb_base &&
467 	    po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
468 	    ("[pmc,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
469 		__LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base,
470 		po->po_curbuf->plb_fence));
471 
472 	oldptr = (uintptr_t) po->po_curbuf->plb_ptr;
473 
474  done:
475 	lh = (uint32_t *) oldptr;
476 	lh++;				/* skip header */
477 	getnanotime(&ts);		/* fill in the timestamp */
478 	*lh++ = ts.tv_sec & 0xFFFFFFFF;
479 	*lh++ = ts.tv_nsec & 0xFFFFFFF;
480 	return (uint32_t *) oldptr;
481 }
482 
483 /*
484  * Schedule an I/O.
485  *
486  * Transfer the current buffer to the helper kthread.
487  */
488 
489 static void
490 pmclog_schedule_io(struct pmc_owner *po)
491 {
492 	KASSERT(po->po_curbuf != NULL,
493 	    ("[pmc,%d] schedule_io with null buffer po=%p", __LINE__, po));
494 
495 	KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base,
496 	    ("[pmc,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
497 		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base));
498 	KASSERT(po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
499 	    ("[pmc,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
500 		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_fence));
501 
502 	PMCDBG(LOG,SIO, 1, "po=%p", po);
503 
504 	mtx_assert(&po->po_mtx, MA_OWNED);
505 
506 	/*
507 	 * Add the current buffer to the tail of the buffer list and
508 	 * wakeup the helper.
509 	 */
510 	TAILQ_INSERT_TAIL(&po->po_logbuffers, po->po_curbuf, plb_next);
511 	po->po_curbuf = NULL;
512 	wakeup_one(po);
513 }
514 
515 /*
516  * Stop the helper kthread.
517  */
518 
519 static void
520 pmclog_stop_kthread(struct pmc_owner *po)
521 {
522 	/*
523 	 * Unset flag, wakeup the helper thread,
524 	 * wait for it to exit
525 	 */
526 
527 	mtx_assert(&pmc_kthread_mtx, MA_OWNED);
528 	po->po_flags &= ~PMC_PO_OWNS_LOGFILE;
529 	wakeup_one(po);
530 	if (po->po_kthread)
531 		msleep(po->po_kthread, &pmc_kthread_mtx, PPAUSE, "pmckstp", 0);
532 }
533 
534 /*
535  * Public functions
536  */
537 
538 /*
539  * Configure a log file for pmc owner 'po'.
540  *
541  * Parameter 'logfd' is a file handle referencing an open file in the
542  * owner process.  This file needs to have been opened for writing.
543  */
544 
/*
 * Configure a log file for pmc owner 'po'.
 *
 * Parameter 'logfd' is a file handle referencing an open file in the
 * owner process.  This file needs to have been opened for writing.
 *
 * Returns 0 on success, EBUSY if a log is already configured, or the
 * error from fget_write()/kproc_create()/buffer reservation.  On
 * failure any started kthread is stopped and the file reference is
 * dropped.
 */
int
pmclog_configure_log(struct pmc_owner *po, int logfd)
{
	int error;
	struct proc *p;

	PMCDBG(LOG,CFG,1, "config po=%p logfd=%d", po, logfd);

	p = po->po_owner;

	/* return EBUSY if a log file was already present */
	if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		return EBUSY;

	KASSERT(po->po_kthread == NULL,
	    ("[pmc,%d] po=%p kthread (%p) already present", __LINE__, po,
		po->po_kthread));
	KASSERT(po->po_file == NULL,
	    ("[pmc,%d] po=%p file (%p) already present", __LINE__, po,
		po->po_file));

	/* get a reference to the file state */
	error = fget_write(curthread, logfd, &po->po_file);
	if (error)
		goto error;

	/* mark process as owning a log file */
	po->po_flags |= PMC_PO_OWNS_LOGFILE;
	/* start the helper kthread that performs the actual file writes */
	error = kproc_create(pmclog_loop, po, &po->po_kthread,
	    RFHIGHPID, 0, "hwpmc: proc(%d)", p->p_pid);
	if (error)
		goto error;

	/* mark process as using HWPMCs */
	PROC_LOCK(p);
	p->p_flag |= P_HWPMC;
	PROC_UNLOCK(p);

	/* create a log initialization entry */
	/* NOTE(review): if this reservation fails we goto error with
	 * P_HWPMC still set on the process -- confirm the caller (or
	 * process exit handling) clears it. */
	PMCLOG_RESERVE_WITH_ERROR(po, INITIALIZE,
	    sizeof(struct pmclog_initialize));
	PMCLOG_EMIT32(PMC_VERSION);
	PMCLOG_EMIT32(md->pmd_cputype);
	PMCLOG_DESPATCH(po);

	return 0;

 error:
	/* shutdown the thread */
	mtx_lock(&pmc_kthread_mtx);
	if (po->po_kthread)
		pmclog_stop_kthread(po);
	mtx_unlock(&pmc_kthread_mtx);

	KASSERT(po->po_kthread == NULL, ("[pmc,%d] po=%p kthread not stopped",
	    __LINE__, po));

	if (po->po_file)
		(void) fdrop(po->po_file, curthread);
	po->po_file  = NULL;	/* clear file and error state */
	po->po_error = 0;

	return error;
}
609 
610 
611 /*
612  * De-configure a log file.  This will throw away any buffers queued
613  * for this owner process.
614  */
615 
616 int
617 pmclog_deconfigure_log(struct pmc_owner *po)
618 {
619 	int error;
620 	struct pmclog_buffer *lb;
621 
622 	PMCDBG(LOG,CFG,1, "de-config po=%p", po);
623 
624 	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
625 		return EINVAL;
626 
627 	KASSERT(po->po_sscount == 0,
628 	    ("[pmc,%d] po=%p still owning SS PMCs", __LINE__, po));
629 	KASSERT(po->po_file != NULL,
630 	    ("[pmc,%d] po=%p no log file", __LINE__, po));
631 
632 	/* stop the kthread, this will reset the 'OWNS_LOGFILE' flag */
633 	mtx_lock(&pmc_kthread_mtx);
634 	if (po->po_kthread)
635 		pmclog_stop_kthread(po);
636 	mtx_unlock(&pmc_kthread_mtx);
637 
638 	KASSERT(po->po_kthread == NULL,
639 	    ("[pmc,%d] po=%p kthread not stopped", __LINE__, po));
640 
641 	/* return all queued log buffers to the global pool */
642 	while ((lb = TAILQ_FIRST(&po->po_logbuffers)) != NULL) {
643 		TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
644 		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
645 		mtx_lock_spin(&pmc_bufferlist_mtx);
646 		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
647 		mtx_unlock_spin(&pmc_bufferlist_mtx);
648 	}
649 
650 	/* return the 'current' buffer to the global pool */
651 	if ((lb = po->po_curbuf) != NULL) {
652 		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
653 		mtx_lock_spin(&pmc_bufferlist_mtx);
654 		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
655 		mtx_unlock_spin(&pmc_bufferlist_mtx);
656 	}
657 
658 	/* drop a reference to the fd */
659 	error = fdrop(po->po_file, curthread);
660 	po->po_file  = NULL;
661 	po->po_error = 0;
662 
663 	return error;
664 }
665 
666 /*
667  * Flush a process' log buffer.
668  */
669 
670 int
671 pmclog_flush(struct pmc_owner *po)
672 {
673 	int error, has_pending_buffers;
674 
675 	PMCDBG(LOG,FLS,1, "po=%p", po);
676 
677 	/*
678 	 * If there is a pending error recorded by the logger thread,
679 	 * return that.
680 	 */
681 	if (po->po_error)
682 		return po->po_error;
683 
684 	error = 0;
685 
686 	/*
687 	 * Check that we do have an active log file.
688 	 */
689 	mtx_lock(&pmc_kthread_mtx);
690 	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
691 		error = EINVAL;
692 		goto error;
693 	}
694 
695 	/*
696 	 * Schedule the current buffer if any.
697 	 */
698 	mtx_lock_spin(&po->po_mtx);
699 	if (po->po_curbuf)
700 		pmclog_schedule_io(po);
701 	has_pending_buffers = !TAILQ_EMPTY(&po->po_logbuffers);
702 	mtx_unlock_spin(&po->po_mtx);
703 
704 	if (has_pending_buffers) {
705 		po->po_flags |= PMC_PO_IN_FLUSH; /* ask for a wakeup */
706 		error = msleep(po->po_kthread, &pmc_kthread_mtx, PWAIT,
707 		    "pmcflush", 0);
708 	}
709 
710  error:
711 	mtx_unlock(&pmc_kthread_mtx);
712 
713 	return error;
714 }
715 
716 
717 void
718 pmclog_process_callchain(struct pmc *pm, struct pmc_sample *ps)
719 {
720 	int n, recordlen;
721 	uint32_t flags;
722 	struct pmc_owner *po;
723 
724 	PMCDBG(LOG,SAM,1,"pm=%p pid=%d n=%d", pm, ps->ps_pid,
725 	    ps->ps_nsamples);
726 
727 	recordlen = offsetof(struct pmclog_callchain, pl_pc) +
728 	    ps->ps_nsamples * sizeof(uintfptr_t);
729 	po = pm->pm_owner;
730 	flags = PMC_CALLCHAIN_TO_CPUFLAGS(ps->ps_cpu,ps->ps_flags);
731 	PMCLOG_RESERVE(po, CALLCHAIN, recordlen);
732 	PMCLOG_EMIT32(ps->ps_pid);
733 	PMCLOG_EMIT32(pm->pm_id);
734 	PMCLOG_EMIT32(flags);
735 	for (n = 0; n < ps->ps_nsamples; n++)
736 		PMCLOG_EMITADDR(ps->ps_pc[n]);
737 	PMCLOG_DESPATCH(po);
738 }
739 
/*
 * Emit a CLOSELOG record (header + timestamp only, no payload).
 * Silently a no-op if no buffer space can be reserved.
 */
void
pmclog_process_closelog(struct pmc_owner *po)
{
	PMCLOG_RESERVE(po,CLOSELOG,sizeof(struct pmclog_closelog));
	PMCLOG_DESPATCH(po);
}
746 
/*
 * Emit a DROPNOTIFY record, informing log readers that samples were
 * dropped.  Silently a no-op if no buffer space can be reserved.
 */
void
pmclog_process_dropnotify(struct pmc_owner *po)
{
	PMCLOG_RESERVE(po,DROPNOTIFY,sizeof(struct pmclog_dropnotify));
	PMCLOG_DESPATCH(po);
}
753 
754 void
755 pmclog_process_map_in(struct pmc_owner *po, pid_t pid, uintfptr_t start,
756     const char *path)
757 {
758 	int pathlen, recordlen;
759 
760 	KASSERT(path != NULL, ("[pmclog,%d] map-in, null path", __LINE__));
761 
762 	pathlen = strlen(path) + 1;	/* #bytes for path name */
763 	recordlen = offsetof(struct pmclog_map_in, pl_pathname) +
764 	    pathlen;
765 
766 	PMCLOG_RESERVE(po, MAP_IN, recordlen);
767 	PMCLOG_EMIT32(pid);
768 	PMCLOG_EMITADDR(start);
769 	PMCLOG_EMITSTRING(path,pathlen);
770 	PMCLOG_DESPATCH(po);
771 }
772 
/*
 * Emit a MAP_OUT record for an address range unmapped from a process.
 */
void
pmclog_process_map_out(struct pmc_owner *po, pid_t pid, uintfptr_t start,
    uintfptr_t end)
{
	KASSERT(start <= end, ("[pmclog,%d] start > end", __LINE__));

	PMCLOG_RESERVE(po, MAP_OUT, sizeof(struct pmclog_map_out));
	PMCLOG_EMIT32(pid);
	PMCLOG_EMITADDR(start);
	PMCLOG_EMITADDR(end);
	PMCLOG_DESPATCH(po);
}
785 
/*
 * Emit a PMCALLOCATE record describing a newly allocated PMC: its id,
 * event and flags, logged to the owning process' log file.
 */
void
pmclog_process_pmcallocate(struct pmc *pm)
{
	struct pmc_owner *po;

	po = pm->pm_owner;

	PMCDBG(LOG,ALL,1, "pm=%p", pm);

	PMCLOG_RESERVE(po, PMCALLOCATE, sizeof(struct pmclog_pmcallocate));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pm->pm_event);
	PMCLOG_EMIT32(pm->pm_flags);
	PMCLOG_DESPATCH(po);
}
801 
802 void
803 pmclog_process_pmcattach(struct pmc *pm, pid_t pid, char *path)
804 {
805 	int pathlen, recordlen;
806 	struct pmc_owner *po;
807 
808 	PMCDBG(LOG,ATT,1,"pm=%p pid=%d", pm, pid);
809 
810 	po = pm->pm_owner;
811 
812 	pathlen = strlen(path) + 1;	/* #bytes for the string */
813 	recordlen = offsetof(struct pmclog_pmcattach, pl_pathname) + pathlen;
814 
815 	PMCLOG_RESERVE(po, PMCATTACH, recordlen);
816 	PMCLOG_EMIT32(pm->pm_id);
817 	PMCLOG_EMIT32(pid);
818 	PMCLOG_EMITSTRING(path, pathlen);
819 	PMCLOG_DESPATCH(po);
820 }
821 
/*
 * Emit a PMCDETACH record: the pmc id and the pid it was detached from.
 */
void
pmclog_process_pmcdetach(struct pmc *pm, pid_t pid)
{
	struct pmc_owner *po;

	PMCDBG(LOG,ATT,1,"!pm=%p pid=%d", pm, pid);

	po = pm->pm_owner;

	PMCLOG_RESERVE(po, PMCDETACH, sizeof(struct pmclog_pmcdetach));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pid);
	PMCLOG_DESPATCH(po);
}
836 
837 /*
838  * Log a context switch event to the log file.
839  */
840 
/*
 * Emit a PROCCSW record: the pmc id, the 64-bit accumulated value 'v'
 * at context-switch time, and the pid of the switching process.  Only
 * valid for PMCs allocated with PMC_F_LOG_PROCCSW.
 */
void
pmclog_process_proccsw(struct pmc *pm, struct pmc_process *pp, pmc_value_t v)
{
	struct pmc_owner *po;

	KASSERT(pm->pm_flags & PMC_F_LOG_PROCCSW,
	    ("[pmclog,%d] log-process-csw called gratuitously", __LINE__));

	PMCDBG(LOG,SWO,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
	    v);

	po = pm->pm_owner;

	PMCLOG_RESERVE(po, PROCCSW, sizeof(struct pmclog_proccsw));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT64(v);
	PMCLOG_EMIT32(pp->pp_proc->p_pid);
	PMCLOG_DESPATCH(po);
}
860 
861 void
862 pmclog_process_procexec(struct pmc_owner *po, pmc_id_t pmid, pid_t pid,
863     uintfptr_t startaddr, char *path)
864 {
865 	int pathlen, recordlen;
866 
867 	PMCDBG(LOG,EXC,1,"po=%p pid=%d path=\"%s\"", po, pid, path);
868 
869 	pathlen   = strlen(path) + 1;	/* #bytes for the path */
870 	recordlen = offsetof(struct pmclog_procexec, pl_pathname) + pathlen;
871 
872 	PMCLOG_RESERVE(po, PROCEXEC, recordlen);
873 	PMCLOG_EMIT32(pid);
874 	PMCLOG_EMITADDR(startaddr);
875 	PMCLOG_EMIT32(pmid);
876 	PMCLOG_EMITSTRING(path,pathlen);
877 	PMCLOG_DESPATCH(po);
878 }
879 
880 /*
881  * Log a process exit event (and accumulated pmc value) to the log file.
882  */
883 
/*
 * Log a process exit event (and accumulated pmc value) to the log file.
 * Emits the pmc id, the 64-bit accumulated value for this row index,
 * and the exiting pid.
 */
void
pmclog_process_procexit(struct pmc *pm, struct pmc_process *pp)
{
	int ri;
	struct pmc_owner *po;

	ri = PMC_TO_ROWINDEX(pm);
	PMCDBG(LOG,EXT,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
	    pp->pp_pmcs[ri].pp_pmcval);

	po = pm->pm_owner;

	PMCLOG_RESERVE(po, PROCEXIT, sizeof(struct pmclog_procexit));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT64(pp->pp_pmcs[ri].pp_pmcval);
	PMCLOG_EMIT32(pp->pp_proc->p_pid);
	PMCLOG_DESPATCH(po);
}
902 
903 /*
904  * Log a fork event.
905  */
906 
/*
 * Emit a PROCFORK record: the parent (old) and child (new) pids.
 */
void
pmclog_process_procfork(struct pmc_owner *po, pid_t oldpid, pid_t newpid)
{
	PMCLOG_RESERVE(po, PROCFORK, sizeof(struct pmclog_procfork));
	PMCLOG_EMIT32(oldpid);
	PMCLOG_EMIT32(newpid);
	PMCLOG_DESPATCH(po);
}
915 
916 /*
917  * Log a process exit event of the form suitable for system-wide PMCs.
918  */
919 
/*
 * Emit a SYSEXIT record: the pid of a process exiting while
 * system-wide sampling PMCs are active.
 */
void
pmclog_process_sysexit(struct pmc_owner *po, pid_t pid)
{
	PMCLOG_RESERVE(po, SYSEXIT, sizeof(struct pmclog_sysexit));
	PMCLOG_EMIT32(pid);
	PMCLOG_DESPATCH(po);
}
927 
928 /*
929  * Write a user log entry.
930  */
931 
/*
 * Write a USERDATA record carrying a 32-bit user-supplied value.
 * Returns 0 on success or ENOMEM if no log buffer space could be
 * reserved (the only failure path, via RESERVE_WITH_ERROR).
 */
int
pmclog_process_userlog(struct pmc_owner *po, struct pmc_op_writelog *wl)
{
	int error;

	PMCDBG(LOG,WRI,1, "writelog po=%p ud=0x%x", po, wl->pm_userdata);

	error = 0;

	PMCLOG_RESERVE_WITH_ERROR(po, USERDATA,
	    sizeof(struct pmclog_userdata));
	PMCLOG_EMIT32(wl->pm_userdata);
	PMCLOG_DESPATCH(po);

 error:
	return error;
}
949 
950 /*
951  * Initialization.
952  *
953  * Create a pool of log buffers and initialize mutexes.
954  */
955 
956 void
957 pmclog_initialize()
958 {
959 	int n;
960 	struct pmclog_buffer *plb;
961 
962 	if (pmclog_buffer_size <= 0) {
963 		(void) printf("hwpmc: tunable logbuffersize=%d must be greater "
964 		    "than zero.\n", pmclog_buffer_size);
965 		pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
966 	}
967 
968 	if (pmc_nlogbuffers <= 0) {
969 		(void) printf("hwpmc: tunable nlogbuffers=%d must be greater "
970 		    "than zero.\n", pmc_nlogbuffers);
971 		pmc_nlogbuffers = PMC_NLOGBUFFERS;
972 	}
973 
974 	/* create global pool of log buffers */
975 	for (n = 0; n < pmc_nlogbuffers; n++) {
976 		MALLOC(plb, struct pmclog_buffer *, 1024 * pmclog_buffer_size,
977 		    M_PMC, M_ZERO|M_WAITOK);
978 		PMCLOG_INIT_BUFFER_DESCRIPTOR(plb);
979 		TAILQ_INSERT_HEAD(&pmc_bufferlist, plb, plb_next);
980 	}
981 	mtx_init(&pmc_bufferlist_mtx, "pmc-buffer-list", "pmc-leaf",
982 	    MTX_SPIN);
983 	mtx_init(&pmc_kthread_mtx, "pmc-kthread", "pmc-sleep", MTX_DEF);
984 }
985 
986 /*
987  * Shutdown logging.
988  *
989  * Destroy mutexes and release memory back the to free pool.
990  */
991 
992 void
993 pmclog_shutdown()
994 {
995 	struct pmclog_buffer *plb;
996 
997 	mtx_destroy(&pmc_kthread_mtx);
998 	mtx_destroy(&pmc_bufferlist_mtx);
999 
1000 	while ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL) {
1001 		TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
1002 		FREE(plb, M_PMC);
1003 	}
1004 }
1005