1 /*- 2 * Copyright (c) 2005 Joseph Koshy 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
 *
 */

/*
 * Logging code for hwpmc(4)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/pmclog.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/unistd.h>
#include <sys/vnode.h>

/*
 * Sysctl tunables
 */

SYSCTL_DECL(_kern_hwpmc);

/*
 * kern.hwpmc.logbuffersize -- size of the per-cpu owner buffers.
 */

static int pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "logbuffersize", &pmclog_buffer_size);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_TUN|CTLFLAG_RD,
    &pmclog_buffer_size, 0, "size of log buffers in kilobytes");


/*
 * kern.hwpmc.nbuffer -- number of global log buffers
 */

static int pmc_nlogbuffers = PMC_NLOGBUFFERS;
TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nbuffers", &pmc_nlogbuffers);
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers, CTLFLAG_TUN|CTLFLAG_RD,
    &pmc_nlogbuffers, 0, "number of global log buffers");

/*
 * Global log buffer list and associated spin lock.
 */

TAILQ_HEAD(, pmclog_buffer) pmc_bufferlist =
	TAILQ_HEAD_INITIALIZER(pmc_bufferlist);
static struct mtx pmc_bufferlist_mtx;		/* spin lock */
static struct mtx pmc_kthread_mtx;		/* sleep lock */

/*
 * Initialize a buffer descriptor 'D'.
 *
 * The descriptor is embedded at the head of its own allocation (see
 * pmclog_initialize(), which MALLOCs 1024*pmclog_buffer_size bytes per
 * buffer): plb_fence points one byte past the end of the allocation and
 * plb_base/plb_ptr point just past the descriptor itself, rounded up to
 * a uint32_t boundary so log records stay word-aligned.
 */
#define	PMCLOG_INIT_BUFFER_DESCRIPTOR(D) do {				\
		const int __roundup = roundup(sizeof(*D),		\
			sizeof(uint32_t));				\
		(D)->plb_fence = ((char *) (D)) +			\
			 1024*pmclog_buffer_size;			\
		(D)->plb_base = (D)->plb_ptr = ((char *) (D)) +		\
			__roundup;					\
	} while (0)


/*
 * Log file record constructors.
 */

/*
 * Record header layout: 8-bit magic in the top byte, 8-bit record type,
 * 16-bit record length (in bytes) in the low half.
 */
#define	_PMCLOG_TO_HEADER(T,L)						\
	((PMCLOG_HEADER_MAGIC << 24) |					\
	 (PMCLOG_TYPE_ ## T << 16)   |					\
	 ((L) & 0xFFFF))

/*
 * Reserve LEN bytes of space and initialize the entry header.
 *
 * NOTE: this macro deliberately leaves the 'do {' it opens unclosed;
 * the matching '} while (0)' is supplied by PMCLOG_DESPATCH() below.
 * The hidden cursor '_le' is advanced past the header and timestamp
 * words and is then used by the PMCLOG_EMIT* macros.
 */
#define	_PMCLOG_RESERVE(PO,TYPE,LEN,ACTION) do {			\
		uint32_t *_le;						\
		int _len = roundup((LEN), sizeof(uint32_t));		\
		if ((_le = pmclog_reserve((PO), _len)) == NULL) {	\
			ACTION;						\
		}							\
		*_le = _PMCLOG_TO_HEADER(TYPE,_len);			\
		_le += 3	/* skip over timestamp */

#define	PMCLOG_RESERVE(P,T,L)		_PMCLOG_RESERVE(P,T,L,return)
#define	PMCLOG_RESERVE_WITH_ERROR(P,T,L) _PMCLOG_RESERVE(P,T,L,		\
	error=ENOMEM;goto error)

/* Emit one 32-bit word at the cursor. */
#define	PMCLOG_EMIT32(V)	do { *_le++ = (V); } while (0)

/* Emit a 64-bit value as two 32-bit words, low word first. */
#define	PMCLOG_EMIT64(V)	do {					\
		*_le++ = (uint32_t) ((V) & 0xFFFFFFFF);		\
		*_le++ = (uint32_t) (((V) >> 32) & 0xFFFFFFFF);	\
	} while (0)


/* Emit a string.  Caution: does NOT update _le, so needs to be last */
#define	PMCLOG_EMITSTRING(S,L)	do { bcopy((S), _le, (L)); } while (0)

/*
 * Close the record opened by PMCLOG_RESERVE*() and release the owner's
 * spin lock (taken by pmclog_reserve()), scheduling I/O if the current
 * buffer filled up.  Supplies the '} while (0)' for _PMCLOG_RESERVE.
 */
#define	PMCLOG_DESPATCH(PO)						\
		pmclog_release((PO));					\
	} while (0)


/*
 * Assertions about the log file format.
 */

CTASSERT(sizeof(struct pmclog_closelog) == 3*4);
CTASSERT(sizeof(struct pmclog_dropnotify) == 3*4);
CTASSERT(sizeof(struct pmclog_mappingchange) == PATH_MAX +
    5*4 + 2*sizeof(uintfptr_t));
CTASSERT(offsetof(struct pmclog_mappingchange,pl_pathname) ==
    5*4 + 2*sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_pcsample) == 6*4 + sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_pmcallocate) == 6*4);
CTASSERT(sizeof(struct pmclog_pmcattach) == 5*4 + PATH_MAX);
CTASSERT(offsetof(struct pmclog_pmcattach,pl_pathname) == 5*4);
CTASSERT(sizeof(struct pmclog_pmcdetach) == 5*4);
CTASSERT(sizeof(struct pmclog_proccsw) == 5*4 + 8);
CTASSERT(sizeof(struct pmclog_procexec) == 5*4 + PATH_MAX +
    sizeof(uintfptr_t));
CTASSERT(offsetof(struct pmclog_procexec,pl_pathname) == 5*4 +
    sizeof(uintfptr_t));
CTASSERT(sizeof(struct pmclog_procexit) == 5*4 + 8);
CTASSERT(sizeof(struct pmclog_procfork) == 5*4);
CTASSERT(sizeof(struct pmclog_sysexit) == 4*4);
CTASSERT(sizeof(struct pmclog_userdata) == 4*4);

/*
 * Log buffer structure.
 *
 * The descriptor sits at the head of its own allocation; see
 * PMCLOG_INIT_BUFFER_DESCRIPTOR() for how the fields are laid out.
 */

struct pmclog_buffer {
	TAILQ_ENTRY(pmclog_buffer) plb_next;	/* pool / per-owner queue link */
	char		*plb_base;	/* start of record area */
	char		*plb_ptr;	/* next free byte */
	char		*plb_fence;	/* one past end of the allocation */
};

/*
 * Prototypes
 */

static int pmclog_get_buffer(struct pmc_owner *po);
static void pmclog_loop(void *arg);
static void pmclog_release(struct pmc_owner *po);
static uint32_t *pmclog_reserve(struct pmc_owner *po, int length);
static void pmclog_schedule_io(struct pmc_owner *po);
static void pmclog_stop_kthread(struct pmc_owner *po);

/*
 * Helper functions
 */

/*
 * Get a log buffer.
 *
 * Detach a free buffer from the global pool and install it as the
 * owner's current buffer.  Called with the owner's spin lock held
 * (asserted below).  Returns 0 on success or ENOMEM if the pool is
 * empty; either way the buffer-request statistics are updated.
 */

static int
pmclog_get_buffer(struct pmc_owner *po)
{
	struct pmclog_buffer *plb;

	mtx_assert(&po->po_mtx, MA_OWNED);

	KASSERT(po->po_curbuf == NULL,
	    ("[pmc,%d] po=%p current buffer still valid", __LINE__, po));

	mtx_lock_spin(&pmc_bufferlist_mtx);
	if ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL)
		TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
	mtx_unlock_spin(&pmc_bufferlist_mtx);

	PMCDBG(LOG,GTB,1, "po=%p plb=%p", po, plb);

#ifdef	DEBUG
	if (plb)
		KASSERT(plb->plb_ptr == plb->plb_base &&
		    plb->plb_base < plb->plb_fence,
		    ("[pmc,%d] po=%p buffer invariants: ptr=%p "
		    "base=%p fence=%p", __LINE__, po, plb->plb_ptr,
		    plb->plb_base, plb->plb_fence));
#endif

	po->po_curbuf = plb;

	/* update stats */
	atomic_add_int(&pmc_stats.pm_buffer_requests, 1);
	if (plb == NULL)
		atomic_add_int(&pmc_stats.pm_buffer_requests_failed, 1);

	return plb ? 0 : ENOMEM;
}

/*
 * Log handler loop.
 *
 * This function is executed by each pmc owner's helper thread.  It
 * sleeps on the owner structure waiting for filled buffers, writes
 * each one to the owner's log file with the owner process' credentials
 * temporarily installed, and returns written buffers to the global
 * pool.  The loop exits when PMC_PO_OWNS_LOGFILE is cleared (see
 * pmclog_stop_kthread()) or when a write error occurs, in which case
 * the owner is sent SIGIO and the error is saved in po_error.
 */

static void
pmclog_loop(void *arg)
{
	int error;
	struct pmc_owner *po;
	struct pmclog_buffer *lb;
	struct ucred *ownercred;
	struct ucred *mycred;
	struct thread *td;
	struct uio auio;
	struct iovec aiov;
	size_t nbytes;

	po = (struct pmc_owner *) arg;
	td = curthread;
	mycred = td->td_ucred;

	PROC_LOCK(po->po_owner);
	ownercred = crhold(po->po_owner->p_ucred);
	PROC_UNLOCK(po->po_owner);

	PMCDBG(LOG,INI,1, "po=%p kt=%p", po, po->po_kthread);
	KASSERT(po->po_kthread == curthread->td_proc,
	    ("[pmc,%d] proc mismatch po=%p po/kt=%p curproc=%p", __LINE__,
		po, po->po_kthread, curthread->td_proc));

	lb = NULL;


	/*
	 * Loop waiting for I/O requests to be added to the owner
	 * struct's queue.  The loop is exited when the log file
	 * is deconfigured.
	 */

	mtx_lock(&pmc_kthread_mtx);

	for (;;) {

		/* check if we've been asked to exit */
		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
			break;

		if (lb == NULL) { /* look for a fresh buffer to write */
			mtx_lock_spin(&po->po_mtx);
			if ((lb = TAILQ_FIRST(&po->po_logbuffers)) == NULL) {
				mtx_unlock_spin(&po->po_mtx);

				/* wakeup any processes waiting for a FLUSH */
				if (po->po_flags & PMC_PO_IN_FLUSH) {
					po->po_flags &= ~PMC_PO_IN_FLUSH;
					wakeup_one(po->po_kthread);
				}

				(void) msleep(po, &pmc_kthread_mtx, PWAIT,
				    "pmcloop", 0);
				continue;
			}

			TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
			mtx_unlock_spin(&po->po_mtx);
		}

		/* drop the sleep lock around the (possibly blocking) write */
		mtx_unlock(&pmc_kthread_mtx);

		/* process the request */
		PMCDBG(LOG,WRI,2, "po=%p base=%p ptr=%p", po,
		    lb->plb_base, lb->plb_ptr);
		/* change our thread's credentials before issuing the I/O */

		aiov.iov_base = lb->plb_base;
		aiov.iov_len  = nbytes = lb->plb_ptr - lb->plb_base;

		auio.uio_iov    = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = -1;
		auio.uio_resid  = nbytes;
		auio.uio_rw     = UIO_WRITE;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_td     = td;

		/* switch thread credentials -- see kern_ktrace.c */
		td->td_ucred = ownercred;
		error = fo_write(po->po_file, &auio, ownercred, 0, td);
		td->td_ucred = mycred;

		mtx_lock(&pmc_kthread_mtx);

		if (error) {
			/* XXX some errors are recoverable */
			/* XXX also check for SIGPIPE if a socket */

			/* send a SIGIO to the owner and exit */
			PROC_LOCK(po->po_owner);
			psignal(po->po_owner, SIGIO);
			PROC_UNLOCK(po->po_owner);

			po->po_error = error; /* save for flush log */

			PMCDBG(LOG,WRI,2, "po=%p error=%d", po, error);

			break;
		}

		/* put the used buffer back into the global pool */
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);

		lb = NULL;
	}

	po->po_kthread = NULL;

	mtx_unlock(&pmc_kthread_mtx);

	/* return the current I/O buffer to the global pool */
	if (lb) {
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);
	}

	/*
	 * Exit this thread, signalling the waiter
	 */

	crfree(ownercred);

	kthread_exit(0);
}

/*
 * Release a log entry and schedule an I/O if needed.
 *
 * Called (via PMCLOG_DESPATCH) with the owner's spin lock held by a
 * successful pmclog_reserve(); drops that lock before returning.
 */

static void
pmclog_release(struct pmc_owner *po)
{
	KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base,
	    ("[pmc,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base));
	KASSERT(po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
	    ("[pmc,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_fence));

	/* schedule an I/O if we've filled a buffer */
	if (po->po_curbuf->plb_ptr >= po->po_curbuf->plb_fence)
		pmclog_schedule_io(po);

	mtx_unlock_spin(&po->po_mtx);

	PMCDBG(LOG,REL,1, "po=%p", po);
}


/*
 * Attempt to reserve 'length' bytes of space in an owner's log
 * buffer. The function returns a pointer to 'length' bytes of space
 * if there was enough space or returns NULL if no space was
 * available. Non-null returns do so with the po mutex locked. The
 * caller must invoke pmclog_release() on the pmc owner structure
 * when done.
398 */ 399 400 static uint32_t * 401 pmclog_reserve(struct pmc_owner *po, int length) 402 { 403 uintptr_t newptr, oldptr; 404 uint32_t *lh; 405 struct timespec ts; 406 407 PMCDBG(LOG,ALL,1, "po=%p len=%d", po, length); 408 409 KASSERT(length % sizeof(uint32_t) == 0, 410 ("[pmclog,%d] length not a multiple of word size", __LINE__)); 411 412 mtx_lock_spin(&po->po_mtx); 413 414 if (po->po_curbuf == NULL) 415 if (pmclog_get_buffer(po) != 0) { 416 mtx_unlock_spin(&po->po_mtx); 417 return NULL; 418 } 419 420 KASSERT(po->po_curbuf != NULL, 421 ("[pmc,%d] po=%p no current buffer", __LINE__, po)); 422 423 KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base && 424 po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence, 425 ("[pmc,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p", 426 __LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base, 427 po->po_curbuf->plb_fence)); 428 429 oldptr = (uintptr_t) po->po_curbuf->plb_ptr; 430 newptr = oldptr + length; 431 432 KASSERT(oldptr != (uintptr_t) NULL, 433 ("[pmc,%d] po=%p Null log buffer pointer", __LINE__, po)); 434 435 /* 436 * If we have space in the current buffer, return a pointer to 437 * available space with the PO structure locked. 438 */ 439 if (newptr <= (uintptr_t) po->po_curbuf->plb_fence) { 440 po->po_curbuf->plb_ptr = (char *) newptr; 441 goto done; 442 } 443 444 /* 445 * Otherwise, schedule the current buffer for output and get a 446 * fresh buffer. 
447 */ 448 pmclog_schedule_io(po); 449 450 if (pmclog_get_buffer(po) != 0) { 451 mtx_unlock_spin(&po->po_mtx); 452 return NULL; 453 } 454 455 KASSERT(po->po_curbuf != NULL, 456 ("[pmc,%d] po=%p no current buffer", __LINE__, po)); 457 458 KASSERT(po->po_curbuf->plb_ptr != NULL, 459 ("[pmc,%d] null return from pmc_get_log_buffer", __LINE__)); 460 461 KASSERT(po->po_curbuf->plb_ptr == po->po_curbuf->plb_base && 462 po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence, 463 ("[pmc,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p", 464 __LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base, 465 po->po_curbuf->plb_fence)); 466 467 oldptr = (uintptr_t) po->po_curbuf->plb_ptr; 468 469 done: 470 lh = (uint32_t *) oldptr; 471 lh++; /* skip header */ 472 getnanotime(&ts); /* fill in the timestamp */ 473 *lh++ = ts.tv_sec & 0xFFFFFFFF; 474 *lh++ = ts.tv_nsec & 0xFFFFFFF; 475 return (uint32_t *) oldptr; 476 } 477 478 /* 479 * Schedule an I/O. 480 * 481 * Transfer the current buffer to the helper kthread. 482 */ 483 484 static void 485 pmclog_schedule_io(struct pmc_owner *po) 486 { 487 KASSERT(po->po_curbuf != NULL, 488 ("[pmc,%d] schedule_io with null buffer po=%p", __LINE__, po)); 489 490 KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base, 491 ("[pmc,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__, 492 po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base)); 493 KASSERT(po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence, 494 ("[pmc,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__, 495 po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_fence)); 496 497 PMCDBG(LOG,SIO, 1, "po=%p", po); 498 499 mtx_assert(&po->po_mtx, MA_OWNED); 500 501 /* 502 * Add the current buffer to the tail of the buffer list and 503 * wakeup the helper. 504 */ 505 TAILQ_INSERT_TAIL(&po->po_logbuffers, po->po_curbuf, plb_next); 506 po->po_curbuf = NULL; 507 wakeup_one(po); 508 } 509 510 /* 511 * Stop the helper kthread. 
512 */ 513 514 static void 515 pmclog_stop_kthread(struct pmc_owner *po) 516 { 517 /* 518 * Unset flag, wakeup the helper thread, 519 * wait for it to exit 520 */ 521 522 mtx_assert(&pmc_kthread_mtx, MA_OWNED); 523 po->po_flags &= ~PMC_PO_OWNS_LOGFILE; 524 wakeup_one(po); 525 if (po->po_kthread) 526 msleep(po->po_kthread, &pmc_kthread_mtx, PPAUSE, "pmckstp", 0); 527 } 528 529 /* 530 * Public functions 531 */ 532 533 /* 534 * Configure a log file for pmc owner 'po'. 535 * 536 * Parameter 'logfd' is a file handle referencing an open file in the 537 * owner process. This file needs to have been opened for writing. 538 */ 539 540 int 541 pmclog_configure_log(struct pmc_owner *po, int logfd) 542 { 543 int error; 544 struct proc *p; 545 546 PMCDBG(LOG,CFG,1, "config po=%p logfd=%d", po, logfd); 547 548 p = po->po_owner; 549 550 /* return EBUSY if a log file was already present */ 551 if (po->po_flags & PMC_PO_OWNS_LOGFILE) 552 return EBUSY; 553 554 KASSERT(po->po_kthread == NULL, 555 ("[pmc,%d] po=%p kthread (%p) already present", __LINE__, po, 556 po->po_kthread)); 557 KASSERT(po->po_file == NULL, 558 ("[pmc,%d] po=%p file (%p) already present", __LINE__, po, 559 po->po_file)); 560 561 /* get a reference to the file state */ 562 error = fget_write(curthread, logfd, &po->po_file); 563 if (error) 564 goto error; 565 566 /* mark process as owning a log file */ 567 po->po_flags |= PMC_PO_OWNS_LOGFILE; 568 error = kthread_create(pmclog_loop, po, &po->po_kthread, 569 RFHIGHPID, 0, "hwpmc: proc(%d)", p->p_pid); 570 if (error) 571 goto error; 572 573 /* mark process as using HWPMCs */ 574 PROC_LOCK(p); 575 p->p_flag |= P_HWPMC; 576 PROC_UNLOCK(p); 577 578 /* create a log initialization entry */ 579 PMCLOG_RESERVE_WITH_ERROR(po, INITIALIZE, 580 sizeof(struct pmclog_initialize)); 581 PMCLOG_EMIT32(PMC_VERSION); 582 PMCLOG_EMIT32(md->pmd_cputype); 583 PMCLOG_DESPATCH(po); 584 585 return 0; 586 587 error: 588 /* shutdown the thread */ 589 mtx_lock(&pmc_kthread_mtx); 590 if 
(po->po_kthread) 591 pmclog_stop_kthread(po); 592 mtx_unlock(&pmc_kthread_mtx); 593 594 KASSERT(po->po_kthread == NULL, ("[pmc,%d] po=%p kthread not stopped", 595 __LINE__, po)); 596 597 if (po->po_file) 598 (void) fdrop(po->po_file, curthread); 599 po->po_file = NULL; /* clear file and error state */ 600 po->po_error = 0; 601 602 return error; 603 } 604 605 606 /* 607 * De-configure a log file. This will throw away any buffers queued 608 * for this owner process. 609 */ 610 611 int 612 pmclog_deconfigure_log(struct pmc_owner *po) 613 { 614 int error; 615 struct pmclog_buffer *lb; 616 617 PMCDBG(LOG,CFG,1, "de-config po=%p", po); 618 619 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) 620 return EINVAL; 621 622 /* remove this owner from the global SS pmc owner list */ 623 if (po->po_sscount) 624 LIST_REMOVE(po, po_ssnext); 625 626 KASSERT(po->po_file != NULL, 627 ("[pmc,%d] po=%p no log file", __LINE__, po)); 628 629 /* stop the kthread, this will reset the 'OWNS_LOGFILE' flag */ 630 mtx_lock(&pmc_kthread_mtx); 631 if (po->po_kthread) 632 pmclog_stop_kthread(po); 633 mtx_unlock(&pmc_kthread_mtx); 634 635 KASSERT(po->po_kthread == NULL, 636 ("[pmc,%d] po=%p kthread not stopped", __LINE__, po)); 637 638 /* return all queued log buffers to the global pool */ 639 while ((lb = TAILQ_FIRST(&po->po_logbuffers)) != NULL) { 640 TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next); 641 PMCLOG_INIT_BUFFER_DESCRIPTOR(lb); 642 mtx_lock_spin(&pmc_bufferlist_mtx); 643 TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next); 644 mtx_unlock_spin(&pmc_bufferlist_mtx); 645 } 646 647 /* return the 'current' buffer to the global pool */ 648 if ((lb = po->po_curbuf) != NULL) { 649 PMCLOG_INIT_BUFFER_DESCRIPTOR(lb); 650 mtx_lock_spin(&pmc_bufferlist_mtx); 651 TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next); 652 mtx_unlock_spin(&pmc_bufferlist_mtx); 653 } 654 655 /* drop a reference to the fd */ 656 error = fdrop(po->po_file, curthread); 657 po->po_file = NULL; 658 po->po_error = 0; 659 660 return 
error; 661 } 662 663 /* 664 * Flush a process' log buffer. 665 */ 666 667 int 668 pmclog_flush(struct pmc_owner *po) 669 { 670 int error, has_pending_buffers; 671 672 PMCDBG(LOG,FLS,1, "po=%p", po); 673 674 /* 675 * If there is a pending error recorded by the logger thread, 676 * return that. 677 */ 678 if (po->po_error) 679 return po->po_error; 680 681 error = 0; 682 683 /* 684 * Check that we do have an active log file. 685 */ 686 mtx_lock(&pmc_kthread_mtx); 687 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) { 688 error = EINVAL; 689 goto error; 690 } 691 692 /* 693 * Schedule the current buffer if any. 694 */ 695 mtx_lock_spin(&po->po_mtx); 696 if (po->po_curbuf) 697 pmclog_schedule_io(po); 698 has_pending_buffers = !TAILQ_EMPTY(&po->po_logbuffers); 699 mtx_unlock_spin(&po->po_mtx); 700 701 if (has_pending_buffers) { 702 po->po_flags |= PMC_PO_IN_FLUSH; /* ask for a wakeup */ 703 error = msleep(po->po_kthread, &pmc_kthread_mtx, PWAIT, 704 "pmcflush", 0); 705 } 706 707 error: 708 mtx_unlock(&pmc_kthread_mtx); 709 710 return error; 711 } 712 713 714 /* 715 * Send a 'close log' event to the log file. 
 */

void
pmclog_process_closelog(struct pmc_owner *po)
{
	PMCLOG_RESERVE(po,CLOSELOG,sizeof(struct pmclog_closelog));
	PMCLOG_DESPATCH(po);
}

/*
 * Send a 'drop notification' event to the log file.
 */

void
pmclog_process_dropnotify(struct pmc_owner *po)
{
	PMCLOG_RESERVE(po,DROPNOTIFY,sizeof(struct pmclog_dropnotify));
	PMCLOG_DESPATCH(po);
}

/*
 * Log a mapping change event: type, [start,end) address range, the
 * affected pid and the NUL-terminated path name.
 */

void
pmclog_process_mappingchange(struct pmc_owner *po, pid_t pid, int type,
    uintfptr_t start, uintfptr_t end, char *path)
{
	int pathlen, recordlen;

	pathlen = strlen(path) + 1;	/* #bytes for path name */
	recordlen = offsetof(struct pmclog_mappingchange, pl_pathname) +
	    pathlen;

	PMCLOG_RESERVE(po,MAPPINGCHANGE,recordlen);
	PMCLOG_EMIT32(type);
	PMCLOG_EMITADDR(start);
	PMCLOG_EMITADDR(end);
	PMCLOG_EMIT32(pid);
	/* string emit does not move the cursor -- must stay last */
	PMCLOG_EMITSTRING(path,pathlen);
	PMCLOG_DESPATCH(po);
}


/*
 * Log a PC sample: sampled pid, program counter, pmc id and whether
 * the sample was taken in user mode.
 */

void
pmclog_process_pcsample(struct pmc *pm, struct pmc_sample *ps)
{
	struct pmc_owner *po;

	PMCDBG(LOG,SAM,1,"pm=%p pid=%d pc=%p", pm, ps->ps_pid,
	    (void *) ps->ps_pc);

	po = pm->pm_owner;

	PMCLOG_RESERVE(po, PCSAMPLE, sizeof(struct pmclog_pcsample));
	PMCLOG_EMIT32(ps->ps_pid);
	PMCLOG_EMITADDR(ps->ps_pc);
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(ps->ps_usermode);
	PMCLOG_DESPATCH(po);
}

/*
 * Log a PMC allocation: pmc id, event code and allocation flags.
 */

void
pmclog_process_pmcallocate(struct pmc *pm)
{
	struct pmc_owner *po;

	po = pm->pm_owner;

	PMCDBG(LOG,ALL,1, "pm=%p", pm);

	PMCLOG_RESERVE(po, PMCALLOCATE, sizeof(struct pmclog_pmcallocate));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pm->pm_event);
	PMCLOG_EMIT32(pm->pm_flags);
	PMCLOG_DESPATCH(po);
}

/*
 * Log a PMC-to-process attach: pmc id, target pid and the target's
 * executable path.
 */

void
pmclog_process_pmcattach(struct pmc *pm, pid_t pid, char *path)
{
	int pathlen, recordlen;
	struct pmc_owner *po;

	PMCDBG(LOG,ATT,1,"pm=%p pid=%d", pm, pid);

	po = pm->pm_owner;

	pathlen = strlen(path) + 1;	/* #bytes for the string */
	recordlen = offsetof(struct pmclog_pmcattach, pl_pathname) + pathlen;

	PMCLOG_RESERVE(po, PMCATTACH, recordlen);
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pid);
	/* string emit does not move the cursor -- must stay last */
	PMCLOG_EMITSTRING(path, pathlen);
	PMCLOG_DESPATCH(po);
}

/*
 * Log a PMC-from-process detach: pmc id and former target pid.
 */

void
pmclog_process_pmcdetach(struct pmc *pm, pid_t pid)
{
	struct pmc_owner *po;

	PMCDBG(LOG,ATT,1,"!pm=%p pid=%d", pm, pid);

	po = pm->pm_owner;

	PMCLOG_RESERVE(po, PMCDETACH, sizeof(struct pmclog_pmcdetach));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pid);
	PMCLOG_DESPATCH(po);
}

/*
 * Log a context switch event to the log file.
 */

void
pmclog_process_proccsw(struct pmc *pm, struct pmc_process *pp, pmc_value_t v)
{
	struct pmc_owner *po;

	KASSERT(pm->pm_flags & PMC_F_LOG_PROCCSW,
	    ("[pmclog,%d] log-process-csw called gratuitously", __LINE__));

	PMCDBG(LOG,SWO,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
	    v);

	po = pm->pm_owner;

	PMCLOG_RESERVE(po, PROCCSW, sizeof(struct pmclog_proccsw));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT64(v);
	PMCLOG_EMIT32(pp->pp_proc->p_pid);
	PMCLOG_DESPATCH(po);
}

/*
 * Log an exec event: pid, image start address, pmc id and the new
 * executable's path.
 */

void
pmclog_process_procexec(struct pmc_owner *po, pmc_id_t pmid, pid_t pid,
    uintfptr_t startaddr, char *path)
{
	int pathlen, recordlen;

	PMCDBG(LOG,EXC,1,"po=%p pid=%d path=\"%s\"", po, pid, path);

	pathlen = strlen(path) + 1;	/* #bytes for the path */
	recordlen = offsetof(struct pmclog_procexec, pl_pathname) + pathlen;

	PMCLOG_RESERVE(po, PROCEXEC, recordlen);
	PMCLOG_EMIT32(pid);
	PMCLOG_EMITADDR(startaddr);
	PMCLOG_EMIT32(pmid);
	/* string emit does not move the cursor -- must stay last */
	PMCLOG_EMITSTRING(path,pathlen);
	PMCLOG_DESPATCH(po);
}

/*
 * Log a process exit event (and accumulated pmc value) to the log file.
 */

void
pmclog_process_procexit(struct pmc *pm, struct pmc_process *pp)
{
	int ri;
	struct pmc_owner *po;

	/* row index selects this pmc's accumulated value in pp_pmcs[] */
	ri = PMC_TO_ROWINDEX(pm);
	PMCDBG(LOG,EXT,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
	    pp->pp_pmcs[ri].pp_pmcval);

	po = pm->pm_owner;

	PMCLOG_RESERVE(po, PROCEXIT, sizeof(struct pmclog_procexit));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT64(pp->pp_pmcs[ri].pp_pmcval);
	PMCLOG_EMIT32(pp->pp_proc->p_pid);
	PMCLOG_DESPATCH(po);
}

/*
 * Log a fork event.
 */

void
pmclog_process_procfork(struct pmc_owner *po, pid_t oldpid, pid_t newpid)
{
	PMCLOG_RESERVE(po, PROCFORK, sizeof(struct pmclog_procfork));
	PMCLOG_EMIT32(oldpid);
	PMCLOG_EMIT32(newpid);
	PMCLOG_DESPATCH(po);
}

/*
 * Log a process exit event of the form suitable for system-wide PMCs.
 */

void
pmclog_process_sysexit(struct pmc_owner *po, pid_t pid)
{
	PMCLOG_RESERVE(po, SYSEXIT, sizeof(struct pmclog_sysexit));
	PMCLOG_EMIT32(pid);
	PMCLOG_DESPATCH(po);
}

/*
 * Write a user log entry.
 *
 * Returns 0 on success or ENOMEM if no log buffer space could be
 * reserved (set via PMCLOG_RESERVE_WITH_ERROR).
 */

int
pmclog_process_userlog(struct pmc_owner *po, struct pmc_op_writelog *wl)
{
	int error;

	PMCDBG(LOG,WRI,1, "writelog po=%p ud=0x%x", po, wl->pm_userdata);

	error = 0;

	PMCLOG_RESERVE_WITH_ERROR(po, USERDATA,
	    sizeof(struct pmclog_userdata));
	PMCLOG_EMIT32(wl->pm_userdata);
	PMCLOG_DESPATCH(po);

 error:
	return error;
}

/*
 * Initialization.
 *
 * Create a pool of log buffers and initialize mutexes.
 */

void
pmclog_initialize()
{
	int n;
	struct pmclog_buffer *plb;

	/* sanitize tunables: fall back to defaults on nonsensical values */
	if (pmclog_buffer_size <= 0) {
		(void) printf("hwpmc: tunable logbuffersize=%d must be greater "
		    "than zero.\n", pmclog_buffer_size);
		pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
	}

	if (pmc_nlogbuffers <= 0) {
		(void) printf("hwpmc: tunable nlogbuffers=%d must be greater "
		    "than zero.\n", pmc_nlogbuffers);
		pmc_nlogbuffers = PMC_NLOGBUFFERS;
	}

	/*
	 * Create global pool of log buffers.  Each allocation holds the
	 * buffer descriptor at its head (see
	 * PMCLOG_INIT_BUFFER_DESCRIPTOR) followed by the record area.
	 */
	for (n = 0; n < pmc_nlogbuffers; n++) {
		MALLOC(plb, struct pmclog_buffer *, 1024 * pmclog_buffer_size,
		    M_PMC, M_ZERO|M_WAITOK);
		PMCLOG_INIT_BUFFER_DESCRIPTOR(plb);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, plb, plb_next);
	}
	mtx_init(&pmc_bufferlist_mtx, "pmc-buffer-list", "pmc", MTX_SPIN);
	mtx_init(&pmc_kthread_mtx, "pmc-kthread", "pmc", MTX_DEF);
}

/*
 * Shutdown logging.
 *
 * Destroy mutexes and release memory back the to free pool.  Assumes
 * no owners still hold buffers: only the global free list is drained.
 */

void
pmclog_shutdown()
{
	struct pmclog_buffer *plb;

	mtx_destroy(&pmc_kthread_mtx);
	mtx_destroy(&pmc_bufferlist_mtx);

	while ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL) {
		TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
		FREE(plb, M_PMC);
	}
}