1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2005-2007 Joseph Koshy 5 * Copyright (c) 2007 The FreeBSD Foundation 6 * Copyright (c) 2018 Matthew Macy 7 * All rights reserved. 8 * 9 * Portions of this software were developed by A. Joseph Koshy under 10 * sponsorship from the FreeBSD Foundation and Google, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
32 * 33 */ 34 35 /* 36 * Logging code for hwpmc(4) 37 */ 38 39 #include <sys/param.h> 40 #include <sys/capsicum.h> 41 #include <sys/domainset.h> 42 #include <sys/file.h> 43 #include <sys/kernel.h> 44 #include <sys/kthread.h> 45 #include <sys/lock.h> 46 #include <sys/module.h> 47 #include <sys/mutex.h> 48 #include <sys/pmc.h> 49 #include <sys/pmckern.h> 50 #include <sys/pmclog.h> 51 #include <sys/proc.h> 52 #include <sys/sched.h> 53 #include <sys/signalvar.h> 54 #include <sys/smp.h> 55 #include <sys/syscallsubr.h> 56 #include <sys/sysctl.h> 57 #include <sys/systm.h> 58 #include <sys/uio.h> 59 #include <sys/unistd.h> 60 #include <sys/vnode.h> 61 62 #if defined(__i386__) || defined(__amd64__) 63 #include <machine/clock.h> 64 #endif 65 66 #define curdomain PCPU_GET(domain) 67 68 /* 69 * Sysctl tunables 70 */ 71 72 SYSCTL_DECL(_kern_hwpmc); 73 74 /* 75 * kern.hwpmc.logbuffersize -- size of the per-cpu owner buffers. 76 */ 77 78 static int pmclog_buffer_size = PMC_LOG_BUFFER_SIZE; 79 SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_RDTUN, 80 &pmclog_buffer_size, 0, "size of log buffers in kilobytes"); 81 82 /* 83 * kern.hwpmc.nbuffers_pcpu -- number of global log buffers 84 */ 85 86 static int pmc_nlogbuffers_pcpu = PMC_NLOGBUFFERS_PCPU; 87 SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers_pcpu, CTLFLAG_RDTUN, 88 &pmc_nlogbuffers_pcpu, 0, "number of log buffers per cpu"); 89 90 /* 91 * Global log buffer list and associated spin lock. 92 */ 93 94 static struct mtx pmc_kthread_mtx; /* sleep lock */ 95 96 #define PMCLOG_INIT_BUFFER_DESCRIPTOR(D, buf, domain) do { \ 97 (D)->plb_fence = ((char *)(buf)) + 1024 * pmclog_buffer_size; \ 98 (D)->plb_base = (D)->plb_ptr = ((char *)(buf)); \ 99 (D)->plb_domain = domain; \ 100 } while (0) 101 102 #define PMCLOG_RESET_BUFFER_DESCRIPTOR(D) do { \ 103 (D)->plb_ptr = (D)->plb_base; \ 104 } while (0) 105 106 /* 107 * Log file record constructors. 
108 */ 109 #define _PMCLOG_TO_HEADER(T, L) \ 110 ((PMCLOG_HEADER_MAGIC << 24) | (T << 16) | ((L) & 0xFFFF)) 111 112 /* reserve LEN bytes of space and initialize the entry header */ 113 #define _PMCLOG_RESERVE_SAFE(PO, TYPE, LEN, ACTION, TSC) do { \ 114 uint32_t *_le; \ 115 int _len = roundup((LEN), sizeof(uint32_t)); \ 116 struct pmclog_header *ph; \ 117 \ 118 if ((_le = pmclog_reserve((PO), _len)) == NULL) { \ 119 ACTION; \ 120 } \ 121 ph = (struct pmclog_header *)_le; \ 122 ph->pl_header =_PMCLOG_TO_HEADER(TYPE,_len); \ 123 ph->pl_tsc = (TSC); \ 124 _le += sizeof(*ph) / 4 /* skip over timestamp */ 125 126 /* reserve LEN bytes of space and initialize the entry header */ 127 #define _PMCLOG_RESERVE(PO, TYPE, LEN, ACTION) do { \ 128 uint32_t *_le; \ 129 int _len = roundup((LEN), sizeof(uint32_t)); \ 130 uint64_t tsc; \ 131 struct pmclog_header *ph; \ 132 \ 133 tsc = pmc_rdtsc(); \ 134 spinlock_enter(); \ 135 if ((_le = pmclog_reserve((PO), _len)) == NULL) { \ 136 spinlock_exit(); \ 137 ACTION; \ 138 } \ 139 ph = (struct pmclog_header *)_le; \ 140 ph->pl_header =_PMCLOG_TO_HEADER(TYPE,_len); \ 141 ph->pl_tsc = tsc; \ 142 143 #define PMCLOG_RESERVE_SAFE(P, T, L, TSC) \ 144 _PMCLOG_RESERVE_SAFE(P, T, L, return, TSC) 145 #define PMCLOG_RESERVE(P,T,L) \ 146 _PMCLOG_RESERVE(P, T, L, return) 147 #define PMCLOG_RESERVE_WITH_ERROR(P, T, L) \ 148 _PMCLOG_RESERVE(P, T, L, error = ENOMEM; goto error) 149 150 #define PMCLOG_EMIT32(V) do { *_le++ = (V); } while (0) 151 #define PMCLOG_EMIT64(V) do { \ 152 *_le++ = (uint32_t) ((V) & 0xFFFFFFFF); \ 153 *_le++ = (uint32_t) (((V) >> 32) & 0xFFFFFFFF); \ 154 } while (0) 155 156 157 /* Emit a string. 
Caution: does NOT update _le, so needs to be last */ 158 #define PMCLOG_EMITSTRING(S,L) do { \ 159 bcopy((S), _le, (L)); \ 160 } while (0) 161 #define PMCLOG_EMITNULLSTRING(L) do { \ 162 bzero(_le, (L)); \ 163 } while (0) 164 165 #define PMCLOG_DESPATCH_SAFE(PO) \ 166 pmclog_release((PO)); \ 167 } while (0) 168 169 #define PMCLOG_DESPATCH_SCHED_LOCK(PO) \ 170 pmclog_release_flags((PO), 0); \ 171 } while (0) 172 173 #define PMCLOG_DESPATCH(PO) \ 174 pmclog_release((PO)); \ 175 spinlock_exit(); \ 176 } while (0) 177 178 #define PMCLOG_DESPATCH_SYNC(PO) \ 179 pmclog_schedule_io((PO), 1); \ 180 spinlock_exit(); \ 181 } while (0) 182 183 #define TSDELTA 4 184 /* 185 * Assertions about the log file format. 186 */ 187 CTASSERT(sizeof(struct pmclog_callchain) == 7*4 + TSDELTA + 188 PMC_CALLCHAIN_DEPTH_MAX*sizeof(uintfptr_t)); 189 CTASSERT(sizeof(struct pmclog_closelog) == 3*4 + TSDELTA); 190 CTASSERT(sizeof(struct pmclog_dropnotify) == 3*4 + TSDELTA); 191 CTASSERT(sizeof(struct pmclog_map_in) == PATH_MAX + TSDELTA + 192 5*4 + sizeof(uintfptr_t)); 193 CTASSERT(offsetof(struct pmclog_map_in,pl_pathname) == 194 5*4 + TSDELTA + sizeof(uintfptr_t)); 195 CTASSERT(sizeof(struct pmclog_map_out) == 5*4 + 2*sizeof(uintfptr_t) + TSDELTA); 196 CTASSERT(sizeof(struct pmclog_pmcallocate) == 9*4 + TSDELTA); 197 CTASSERT(sizeof(struct pmclog_pmcattach) == 5*4 + PATH_MAX + TSDELTA); 198 CTASSERT(offsetof(struct pmclog_pmcattach,pl_pathname) == 5*4 + TSDELTA); 199 CTASSERT(sizeof(struct pmclog_pmcdetach) == 5*4 + TSDELTA); 200 CTASSERT(sizeof(struct pmclog_proccsw) == 7*4 + 8 + TSDELTA); 201 CTASSERT(sizeof(struct pmclog_procexec) == 5*4 + PATH_MAX + 202 2*sizeof(uintptr_t) + TSDELTA); 203 CTASSERT(offsetof(struct pmclog_procexec,pl_pathname) == 5*4 + TSDELTA + 204 2*sizeof(uintptr_t)); 205 CTASSERT(sizeof(struct pmclog_procexit) == 5*4 + 8 + TSDELTA); 206 CTASSERT(sizeof(struct pmclog_procfork) == 5*4 + TSDELTA); 207 CTASSERT(sizeof(struct pmclog_sysexit) == 6*4); 208 
CTASSERT(sizeof(struct pmclog_userdata) == 6*4);

/*
 * Log buffer structure
 */

struct pmclog_buffer {
	TAILQ_ENTRY(pmclog_buffer) plb_next;	/* linkage: domain free list or owner's pending-I/O list */
	char		*plb_base;		/* start of buffer storage */
	char		*plb_ptr;		/* current fill point (plb_base <= plb_ptr <= plb_fence) */
	char		*plb_fence;		/* one past the end of usable space */
	uint16_t	 plb_domain;		/* NUMA domain the storage was allocated from */
} __aligned(CACHE_LINE_SIZE);

/*
 * Prototypes
 */

static int pmclog_get_buffer(struct pmc_owner *po);
static void pmclog_loop(void *arg);
static void pmclog_release(struct pmc_owner *po);
static uint32_t *pmclog_reserve(struct pmc_owner *po, int length);
static void pmclog_schedule_io(struct pmc_owner *po, int wakeup);
static void pmclog_schedule_all(struct pmc_owner *po);
static void pmclog_stop_kthread(struct pmc_owner *po);

/*
 * Helper functions
 */

/*
 * Push 'plb' back onto the free list of its NUMA domain.  No locking is
 * performed here; callers either hold the domain's pdbh_mtx or know no
 * concurrent access is possible (e.g. during initialization).
 */
static inline void
pmc_plb_rele_unlocked(struct pmclog_buffer *plb)
{
	TAILQ_INSERT_HEAD(&pmc_dom_hdrs[plb->plb_domain]->pdbh_head, plb, plb_next);
}

/*
 * Locked variant: acquire the domain's spin mutex around the release.
 */
static inline void
pmc_plb_rele(struct pmclog_buffer *plb)
{
	mtx_lock_spin(&pmc_dom_hdrs[plb->plb_domain]->pdbh_mtx);
	pmc_plb_rele_unlocked(plb);
	mtx_unlock_spin(&pmc_dom_hdrs[plb->plb_domain]->pdbh_mtx);
}

/*
 * Get a log buffer
 *
 * Pop a free buffer from the current CPU's NUMA-domain pool and make it
 * the owner's current buffer for this CPU.  Returns 0 on success or
 * ENOMEM when the pool is exhausted (po->po_curbuf[curcpu] is then left
 * NULL and the failure counter is bumped).
 */
static int
pmclog_get_buffer(struct pmc_owner *po)
{
	struct pmclog_buffer *plb;
	int domain;

	KASSERT(po->po_curbuf[curcpu] == NULL,
	    ("[pmclog,%d] po=%p current buffer still valid", __LINE__, po));

	domain = curdomain;
	MPASS(pmc_dom_hdrs[domain]);
	mtx_lock_spin(&pmc_dom_hdrs[domain]->pdbh_mtx);
	if ((plb = TAILQ_FIRST(&pmc_dom_hdrs[domain]->pdbh_head)) != NULL)
		TAILQ_REMOVE(&pmc_dom_hdrs[domain]->pdbh_head, plb, plb_next);
	mtx_unlock_spin(&pmc_dom_hdrs[domain]->pdbh_mtx);

	PMCDBG2(LOG,GTB,1, "po=%p plb=%p", po, plb);

#ifdef HWPMC_DEBUG
	if (plb)
		KASSERT(plb->plb_ptr == plb->plb_base &&
		    plb->plb_base < plb->plb_fence,
		    ("[pmclog,%d] po=%p buffer invariants: ptr=%p "
		    "base=%p fence=%p", __LINE__, po, plb->plb_ptr,
		    plb->plb_base, plb->plb_fence));
#endif

	po->po_curbuf[curcpu] = plb;

	/* update stats */
	counter_u64_add(pmc_stats.pm_buffer_requests, 1);
	if (plb == NULL)
		counter_u64_add(pmc_stats.pm_buffer_requests_failed, 1);

	return (plb ? 0 : ENOMEM);
}

/*
 * Handshake state passed from pmclog_proc_create() to the new kthread
 * and later consumed by pmclog_proc_ignite().
 */
struct pmclog_proc_init_args {
	struct proc *kthr;		/* the helper kernel process */
	struct pmc_owner *po;		/* owner to serve, set by ignite */
	bool exit;			/* true: thread should exit immediately */
	bool acted;			/* set by the thread once it has picked up po/exit */
};

/*
 * Create (but do not yet activate) a log-writer kernel process for the
 * calling owner process.  On success *handlep receives an opaque token
 * that must later be passed to pmclog_proc_ignite().
 */
int
pmclog_proc_create(struct thread *td, void **handlep)
{
	struct pmclog_proc_init_args *ia;
	int error;

	ia = malloc(sizeof(*ia), M_TEMP, M_WAITOK | M_ZERO);
	error = kproc_create(pmclog_loop, ia, &ia->kthr,
	    RFHIGHPID, 0, "hwpmc: proc(%d)", td->td_proc->p_pid);
	if (error == 0)
		*handlep = ia;
	return (error);
}

/*
 * Activate a previously created helper thread.  With po == NULL the
 * thread is told to exit instead.  Blocks until the thread acknowledges
 * ('acted'), then frees the handshake structure.
 */
void
pmclog_proc_ignite(void *handle, struct pmc_owner *po)
{
	struct pmclog_proc_init_args *ia;

	ia = handle;
	mtx_lock(&pmc_kthread_mtx);
	MPASS(!ia->acted);
	MPASS(ia->po == NULL);
	MPASS(!ia->exit);
	MPASS(ia->kthr != NULL);
	if (po == NULL) {
		ia->exit = true;
	} else {
		ia->po = po;
		KASSERT(po->po_kthread == NULL,
		    ("[pmclog,%d] po=%p kthread (%p) already present",
		    __LINE__, po, po->po_kthread));
		po->po_kthread = ia->kthr;
	}
	wakeup(ia);
	while (!ia->acted)
		msleep(ia, &pmc_kthread_mtx, PWAIT, "pmclogw", 0);
	mtx_unlock(&pmc_kthread_mtx);
	free(ia, M_TEMP);
}

/*
 * Log handler loop.
 *
 * This function is executed by each pmc owner's helper thread.
 */
static void
pmclog_loop(void *arg)
{
	struct pmclog_proc_init_args *ia;
	struct pmc_owner *po;
	struct pmclog_buffer *lb;
	struct proc *p;
	struct ucred *ownercred;
	struct ucred *mycred;
	struct thread *td;
	sigset_t unb;
	struct uio auio;
	struct iovec aiov;
	size_t nbytes;
	int error;

	td = curthread;

	/* allow SIGHUP so pmclog_stop_kthread() can interrupt our sleeps */
	SIGEMPTYSET(unb);
	SIGADDSET(unb, SIGHUP);
	(void)kern_sigprocmask(td, SIG_UNBLOCK, &unb, NULL, 0);

	/* wait for pmclog_proc_ignite() to hand us an owner or an exit order */
	ia = arg;
	MPASS(ia->kthr == curproc);
	MPASS(!ia->acted);
	mtx_lock(&pmc_kthread_mtx);
	while (ia->po == NULL && !ia->exit)
		msleep(ia, &pmc_kthread_mtx, PWAIT, "pmclogi", 0);
	if (ia->exit) {
		ia->acted = true;
		wakeup(ia);
		mtx_unlock(&pmc_kthread_mtx);
		kproc_exit(0);
	}
	MPASS(ia->po != NULL);
	po = ia->po;
	ia->acted = true;
	wakeup(ia);
	mtx_unlock(&pmc_kthread_mtx);
	ia = NULL;	/* freed by pmclog_proc_ignite() once 'acted' is seen */

	p = po->po_owner;
	mycred = td->td_ucred;

	/* hold the owner's credentials for the duration of the writes */
	PROC_LOCK(p);
	ownercred = crhold(p->p_ucred);
	PROC_UNLOCK(p);

	PMCDBG2(LOG,INI,1, "po=%p kt=%p", po, po->po_kthread);
	KASSERT(po->po_kthread == curthread->td_proc,
	    ("[pmclog,%d] proc mismatch po=%p po/kt=%p curproc=%p", __LINE__,
		po, po->po_kthread, curthread->td_proc));

	lb = NULL;


	/*
	 * Loop waiting for I/O requests to be added to the owner
	 * struct's queue. The loop is exited when the log file
	 * is deconfigured.
	 */

	mtx_lock(&pmc_kthread_mtx);

	for (;;) {

		/* check if we've been asked to exit */
		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
			break;

		if (lb == NULL) { /* look for a fresh buffer to write */
			mtx_lock_spin(&po->po_mtx);
			if ((lb = TAILQ_FIRST(&po->po_logbuffers)) == NULL) {
				mtx_unlock_spin(&po->po_mtx);

				/* No more buffers and shutdown required. */
				if (po->po_flags & PMC_PO_SHUTDOWN)
					break;

				/* poll with a 250-tick timeout */
				(void) msleep(po, &pmc_kthread_mtx, PWAIT,
				    "pmcloop", 250);
				continue;
			}

			TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
			mtx_unlock_spin(&po->po_mtx);
		}

		/* drop the sleep mutex across the (potentially slow) write */
		mtx_unlock(&pmc_kthread_mtx);

		/* process the request */
		PMCDBG3(LOG,WRI,2, "po=%p base=%p ptr=%p", po,
		    lb->plb_base, lb->plb_ptr);
		/* change our thread's credentials before issuing the I/O */

		aiov.iov_base = lb->plb_base;
		aiov.iov_len = nbytes = lb->plb_ptr - lb->plb_base;

		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = -1;
		auio.uio_resid = nbytes;
		auio.uio_rw = UIO_WRITE;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_td = td;

		/* switch thread credentials -- see kern_ktrace.c */
		td->td_ucred = ownercred;
		error = fo_write(po->po_file, &auio, ownercred, 0, td);
		td->td_ucred = mycred;

		if (error) {
			/* XXX some errors are recoverable */
			/* send a SIGIO to the owner and exit */
			PROC_LOCK(p);
			kern_psignal(p, SIGIO);
			PROC_UNLOCK(p);

			mtx_lock(&pmc_kthread_mtx);

			po->po_error = error; /* save for flush log */

			PMCDBG2(LOG,WRI,2, "po=%p error=%d", po, error);

			break;
		}

		mtx_lock(&pmc_kthread_mtx);

		/* put the used buffer back into the global pool */
		PMCLOG_RESET_BUFFER_DESCRIPTOR(lb);

		pmc_plb_rele(lb);
		lb = NULL;
	}

	/* unblock pmclog_stop_kthread(), which sleeps on po->po_kthread */
	wakeup_one(po->po_kthread);
	po->po_kthread = NULL;

	mtx_unlock(&pmc_kthread_mtx);

	/* return the current I/O buffer to the global pool */
	if (lb) {
		PMCLOG_RESET_BUFFER_DESCRIPTOR(lb);

		pmc_plb_rele(lb);
	}

	/*
	 * Exit this thread, signalling the waiter
	 */

	crfree(ownercred);

	kproc_exit(0);
}

/*
 * Release a log entry and schedule an I/O if needed.
 */

/*
 * Complete an entry started by pmclog_reserve(): verify the buffer
 * invariants and, if the buffer is now full, hand it to the helper
 * thread (waking it only when 'wakeup' is non-zero).
 */
static void
pmclog_release_flags(struct pmc_owner *po, int wakeup)
{
	struct pmclog_buffer *plb;

	plb = po->po_curbuf[curcpu];
	KASSERT(plb->plb_ptr >= plb->plb_base,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
		po, plb->plb_ptr, plb->plb_base));
	KASSERT(plb->plb_ptr <= plb->plb_fence,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
		po, plb->plb_ptr, plb->plb_fence));

	/* schedule an I/O if we've filled a buffer */
	if (plb->plb_ptr >= plb->plb_fence)
		pmclog_schedule_io(po, wakeup);

	PMCDBG1(LOG,REL,1, "po=%p", po);
}

/* Common case: release and wake the helper thread if an I/O was queued. */
static void
pmclog_release(struct pmc_owner *po)
{

	pmclog_release_flags(po, 1);
}


/*
 * Attempt to reserve 'length' bytes of space in an owner's log
 * buffer. The function returns a pointer to 'length' bytes of space
 * if there was enough space or returns NULL if no space was
 * available. Non-null returns do so with the po mutex locked. The
 * caller must invoke pmclog_release() on the pmc owner structure
 * when done.
 */

static uint32_t *
pmclog_reserve(struct pmc_owner *po, int length)
{
	uintptr_t newptr, oldptr __diagused;
	struct pmclog_buffer *plb, **pplb;

	PMCDBG2(LOG,ALL,1, "po=%p len=%d", po, length);

	KASSERT(length % sizeof(uint32_t) == 0,
	    ("[pmclog,%d] length not a multiple of word size", __LINE__));

	/* No more data when shutdown in progress. */
	if (po->po_flags & PMC_PO_SHUTDOWN)
		return (NULL);

	/* lazily allocate this CPU's buffer on first use */
	pplb = &po->po_curbuf[curcpu];
	if (*pplb == NULL && pmclog_get_buffer(po) != 0)
		goto fail;

	KASSERT(*pplb != NULL,
	    ("[pmclog,%d] po=%p no current buffer", __LINE__, po));

	plb = *pplb;
	KASSERT(plb->plb_ptr >= plb->plb_base &&
	    plb->plb_ptr <= plb->plb_fence,
	    ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
		__LINE__, po, plb->plb_ptr, plb->plb_base,
		plb->plb_fence));

	oldptr = (uintptr_t) plb->plb_ptr;
	newptr = oldptr + length;

	KASSERT(oldptr != (uintptr_t) NULL,
	    ("[pmclog,%d] po=%p Null log buffer pointer", __LINE__, po));

	/*
	 * If we have space in the current buffer, return a pointer to
	 * available space with the PO structure locked.
	 */
	if (newptr <= (uintptr_t) plb->plb_fence) {
		plb->plb_ptr = (char *) newptr;
		goto done;
	}

	/*
	 * Otherwise, schedule the current buffer for output and get a
	 * fresh buffer.
	 */
	pmclog_schedule_io(po, 0);

	if (pmclog_get_buffer(po) != 0)
		goto fail;

	plb = *pplb;
	KASSERT(plb != NULL,
	    ("[pmclog,%d] po=%p no current buffer", __LINE__, po));

	KASSERT(plb->plb_ptr != NULL,
	    ("[pmclog,%d] null return from pmc_get_log_buffer", __LINE__));

	KASSERT(plb->plb_ptr == plb->plb_base &&
	    plb->plb_ptr <= plb->plb_fence,
	    ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
		__LINE__, po, plb->plb_ptr, plb->plb_base,
		plb->plb_fence));

	oldptr = (uintptr_t) plb->plb_ptr;

 done:
	/* NOTE: the fresh-buffer path returns the old base; the entry was
	 * NOT yet accounted in plb_ptr there — pl_header length accounting
	 * happens via the caller's next reserve.  Behavior kept as-is. */
	return ((uint32_t *) oldptr);
 fail:
	return (NULL);
}

/*
 * Schedule an I/O.
 *
 * Transfer the current buffer to the helper kthread.
 */

/*
 * Detach the current per-CPU buffer from 'po' and queue it on the
 * owner's pending-I/O list; optionally wake the helper thread.
 */
static void
pmclog_schedule_io(struct pmc_owner *po, int wakeup)
{
	struct pmclog_buffer *plb;

	plb = po->po_curbuf[curcpu];
	po->po_curbuf[curcpu] = NULL;
	KASSERT(plb != NULL,
	    ("[pmclog,%d] schedule_io with null buffer po=%p", __LINE__, po));
	KASSERT(plb->plb_ptr >= plb->plb_base,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
		po, plb->plb_ptr, plb->plb_base));
	KASSERT(plb->plb_ptr <= plb->plb_fence,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
		po, plb->plb_ptr, plb->plb_fence));

	PMCDBG1(LOG,SIO, 1, "po=%p", po);

	/*
	 * Add the current buffer to the tail of the buffer list and
	 * wakeup the helper.
	 */
	mtx_lock_spin(&po->po_mtx);
	TAILQ_INSERT_TAIL(&po->po_logbuffers, plb, plb_next);
	mtx_unlock_spin(&po->po_mtx);
	if (wakeup)
		wakeup_one(po);
}

/*
 * Stop the helper kthread.
 *
 * Clears PMC_PO_OWNS_LOGFILE (the loop's run condition), sends SIGHUP
 * to interrupt any sleep the thread is in, and waits until the thread
 * clears po->po_kthread on its way out.
 */

static void
pmclog_stop_kthread(struct pmc_owner *po)
{

	mtx_lock(&pmc_kthread_mtx);
	po->po_flags &= ~PMC_PO_OWNS_LOGFILE;
	if (po->po_kthread != NULL) {
		PROC_LOCK(po->po_kthread);
		kern_psignal(po->po_kthread, SIGHUP);
		PROC_UNLOCK(po->po_kthread);
	}
	wakeup_one(po);
	while (po->po_kthread)
		msleep(po->po_kthread, &pmc_kthread_mtx, PPAUSE, "pmckstp", 0);
	mtx_unlock(&pmc_kthread_mtx);
}

/*
 * Public functions
 */

/*
 * Configure a log file for pmc owner 'po'.
 *
 * Parameter 'logfd' is a file handle referencing an open file in the
 * owner process. This file needs to have been opened for writing.
 */

int
pmclog_configure_log(struct pmc_mdep *md, struct pmc_owner *po, int logfd)
{
	struct proc *p;
	struct timespec ts;
	int error;

	sx_assert(&pmc_sx, SA_XLOCKED);
	PMCDBG2(LOG,CFG,1, "config po=%p logfd=%d", po, logfd);

	p = po->po_owner;

	/* return EBUSY if a log file was already present */
	if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		return (EBUSY);

	KASSERT(po->po_file == NULL,
	    ("[pmclog,%d] po=%p file (%p) already present", __LINE__, po,
		po->po_file));

	/* get a reference to the file state */
	error = fget_write(curthread, logfd, &cap_write_rights, &po->po_file);
	if (error)
		goto error;

	/* mark process as owning a log file */
	po->po_flags |= PMC_PO_OWNS_LOGFILE;

	/* mark process as using HWPMCs */
	PROC_LOCK(p);
	p->p_flag |= P_HWPMC;
	PROC_UNLOCK(p);
	nanotime(&ts);
	/* create a log initialization entry */
	PMCLOG_RESERVE_WITH_ERROR(po, PMCLOG_TYPE_INITIALIZE,
	    sizeof(struct pmclog_initialize));
	PMCLOG_EMIT32(PMC_VERSION);
	PMCLOG_EMIT32(md->pmd_cputype);
#if defined(__i386__) || defined(__amd64__)
	PMCLOG_EMIT64(tsc_freq);
#else
	/* other architectures will need to fill this in */
	PMCLOG_EMIT32(0);
	PMCLOG_EMIT32(0);
#endif
	/* wall-clock timestamp, copied raw; _le is exposed by the RESERVE macro */
	memcpy(_le, &ts, sizeof(ts));
	_le += sizeof(ts)/4;
	PMCLOG_EMITSTRING(pmc_cpuid, PMC_CPUID_LEN);
	PMCLOG_DESPATCH_SYNC(po);

	return (0);

 error:
	KASSERT(po->po_kthread == NULL, ("[pmclog,%d] po=%p kthread not "
	    "stopped", __LINE__, po));

	if (po->po_file)
		(void) fdrop(po->po_file, curthread);
	po->po_file = NULL;	/* clear file and error state */
	po->po_error = 0;
	po->po_flags &= ~PMC_PO_OWNS_LOGFILE;

	return (error);
}


/*
 * De-configure a log file. This will throw away any buffers queued
 * for this owner process.
 */

int
pmclog_deconfigure_log(struct pmc_owner *po)
{
	int error;
	struct pmclog_buffer *lb;
	struct pmc_binding pb;

	PMCDBG1(LOG,CFG,1, "de-config po=%p", po);

	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
		return (EINVAL);

	KASSERT(po->po_sscount == 0,
	    ("[pmclog,%d] po=%p still owning SS PMCs", __LINE__, po));
	KASSERT(po->po_file != NULL,
	    ("[pmclog,%d] po=%p no log file", __LINE__, po));

	/* stop the kthread, this will reset the 'OWNS_LOGFILE' flag */
	pmclog_stop_kthread(po);

	KASSERT(po->po_kthread == NULL,
	    ("[pmclog,%d] po=%p kthread not stopped", __LINE__, po));

	/* return all queued log buffers to the global pool */
	while ((lb = TAILQ_FIRST(&po->po_logbuffers)) != NULL) {
		TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
		PMCLOG_RESET_BUFFER_DESCRIPTOR(lb);
		pmc_plb_rele(lb);
	}
	/* visit each CPU (by rebinding) to reclaim its per-CPU buffer */
	pmc_save_cpu_binding(&pb);
	for (int i = 0; i < mp_ncpus; i++) {
		pmc_select_cpu(i);
		/* return the 'current' buffer to the global pool */
		if ((lb = po->po_curbuf[curcpu]) != NULL) {
			PMCLOG_RESET_BUFFER_DESCRIPTOR(lb);
			pmc_plb_rele(lb);
		}
	}
	pmc_restore_cpu_binding(&pb);

	/* drop a reference to the fd */
	if (po->po_file != NULL) {
		error = fdrop(po->po_file, curthread);
		po->po_file = NULL;
	} else
		error = 0;
	po->po_error = 0;

	return (error);
}

/*
 * Flush a process' log buffer.
 */

int
pmclog_flush(struct pmc_owner *po, int force)
{
	int error;

	PMCDBG1(LOG,FLS,1, "po=%p", po);

	/*
	 * If there is a pending error recorded by the logger thread,
	 * return that.
	 */
	if (po->po_error)
		return (po->po_error);

	error = 0;

	/*
	 * Check that we do have an active log file.
	 */
	mtx_lock(&pmc_kthread_mtx);
	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
		error = EINVAL;
		goto error;
	}

	pmclog_schedule_all(po);
 error:
	mtx_unlock(&pmc_kthread_mtx);

	return (error);
}

/*
 * Schedule the current CPU's partially-filled buffer (if any) for
 * output, after letting any pending samples on this CPU be processed.
 * Runs with interrupts disabled via spinlock_enter().
 */
static void
pmclog_schedule_one_cond(struct pmc_owner *po)
{
	struct pmclog_buffer *plb;
	int cpu;

	spinlock_enter();
	cpu = curcpu;
	/* tell hardclock not to run again */
	if (PMC_CPU_HAS_SAMPLES(cpu))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);

	plb = po->po_curbuf[cpu];
	if (plb && plb->plb_ptr != plb->plb_base)
		pmclog_schedule_io(po, 1);
	spinlock_exit();
}

/*
 * Run pmclog_schedule_one_cond() on every CPU by rebinding the
 * current thread in turn.
 */
static void
pmclog_schedule_all(struct pmc_owner *po)
{
	struct pmc_binding pb;

	/*
	 * Schedule the current buffer if any and not empty.
	 */
	pmc_save_cpu_binding(&pb);
	for (int i = 0; i < mp_ncpus; i++) {
		pmc_select_cpu(i);
		pmclog_schedule_one_cond(po);
	}
	pmc_restore_cpu_binding(&pb);
}

/*
 * Initiate an orderly shutdown of the log: emit a CLOSELOG record,
 * mark the owner as shutting down, and push out all pending buffers.
 */
int
pmclog_close(struct pmc_owner *po)
{

	PMCDBG1(LOG,CLO,1, "po=%p", po);

	pmclog_process_closelog(po);

	mtx_lock(&pmc_kthread_mtx);
	/*
	 * Initiate shutdown: no new data queued,
	 * thread will close file on last block.
	 */
	po->po_flags |= PMC_PO_SHUTDOWN;
	/* give time for all to see */
	DELAY(50);

	/*
	 * Schedule the current buffer.
	 */
	pmclog_schedule_all(po);
	wakeup_one(po);

	mtx_unlock(&pmc_kthread_mtx);

	return (0);
}

/*
 * Emit a CALLCHAIN record for a sampled PC chain; uses the sample's
 * own TSC value as the record timestamp.
 */
void
pmclog_process_callchain(struct pmc *pm, struct pmc_sample *ps)
{
	int n, recordlen;
	uint32_t flags;
	struct pmc_owner *po;

	PMCDBG3(LOG,SAM,1,"pm=%p pid=%d n=%d", pm, ps->ps_pid,
	    ps->ps_nsamples);

	recordlen = offsetof(struct pmclog_callchain, pl_pc) +
	    ps->ps_nsamples * sizeof(uintfptr_t);
	po = pm->pm_owner;
	flags = PMC_CALLCHAIN_TO_CPUFLAGS(ps->ps_cpu,ps->ps_flags);
	PMCLOG_RESERVE_SAFE(po, PMCLOG_TYPE_CALLCHAIN, recordlen, ps->ps_tsc);
	PMCLOG_EMIT32(ps->ps_pid);
	PMCLOG_EMIT32(ps->ps_tid);
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(flags);
	for (n = 0; n < ps->ps_nsamples; n++)
		PMCLOG_EMITADDR(ps->ps_pc[n]);
	PMCLOG_DESPATCH_SAFE(po);
}

/* Emit a CLOSELOG record and synchronously schedule it for output. */
void
pmclog_process_closelog(struct pmc_owner *po)
{
	PMCLOG_RESERVE(po, PMCLOG_TYPE_CLOSELOG,
	    sizeof(struct pmclog_closelog));
	PMCLOG_DESPATCH_SYNC(po);
}

/* Emit a DROPNOTIFY record (samples were lost). */
void
pmclog_process_dropnotify(struct pmc_owner *po)
{
	PMCLOG_RESERVE(po, PMCLOG_TYPE_DROPNOTIFY,
	    sizeof(struct pmclog_dropnotify));
	PMCLOG_DESPATCH(po);
}

/* Emit a MAP_IN record: object 'path' mapped at 'start' in process 'pid'. */
void
pmclog_process_map_in(struct pmc_owner *po, pid_t pid, uintfptr_t start,
    const char *path)
{
	int pathlen, recordlen;

	KASSERT(path != NULL, ("[pmclog,%d] map-in, null path", __LINE__));

	pathlen = strlen(path) + 1;	/* #bytes for path name */
	recordlen = offsetof(struct pmclog_map_in, pl_pathname) +
	    pathlen;

	PMCLOG_RESERVE(po, PMCLOG_TYPE_MAP_IN, recordlen);
	PMCLOG_EMIT32(pid);
	PMCLOG_EMIT32(0);
	PMCLOG_EMITADDR(start);
	PMCLOG_EMITSTRING(path,pathlen);
	PMCLOG_DESPATCH_SYNC(po);
}

/* Emit a MAP_OUT record: region [start,end] unmapped from process 'pid'. */
void
pmclog_process_map_out(struct pmc_owner *po, pid_t pid, uintfptr_t start,
    uintfptr_t end)
{
	KASSERT(start <= end, ("[pmclog,%d] start > end", __LINE__));

	PMCLOG_RESERVE(po, PMCLOG_TYPE_MAP_OUT, sizeof(struct pmclog_map_out));
	PMCLOG_EMIT32(pid);
	PMCLOG_EMIT32(0);
	PMCLOG_EMITADDR(start);
	PMCLOG_EMITADDR(end);
	PMCLOG_DESPATCH(po);
}

/*
 * Emit a PMCALLOCATE record; software-class PMCs get the dynamic
 * variant carrying the event name.
 */
void
pmclog_process_pmcallocate(struct pmc *pm)
{
	struct pmc_owner *po;
	struct pmc_soft *ps;

	po = pm->pm_owner;

	PMCDBG1(LOG,ALL,1, "pm=%p", pm);

	if (PMC_TO_CLASS(pm) == PMC_CLASS_SOFT) {
		PMCLOG_RESERVE(po, PMCLOG_TYPE_PMCALLOCATEDYN,
		    sizeof(struct pmclog_pmcallocatedyn));
		PMCLOG_EMIT32(pm->pm_id);
		PMCLOG_EMIT32(pm->pm_event);
		PMCLOG_EMIT32(pm->pm_flags);
		PMCLOG_EMIT32(0);
		PMCLOG_EMIT64(pm->pm_sc.pm_reloadcount);
		ps = pmc_soft_ev_acquire(pm->pm_event);
		if (ps != NULL)
			PMCLOG_EMITSTRING(ps->ps_ev.pm_ev_name,PMC_NAME_MAX);
		else
			PMCLOG_EMITNULLSTRING(PMC_NAME_MAX);
		pmc_soft_ev_release(ps);
		PMCLOG_DESPATCH_SYNC(po);
	} else {
		PMCLOG_RESERVE(po, PMCLOG_TYPE_PMCALLOCATE,
		    sizeof(struct pmclog_pmcallocate));
		PMCLOG_EMIT32(pm->pm_id);
		PMCLOG_EMIT32(pm->pm_event);
		PMCLOG_EMIT32(pm->pm_flags);
		PMCLOG_EMIT32(0);
		PMCLOG_EMIT64(pm->pm_sc.pm_reloadcount);
		PMCLOG_DESPATCH_SYNC(po);
	}
}

/* Emit a PMCATTACH record: PMC attached to process 'pid' running 'path'. */
void
pmclog_process_pmcattach(struct pmc *pm, pid_t pid, char *path)
{
	int pathlen, recordlen;
	struct pmc_owner *po;

	PMCDBG2(LOG,ATT,1,"pm=%p pid=%d", pm, pid);

	po = pm->pm_owner;

	pathlen = strlen(path) + 1;	/* #bytes for the string */
	recordlen = offsetof(struct pmclog_pmcattach, pl_pathname) + pathlen;

	PMCLOG_RESERVE(po, PMCLOG_TYPE_PMCATTACH, recordlen);
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pid);
	PMCLOG_EMITSTRING(path, pathlen);
	PMCLOG_DESPATCH_SYNC(po);
}

/* Emit a PMCDETACH record. */
void
pmclog_process_pmcdetach(struct pmc *pm, pid_t pid)
{
	struct pmc_owner *po;

	PMCDBG2(LOG,ATT,1,"!pm=%p pid=%d", pm, pid);

	po = pm->pm_owner;

	PMCLOG_RESERVE(po, PMCLOG_TYPE_PMCDETACH,
	    sizeof(struct pmclog_pmcdetach));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pid);
	PMCLOG_DESPATCH_SYNC(po);
}

/*
 * Emit a PROC_CREATE record; 'sync' selects synchronous vs deferred
 * despatch of the containing buffer.
 */
void
pmclog_process_proccreate(struct pmc_owner *po, struct proc *p, int sync)
{
	if (sync) {
		PMCLOG_RESERVE(po, PMCLOG_TYPE_PROC_CREATE,
		    sizeof(struct pmclog_proccreate));
		PMCLOG_EMIT32(p->p_pid);
		PMCLOG_EMIT32(p->p_flag);
		PMCLOG_EMITSTRING(p->p_comm, MAXCOMLEN+1);
		PMCLOG_DESPATCH_SYNC(po);
	} else {
		PMCLOG_RESERVE(po, PMCLOG_TYPE_PROC_CREATE,
		    sizeof(struct pmclog_proccreate));
		PMCLOG_EMIT32(p->p_pid);
		PMCLOG_EMIT32(p->p_flag);
		PMCLOG_EMITSTRING(p->p_comm, MAXCOMLEN+1);
		PMCLOG_DESPATCH(po);
	}
}

/*
 * Log a context switch event to the log file.
 */

void
pmclog_process_proccsw(struct pmc *pm, struct pmc_process *pp, pmc_value_t v, struct thread *td)
{
	struct pmc_owner *po;

	KASSERT(pm->pm_flags & PMC_F_LOG_PROCCSW,
	    ("[pmclog,%d] log-process-csw called gratuitously", __LINE__));

	PMCDBG3(LOG,SWO,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
	    v);

	po = pm->pm_owner;

	/* called from the scheduler path; no wakeup on despatch */
	PMCLOG_RESERVE_SAFE(po, PMCLOG_TYPE_PROCCSW,
	    sizeof(struct pmclog_proccsw), pmc_rdtsc());
	PMCLOG_EMIT64(v);
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pp->pp_proc->p_pid);
	PMCLOG_EMIT32(td->td_tid);
	PMCLOG_EMIT32(0);
	PMCLOG_DESPATCH_SCHED_LOCK(po);
}

/* Emit a PROCEXEC record: process 'pid' exec'd 'path'. */
void
pmclog_process_procexec(struct pmc_owner *po, pmc_id_t pmid, pid_t pid,
    uintptr_t baseaddr, uintptr_t dynaddr, char *path)
{
	int pathlen, recordlen;

	PMCDBG3(LOG,EXC,1,"po=%p pid=%d path=\"%s\"", po, pid, path);

	pathlen = strlen(path) + 1;	/* #bytes for the path */
	recordlen = offsetof(struct pmclog_procexec, pl_pathname) + pathlen;
	PMCLOG_RESERVE(po, PMCLOG_TYPE_PROCEXEC, recordlen);
	PMCLOG_EMIT32(pid);
	PMCLOG_EMIT32(pmid);
	PMCLOG_EMITADDR(baseaddr);
	PMCLOG_EMITADDR(dynaddr);
	PMCLOG_EMITSTRING(path,pathlen);
	PMCLOG_DESPATCH_SYNC(po);
}

/*
 * Log a process exit event (and accumulated pmc value) to the log file.
 */

void
pmclog_process_procexit(struct pmc *pm, struct pmc_process *pp)
{
	int ri;
	struct pmc_owner *po;

	ri = PMC_TO_ROWINDEX(pm);
	PMCDBG3(LOG,EXT,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
	    pp->pp_pmcs[ri].pp_pmcval);

	po = pm->pm_owner;

	PMCLOG_RESERVE(po, PMCLOG_TYPE_PROCEXIT,
	    sizeof(struct pmclog_procexit));
	PMCLOG_EMIT32(pm->pm_id);
	PMCLOG_EMIT32(pp->pp_proc->p_pid);
	PMCLOG_EMIT64(pp->pp_pmcs[ri].pp_pmcval);
	PMCLOG_DESPATCH(po);
}

/*
 * Log a fork event.
 */

void
pmclog_process_procfork(struct pmc_owner *po, pid_t oldpid, pid_t newpid)
{
	PMCLOG_RESERVE(po, PMCLOG_TYPE_PROCFORK,
	    sizeof(struct pmclog_procfork));
	PMCLOG_EMIT32(oldpid);
	PMCLOG_EMIT32(newpid);
	PMCLOG_DESPATCH(po);
}

/*
 * Log a process exit event of the form suitable for system-wide PMCs.
 */

void
pmclog_process_sysexit(struct pmc_owner *po, pid_t pid)
{
	PMCLOG_RESERVE(po, PMCLOG_TYPE_SYSEXIT, sizeof(struct pmclog_sysexit));
	PMCLOG_EMIT32(pid);
	PMCLOG_DESPATCH(po);
}

/*
 * Emit a THR_CREATE record for thread 'td'; 'sync' selects synchronous
 * vs deferred despatch of the containing buffer.
 */
void
pmclog_process_threadcreate(struct pmc_owner *po, struct thread *td, int sync)
{
	struct proc *p;

	p = td->td_proc;
	if (sync) {
		PMCLOG_RESERVE(po, PMCLOG_TYPE_THR_CREATE,
		    sizeof(struct pmclog_threadcreate));
		PMCLOG_EMIT32(td->td_tid);
		PMCLOG_EMIT32(p->p_pid);
		PMCLOG_EMIT32(p->p_flag);
		PMCLOG_EMIT32(0);
		PMCLOG_EMITSTRING(td->td_name, MAXCOMLEN+1);
		PMCLOG_DESPATCH_SYNC(po);
	} else {
		PMCLOG_RESERVE(po, PMCLOG_TYPE_THR_CREATE,
		    sizeof(struct pmclog_threadcreate));
		PMCLOG_EMIT32(td->td_tid);
		PMCLOG_EMIT32(p->p_pid);
		PMCLOG_EMIT32(p->p_flag);
		PMCLOG_EMIT32(0);
		PMCLOG_EMITSTRING(td->td_name, MAXCOMLEN+1);
		PMCLOG_DESPATCH(po);
	}
}

/* Emit a THR_EXIT record for thread 'td'. */
void
pmclog_process_threadexit(struct pmc_owner *po, struct thread *td)
{

	PMCLOG_RESERVE(po, PMCLOG_TYPE_THR_EXIT,
	    sizeof(struct pmclog_threadexit));
	PMCLOG_EMIT32(td->td_tid);
	PMCLOG_DESPATCH(po);
}

/*
 * Write a user log entry.
 */

int
pmclog_process_userlog(struct pmc_owner *po, struct pmc_op_writelog *wl)
{
	int error;

	PMCDBG2(LOG,WRI,1, "writelog po=%p ud=0x%x", po, wl->pm_userdata);

	error = 0;

	/* RESERVE_WITH_ERROR jumps to 'error' with ENOMEM on failure */
	PMCLOG_RESERVE_WITH_ERROR(po, PMCLOG_TYPE_USERDATA,
	    sizeof(struct pmclog_userdata));
	PMCLOG_EMIT32(wl->pm_userdata);
	PMCLOG_DESPATCH(po);

 error:
	return (error);
}

/*
 * Initialization.
 *
 * Create a pool of log buffers and initialize mutexes.
1230 */ 1231 1232 void 1233 pmclog_initialize(void) 1234 { 1235 struct pmclog_buffer *plb; 1236 int domain, ncpus, total; 1237 1238 if (pmclog_buffer_size <= 0 || pmclog_buffer_size > 16*1024) { 1239 (void) printf("hwpmc: tunable logbuffersize=%d must be " 1240 "greater than zero and less than or equal to 16MB.\n", 1241 pmclog_buffer_size); 1242 pmclog_buffer_size = PMC_LOG_BUFFER_SIZE; 1243 } 1244 1245 if (pmc_nlogbuffers_pcpu <= 0) { 1246 (void) printf("hwpmc: tunable nlogbuffers=%d must be greater " 1247 "than zero.\n", pmc_nlogbuffers_pcpu); 1248 pmc_nlogbuffers_pcpu = PMC_NLOGBUFFERS_PCPU; 1249 } 1250 if (pmc_nlogbuffers_pcpu*pmclog_buffer_size > 32*1024) { 1251 (void) printf("hwpmc: memory allocated pcpu must be less than 32MB (is %dK).\n", 1252 pmc_nlogbuffers_pcpu*pmclog_buffer_size); 1253 pmc_nlogbuffers_pcpu = PMC_NLOGBUFFERS_PCPU; 1254 pmclog_buffer_size = PMC_LOG_BUFFER_SIZE; 1255 } 1256 for (domain = 0; domain < vm_ndomains; domain++) { 1257 ncpus = pmc_dom_hdrs[domain]->pdbh_ncpus; 1258 total = ncpus * pmc_nlogbuffers_pcpu; 1259 1260 plb = malloc_domainset(sizeof(struct pmclog_buffer) * total, 1261 M_PMC, DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); 1262 pmc_dom_hdrs[domain]->pdbh_plbs = plb; 1263 for (; total > 0; total--, plb++) { 1264 void *buf; 1265 1266 buf = malloc_domainset(1024 * pmclog_buffer_size, M_PMC, 1267 DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); 1268 PMCLOG_INIT_BUFFER_DESCRIPTOR(plb, buf, domain); 1269 pmc_plb_rele_unlocked(plb); 1270 } 1271 } 1272 mtx_init(&pmc_kthread_mtx, "pmc-kthread", "pmc-sleep", MTX_DEF); 1273 } 1274 1275 /* 1276 * Shutdown logging. 1277 * 1278 * Destroy mutexes and release memory back the to free pool. 
1279 */ 1280 1281 void 1282 pmclog_shutdown(void) 1283 { 1284 struct pmclog_buffer *plb; 1285 int domain; 1286 1287 mtx_destroy(&pmc_kthread_mtx); 1288 1289 for (domain = 0; domain < vm_ndomains; domain++) { 1290 while ((plb = TAILQ_FIRST(&pmc_dom_hdrs[domain]->pdbh_head)) != NULL) { 1291 TAILQ_REMOVE(&pmc_dom_hdrs[domain]->pdbh_head, plb, plb_next); 1292 free(plb->plb_base, M_PMC); 1293 } 1294 free(pmc_dom_hdrs[domain]->pdbh_plbs, M_PMC); 1295 } 1296 } 1297