/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * Copyright (c) 2018 Matthew Macy
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/pmclog.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>

#include <sys/linker.h>		/* needs to be after <sys/malloc.h> */

#include <machine/atomic.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include "hwpmc_soft.h"

#define	PMC_EPOCH_ENTER()						\
	struct epoch_tracker pmc_et;					\
	epoch_enter_preempt(global_epoch_preempt, &pmc_et)

#define	PMC_EPOCH_EXIT()						\
	epoch_exit_preempt(global_epoch_preempt, &pmc_et)

/*
 * Types
 */

enum pmc_flags {
	PMC_FLAG_NONE	  = 0x00, /* do nothing */
	PMC_FLAG_REMOVE   = 0x01, /* atomically remove entry from hash */
	PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
	PMC_FLAG_NOWAIT   = 0x04, /* do not wait for mallocs */
};

/*
 * The offset in sysent where the syscall is allocated.
101 */ 102 static int pmc_syscall_num = NO_SYSCALL; 103 104 struct pmc_cpu **pmc_pcpu; /* per-cpu state */ 105 pmc_value_t *pmc_pcpu_saved; /* saved PMC values: CSW handling */ 106 107 #define PMC_PCPU_SAVED(C, R) pmc_pcpu_saved[(R) + md->pmd_npmc * (C)] 108 109 struct mtx_pool *pmc_mtxpool; 110 static int *pmc_pmcdisp; /* PMC row dispositions */ 111 112 #define PMC_ROW_DISP_IS_FREE(R) (pmc_pmcdisp[(R)] == 0) 113 #define PMC_ROW_DISP_IS_THREAD(R) (pmc_pmcdisp[(R)] > 0) 114 #define PMC_ROW_DISP_IS_STANDALONE(R) (pmc_pmcdisp[(R)] < 0) 115 116 #define PMC_MARK_ROW_FREE(R) do { \ 117 pmc_pmcdisp[(R)] = 0; \ 118 } while (0) 119 120 #define PMC_MARK_ROW_STANDALONE(R) do { \ 121 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \ 122 __LINE__)); \ 123 atomic_add_int(&pmc_pmcdisp[(R)], -1); \ 124 KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()), \ 125 ("[pmc,%d] row disposition error", __LINE__)); \ 126 } while (0) 127 128 #define PMC_UNMARK_ROW_STANDALONE(R) do { \ 129 atomic_add_int(&pmc_pmcdisp[(R)], 1); \ 130 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \ 131 __LINE__)); \ 132 } while (0) 133 134 #define PMC_MARK_ROW_THREAD(R) do { \ 135 KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \ 136 __LINE__)); \ 137 atomic_add_int(&pmc_pmcdisp[(R)], 1); \ 138 } while (0) 139 140 #define PMC_UNMARK_ROW_THREAD(R) do { \ 141 atomic_add_int(&pmc_pmcdisp[(R)], -1); \ 142 KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \ 143 __LINE__)); \ 144 } while (0) 145 146 /* various event handlers */ 147 static eventhandler_tag pmc_exit_tag, pmc_fork_tag, pmc_kld_load_tag, 148 pmc_kld_unload_tag; 149 150 /* Module statistics */ 151 struct pmc_driverstats pmc_stats; 152 153 /* Machine/processor dependent operations */ 154 static struct pmc_mdep *md; 155 156 /* 157 * Hash tables mapping owner processes and target threads to PMCs. 158 */ 159 struct mtx pmc_processhash_mtx; /* spin mutex */ 160 static u_long pmc_processhashmask; 161 static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash; 162 163 /* 164 * Hash table of PMC owner descriptors. This table is protected by 165 * the shared PMC "sx" lock. 166 */ 167 static u_long pmc_ownerhashmask; 168 static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash; 169 170 /* 171 * List of PMC owners with system-wide sampling PMCs. 172 */ 173 static CK_LIST_HEAD(, pmc_owner) pmc_ss_owners; 174 175 /* 176 * List of free thread entries. This is protected by the spin 177 * mutex. 178 */ 179 static struct mtx pmc_threadfreelist_mtx; /* spin mutex */ 180 static LIST_HEAD(, pmc_thread) pmc_threadfreelist; 181 static int pmc_threadfreelist_entries = 0; 182 #define THREADENTRY_SIZE (sizeof(struct pmc_thread) + \ 183 (md->pmd_npmc * sizeof(struct pmc_threadpmcstate))) 184 185 /* 186 * Task to free thread descriptors 187 */ 188 static struct task free_task; 189 190 /* 191 * A map of row indices to classdep structures. 
192 */ 193 static struct pmc_classdep **pmc_rowindex_to_classdep; 194 195 /* 196 * Prototypes 197 */ 198 199 #ifdef HWPMC_DEBUG 200 static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS); 201 static int pmc_debugflags_parse(char *newstr, char *fence); 202 #endif 203 204 static int load(struct module *module, int cmd, void *arg); 205 static int pmc_add_sample(ring_type_t ring, struct pmc *pm, 206 struct trapframe *tf); 207 static void pmc_add_thread_descriptors_from_proc(struct proc *p, 208 struct pmc_process *pp); 209 static int pmc_attach_process(struct proc *p, struct pmc *pm); 210 static struct pmc *pmc_allocate_pmc_descriptor(void); 211 static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p); 212 static int pmc_attach_one_process(struct proc *p, struct pmc *pm); 213 static bool pmc_can_allocate_row(int ri, enum pmc_mode mode); 214 static bool pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, 215 int cpu); 216 static int pmc_can_attach(struct pmc *pm, struct proc *p); 217 static void pmc_capture_user_callchain(int cpu, int soft, 218 struct trapframe *tf); 219 static void pmc_cleanup(void); 220 static int pmc_detach_process(struct proc *p, struct pmc *pm); 221 static int pmc_detach_one_process(struct proc *p, struct pmc *pm, 222 int flags); 223 static void pmc_destroy_owner_descriptor(struct pmc_owner *po); 224 static void pmc_destroy_pmc_descriptor(struct pmc *pm); 225 static void pmc_destroy_process_descriptor(struct pmc_process *pp); 226 static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p); 227 static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm); 228 static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, 229 pmc_id_t pmc); 230 static struct pmc_process *pmc_find_process_descriptor(struct proc *p, 231 uint32_t mode); 232 static struct pmc_thread *pmc_find_thread_descriptor(struct pmc_process *pp, 233 struct thread *td, uint32_t mode); 234 static void pmc_force_context_switch(void); 235 static void pmc_link_target_process(struct pmc *pm, 236 struct pmc_process *pp); 237 static void pmc_log_all_process_mappings(struct pmc_owner *po); 238 static void pmc_log_kernel_mappings(struct pmc *pm); 239 static void pmc_log_process_mappings(struct pmc_owner *po, struct proc *p); 240 static void pmc_maybe_remove_owner(struct pmc_owner *po); 241 static void pmc_post_callchain_callback(void); 242 static void pmc_process_allproc(struct pmc *pm); 243 static void pmc_process_csw_in(struct thread *td); 244 static void pmc_process_csw_out(struct thread *td); 245 static void pmc_process_exec(struct thread *td, 246 struct pmckern_procexec *pk); 247 static void pmc_process_exit(void *arg, struct proc *p); 248 static void pmc_process_fork(void *arg, struct proc *p1, 249 struct proc *p2, int n); 250 static void pmc_process_proccreate(struct proc *p); 251 static void pmc_process_samples(int cpu, ring_type_t soft); 252 static void pmc_process_threadcreate(struct thread *td); 253 static void pmc_process_threadexit(struct thread *td); 254 static void pmc_process_thread_add(struct thread *td); 255 static void pmc_process_thread_delete(struct thread *td); 256 static void pmc_process_thread_userret(struct thread *td); 257 static void pmc_release_pmc_descriptor(struct pmc *pmc); 258 static void pmc_remove_owner(struct pmc_owner *po); 259 static void pmc_remove_process_descriptor(struct pmc_process *pp); 260 static int pmc_start(struct pmc *pm); 261 static int pmc_stop(struct pmc *pm); 262 static int pmc_syscall_handler(struct thread *td, void 
static struct pmc_thread *pmc_thread_descriptor_pool_alloc(void);
static void	pmc_thread_descriptor_pool_drain(void);
static void	pmc_thread_descriptor_pool_free(struct pmc_thread *pt);
static void	pmc_unlink_target_process(struct pmc *pmc,
    struct pmc_process *pp);

static int	generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp);
static int	generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp);
static struct pmc_mdep *pmc_generic_cpu_initialize(void);
static void	pmc_generic_cpu_finalize(struct pmc_mdep *md);

/*
 * Kernel tunables and sysctl(8) interface.
 */

SYSCTL_DECL(_kern_hwpmc);
SYSCTL_NODE(_kern_hwpmc, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "HWPMC stats");

/* Stats. */
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_ignored, CTLFLAG_RW,
    &pmc_stats.pm_intr_ignored,
    "# of interrupts ignored");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_processed, CTLFLAG_RW,
    &pmc_stats.pm_intr_processed,
    "# of interrupts processed");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_bufferfull, CTLFLAG_RW,
    &pmc_stats.pm_intr_bufferfull,
    "# of interrupts where buffer was full");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscalls, CTLFLAG_RW,
    &pmc_stats.pm_syscalls,
    "# of syscalls");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscall_errors, CTLFLAG_RW,
    &pmc_stats.pm_syscall_errors,
    "# of syscall errors");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests, CTLFLAG_RW,
    &pmc_stats.pm_buffer_requests,
    "# of buffer requests");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests_failed,
    CTLFLAG_RW, &pmc_stats.pm_buffer_requests_failed,
    "# of buffer requests which failed");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, log_sweeps, CTLFLAG_RW,
    &pmc_stats.pm_log_sweeps,
    "# of times samples were processed");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, merges, CTLFLAG_RW,
    &pmc_stats.pm_merges,
    "# of times kernel stack was found for user trace");
SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, overwrites, CTLFLAG_RW,
    &pmc_stats.pm_overwrites,
    "# of times a sample was overwritten before being logged");

static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_RDTUN,
    &pmc_callchaindepth, 0,
    "depth of call chain records");

char pmc_cpuid[PMC_CPUID_LEN];
SYSCTL_STRING(_kern_hwpmc, OID_AUTO, cpuid, CTLFLAG_RD,
    pmc_cpuid, 0,
    "cpu version string");

#ifdef HWPMC_DEBUG
struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
char	pmc_debugstr[PMC_DEBUG_STRSIZE];
TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
    sizeof(pmc_debugstr));
SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
    0, 0, pmc_debugflags_sysctl_handler, "A",
    "debug flags");
#endif

/*
 * kern.hwpmc.hashsize -- determines the number of rows in the hash
 * tables used to look up target processes and owners.
 */
static int pmc_hashsize = PMC_HASH_SIZE;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_RDTUN,
    &pmc_hashsize, 0,
    "rows in hash tables");

/*
 * kern.hwpmc.nsamples --- number of PC samples/callchain stacks per CPU
 */
static int pmc_nsamples = PMC_NSAMPLES;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_RDTUN,
    &pmc_nsamples, 0,
    "number of PC samples per CPU");

static uint64_t pmc_sample_mask = PMC_NSAMPLES - 1;

/*
 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
 */
static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_RDTUN,
    &pmc_mtxpool_size, 0,
    "size of spin mutex pool");

/*
 * kern.hwpmc.threadfreelist_entries -- number of free entries
 */
SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_entries, CTLFLAG_RD,
    &pmc_threadfreelist_entries, 0,
    "number of available thread entries");

/*
 * kern.hwpmc.threadfreelist_max -- maximum number of free entries
 */
static int pmc_threadfreelist_max = PMC_THREADLIST_MAX;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_max, CTLFLAG_RW,
    &pmc_threadfreelist_max, 0,
    "maximum number of available thread entries before freeing some");

/*
 * kern.hwpmc.mincount -- minimum sample count
 */
static u_int pmc_mincount = 1000;
SYSCTL_INT(_kern_hwpmc, OID_AUTO, mincount, CTLFLAG_RWTUN,
    &pmc_mincount, 0,
    "minimum count for sampling counters");

/*
 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
 * allocate system-wide PMCs.
 *
 * Allowing unprivileged processes to allocate system PMCs is convenient
 * if system-wide measurements need to be taken concurrently with other
 * per-process measurements.  This feature is turned off by default.
 */
static int pmc_unprivileged_syspmcs = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RWTUN,
    &pmc_unprivileged_syspmcs, 0,
    "allow unprivileged process to allocate system PMCs");

/*
 * Hash function.  Discard the lower 2 bits of the pointer since
 * these are always zero for our uses.  The hash multiplier is
 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
 */
#if	LONG_BIT == 64
#define	_PMC_HM		11400714819323198486u
#elif	LONG_BIT == 32
#define	_PMC_HM		2654435769u
#else
#error	Must know the size of 'long' to compile
#endif

#define	PMC_HASH_PTR(P,M)	((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
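
/*
 * Usage sketch (illustrative; modeled on the descriptor lookup routines
 * elsewhere in this file): a struct proc pointer 'p' is hashed into a
 * bucket of 'pmc_processhash' and the chain is scanned for a match:
 *
 *	struct pmc_process *pp;
 *
 *	LIST_FOREACH(pp,
 *	    &pmc_processhash[PMC_HASH_PTR(p, pmc_processhashmask)],
 *	    pp_next)
 *		if (pp->pp_proc == p)
 *			break;
 *
 * The multiplicative (Fibonacci) hash spreads pointer values evenly
 * across the 'M + 1' buckets selected by the mask.
 */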
402 */ 403 #if LONG_BIT == 64 404 #define _PMC_HM 11400714819323198486u 405 #elif LONG_BIT == 32 406 #define _PMC_HM 2654435769u 407 #else 408 #error Must know the size of 'long' to compile 409 #endif 410 411 #define PMC_HASH_PTR(P,M) ((((unsigned long) (P) >> 2) * _PMC_HM) & (M)) 412 413 /* 414 * Syscall structures 415 */ 416 417 /* The `sysent' for the new syscall */ 418 static struct sysent pmc_sysent = { 419 .sy_narg = 2, 420 .sy_call = pmc_syscall_handler, 421 }; 422 423 static struct syscall_module_data pmc_syscall_mod = { 424 .chainevh = load, 425 .chainarg = NULL, 426 .offset = &pmc_syscall_num, 427 .new_sysent = &pmc_sysent, 428 .old_sysent = { .sy_narg = 0, .sy_call = NULL }, 429 .flags = SY_THR_STATIC_KLD, 430 }; 431 432 static moduledata_t pmc_mod = { 433 .name = PMC_MODULE_NAME, 434 .evhand = syscall_module_handler, 435 .priv = &pmc_syscall_mod, 436 }; 437 438 #ifdef EARLY_AP_STARTUP 439 DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SYSCALLS, SI_ORDER_ANY); 440 #else 441 DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY); 442 #endif 443 MODULE_VERSION(pmc, PMC_VERSION); 444 445 #ifdef HWPMC_DEBUG 446 enum pmc_dbgparse_state { 447 PMCDS_WS, /* in whitespace */ 448 PMCDS_MAJOR, /* seen a major keyword */ 449 PMCDS_MINOR 450 }; 451 452 static int 453 pmc_debugflags_parse(char *newstr, char *fence) 454 { 455 struct pmc_debugflags *tmpflags; 456 size_t kwlen; 457 char c, *p, *q; 458 int error, *newbits, tmp; 459 int found; 460 461 tmpflags = malloc(sizeof(*tmpflags), M_PMC, M_WAITOK | M_ZERO); 462 463 error = 0; 464 for (p = newstr; p < fence && (c = *p); p++) { 465 /* skip white space */ 466 if (c == ' ' || c == '\t') 467 continue; 468 469 /* look for a keyword followed by "=" */ 470 for (q = p; p < fence && (c = *p) && c != '='; p++) 471 ; 472 if (c != '=') { 473 error = EINVAL; 474 goto done; 475 } 476 477 kwlen = p - q; 478 newbits = NULL; 479 480 /* lookup flag group name */ 481 #define DBG_SET_FLAG_MAJ(S,F) \ 482 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \ 483 newbits = &tmpflags->pdb_ ## F; 484 485 DBG_SET_FLAG_MAJ("cpu", CPU); 486 DBG_SET_FLAG_MAJ("csw", CSW); 487 DBG_SET_FLAG_MAJ("logging", LOG); 488 DBG_SET_FLAG_MAJ("module", MOD); 489 DBG_SET_FLAG_MAJ("md", MDP); 490 DBG_SET_FLAG_MAJ("owner", OWN); 491 DBG_SET_FLAG_MAJ("pmc", PMC); 492 DBG_SET_FLAG_MAJ("process", PRC); 493 DBG_SET_FLAG_MAJ("sampling", SAM); 494 #undef DBG_SET_FLAG_MAJ 495 496 if (newbits == NULL) { 497 error = EINVAL; 498 goto done; 499 } 500 501 p++; /* skip the '=' */ 502 503 /* Now parse the individual flags */ 504 tmp = 0; 505 newflag: 506 for (q = p; p < fence && (c = *p); p++) 507 if (c == ' ' || c == '\t' || c == ',') 508 break; 509 510 /* p == fence or c == ws or c == "," or c == 0 */ 511 512 if ((kwlen = p - q) == 0) { 513 *newbits = tmp; 514 continue; 515 } 516 517 found = 0; 518 #define DBG_SET_FLAG_MIN(S,F) \ 519 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \ 520 tmp |= found = (1 << PMC_DEBUG_MIN_ ## F) 521 522 /* a '*' denotes all possible flags in the group */ 523 if (kwlen == 1 && *q == '*') 524 tmp = found = ~0; 525 /* look for individual flag names */ 526 DBG_SET_FLAG_MIN("allocaterow", ALR); 527 DBG_SET_FLAG_MIN("allocate", ALL); 528 DBG_SET_FLAG_MIN("attach", ATT); 529 DBG_SET_FLAG_MIN("bind", BND); 530 DBG_SET_FLAG_MIN("config", CFG); 531 DBG_SET_FLAG_MIN("exec", EXC); 532 DBG_SET_FLAG_MIN("exit", EXT); 533 DBG_SET_FLAG_MIN("find", FND); 534 DBG_SET_FLAG_MIN("flush", FLS); 535 DBG_SET_FLAG_MIN("fork", FRK); 536 DBG_SET_FLAG_MIN("getbuf", GTB); 537 
DBG_SET_FLAG_MIN("hook", PMH); 538 DBG_SET_FLAG_MIN("init", INI); 539 DBG_SET_FLAG_MIN("intr", INT); 540 DBG_SET_FLAG_MIN("linktarget", TLK); 541 DBG_SET_FLAG_MIN("mayberemove", OMR); 542 DBG_SET_FLAG_MIN("ops", OPS); 543 DBG_SET_FLAG_MIN("read", REA); 544 DBG_SET_FLAG_MIN("register", REG); 545 DBG_SET_FLAG_MIN("release", REL); 546 DBG_SET_FLAG_MIN("remove", ORM); 547 DBG_SET_FLAG_MIN("sample", SAM); 548 DBG_SET_FLAG_MIN("scheduleio", SIO); 549 DBG_SET_FLAG_MIN("select", SEL); 550 DBG_SET_FLAG_MIN("signal", SIG); 551 DBG_SET_FLAG_MIN("swi", SWI); 552 DBG_SET_FLAG_MIN("swo", SWO); 553 DBG_SET_FLAG_MIN("start", STA); 554 DBG_SET_FLAG_MIN("stop", STO); 555 DBG_SET_FLAG_MIN("syscall", PMS); 556 DBG_SET_FLAG_MIN("unlinktarget", TUL); 557 DBG_SET_FLAG_MIN("write", WRI); 558 #undef DBG_SET_FLAG_MIN 559 if (found == 0) { 560 /* unrecognized flag name */ 561 error = EINVAL; 562 goto done; 563 } 564 565 if (c == 0 || c == ' ' || c == '\t') { /* end of flag group */ 566 *newbits = tmp; 567 continue; 568 } 569 570 p++; 571 goto newflag; 572 } 573 574 /* save the new flag set */ 575 bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags)); 576 done: 577 free(tmpflags, M_PMC); 578 return (error); 579 } 580 581 static int 582 pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS) 583 { 584 char *fence, *newstr; 585 int error; 586 u_int n; 587 588 n = sizeof(pmc_debugstr); 589 newstr = malloc(n, M_PMC, M_WAITOK | M_ZERO); 590 strlcpy(newstr, pmc_debugstr, n); 591 592 error = sysctl_handle_string(oidp, newstr, n, req); 593 594 /* if there is a new string, parse and copy it */ 595 if (error == 0 && req->newptr != NULL) { 596 fence = newstr + (n < req->newlen ? n : req->newlen + 1); 597 error = pmc_debugflags_parse(newstr, fence); 598 if (error == 0) 599 strlcpy(pmc_debugstr, newstr, sizeof(pmc_debugstr)); 600 } 601 free(newstr, M_PMC); 602 603 return (error); 604 } 605 #endif 606 607 /* 608 * Map a row index to a classdep structure and return the adjusted row 609 * index for the PMC class index. 
610 */ 611 static struct pmc_classdep * 612 pmc_ri_to_classdep(struct pmc_mdep *md __unused, int ri, int *adjri) 613 { 614 struct pmc_classdep *pcd; 615 616 KASSERT(ri >= 0 && ri < md->pmd_npmc, 617 ("[pmc,%d] illegal row-index %d", __LINE__, ri)); 618 619 pcd = pmc_rowindex_to_classdep[ri]; 620 KASSERT(pcd != NULL, 621 ("[pmc,%d] ri %d null pcd", __LINE__, ri)); 622 623 *adjri = ri - pcd->pcd_ri; 624 KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num, 625 ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri)); 626 627 return (pcd); 628 } 629 630 /* 631 * Concurrency Control 632 * 633 * The driver manages the following data structures: 634 * 635 * - target process descriptors, one per target process 636 * - owner process descriptors (and attached lists), one per owner process 637 * - lookup hash tables for owner and target processes 638 * - PMC descriptors (and attached lists) 639 * - per-cpu hardware state 640 * - the 'hook' variable through which the kernel calls into 641 * this module 642 * - the machine hardware state (managed by the MD layer) 643 * 644 * These data structures are accessed from: 645 * 646 * - thread context-switch code 647 * - interrupt handlers (possibly on multiple cpus) 648 * - kernel threads on multiple cpus running on behalf of user 649 * processes doing system calls 650 * - this driver's private kernel threads 651 * 652 * = Locks and Locking strategy = 653 * 654 * The driver uses four locking strategies for its operation: 655 * 656 * - The global SX lock "pmc_sx" is used to protect internal 657 * data structures. 658 * 659 * Calls into the module by syscall() start with this lock being 660 * held in exclusive mode. Depending on the requested operation, 661 * the lock may be downgraded to 'shared' mode to allow more 662 * concurrent readers into the module. Calls into the module from 663 * other parts of the kernel acquire the lock in shared mode. 664 * 665 * This SX lock is held in exclusive mode for any operations that 666 * modify the linkages between the driver's internal data structures. 667 * 668 * The 'pmc_hook' function pointer is also protected by this lock. 669 * It is only examined with the sx lock held in exclusive mode. The 670 * kernel module is allowed to be unloaded only with the sx lock held 671 * in exclusive mode. In normal syscall handling, after acquiring the 672 * pmc_sx lock we first check that 'pmc_hook' is non-null before 673 * proceeding. This prevents races between the thread unloading the module 674 * and other threads seeking to use the module. 675 * 676 * - Lookups of target process structures and owner process structures 677 * cannot use the global "pmc_sx" SX lock because these lookups need 678 * to happen during context switches and in other critical sections 679 * where sleeping is not allowed. We protect these lookup tables 680 * with their own private spin-mutexes, "pmc_processhash_mtx" and 681 * "pmc_ownerhash_mtx". 682 * 683 * - Interrupt handlers work in a lock free manner. At interrupt 684 * time, handlers look at the PMC pointer (phw->phw_pmc) configured 685 * when the PMC was started. If this pointer is NULL, the interrupt 686 * is ignored after updating driver statistics. We ensure that this 687 * pointer is set (using an atomic operation if necessary) before the 688 * PMC hardware is started. Conversely, this pointer is unset atomically 689 * only after the PMC hardware is stopped. 
 *
 *   We ensure that everything needed for the operation of an
 *   interrupt handler is available without it needing to acquire any
 *   locks.  We also ensure that a PMC's software state is destroyed only
 *   after the PMC is taken off hardware (on all CPUs).
 *
 * - Context-switch handling with process-private PMCs needs more
 *   care.
 *
 *   A given process may be the target of multiple PMCs.  For example,
 *   PMCATTACH and PMCDETACH may be requested by a process on one CPU
 *   while the target process is running on another.  A PMC could also
 *   be getting released because its owner is exiting.  We tackle
 *   these situations in the following manner:
 *
 *   - each target process structure 'pmc_process' has an array
 *     of 'struct pmc *' pointers, one for each hardware PMC.
 *
 *   - At context switch IN time, each "target" PMC in RUNNING state
 *     gets started on hardware and a pointer to each PMC is copied into
 *     the per-cpu phw array.  The 'runcount' for the PMC is
 *     incremented.
 *
 *   - At context switch OUT time, all process-virtual PMCs are stopped
 *     on hardware.  The saved value is added to the PMC's value field
 *     only if the PMC is in a non-deleted state (the PMC's state could
 *     have changed during the current time slice).
 *
 *   Note that in between a switch IN on a processor and the matching
 *   switch OUT, the PMC could have been released on another CPU.
 *   Therefore context switch OUT always looks at the hardware state to
 *   turn OFF PMCs and will update a PMC's saved value only if reachable
 *   from the target process record.
 *
 * - OP PMCRELEASE could be called on a PMC at any time (the PMC could
 *   be attached to many processes at the time of the call and could
 *   be active on multiple CPUs).
 *
 *   We prevent further scheduling of the PMC by marking it as in
 *   state 'DELETED'.  If the runcount of the PMC is non-zero then
 *   this PMC is currently running on a CPU somewhere.  The thread
 *   doing the PMCRELEASE operation waits by repeatedly doing a
 *   pause() till the runcount comes to zero.
 *
 * The contents of a PMC descriptor (struct pmc) are protected using
 * a spin-mutex.  In order to save space, we use a mutex pool.
 *
 * In terms of lock types used by witness(4), we use:
 * - Type "pmc-sx", used by the global SX lock.
 * - Type "pmc-sleep", for sleep mutexes used by logger threads.
 * - Type "pmc-per-proc", for protecting PMC owner descriptors.
 * - Type "pmc-leaf", used for all other spin mutexes.
 */

/*
 * Save the CPU binding of the current kthread.
 */
void
pmc_save_cpu_binding(struct pmc_binding *pb)
{
	PMCDBG0(CPU,BND,2, "save-cpu");
	thread_lock(curthread);
	pb->pb_bound = sched_is_bound(curthread);
	pb->pb_cpu = curthread->td_oncpu;
	pb->pb_priority = curthread->td_priority;
	thread_unlock(curthread);
	PMCDBG1(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
}

/*
 * Restore the CPU binding of the current thread.
761 */ 762 void 763 pmc_restore_cpu_binding(struct pmc_binding *pb) 764 { 765 PMCDBG2(CPU,BND,2, "restore-cpu curcpu=%d restore=%d", 766 curthread->td_oncpu, pb->pb_cpu); 767 thread_lock(curthread); 768 sched_bind(curthread, pb->pb_cpu); 769 if (!pb->pb_bound) 770 sched_unbind(curthread); 771 sched_prio(curthread, pb->pb_priority); 772 thread_unlock(curthread); 773 PMCDBG0(CPU,BND,2, "restore-cpu done"); 774 } 775 776 /* 777 * Move execution over to the specified CPU and bind it there. 778 */ 779 void 780 pmc_select_cpu(int cpu) 781 { 782 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 783 ("[pmc,%d] bad cpu number %d", __LINE__, cpu)); 784 785 /* Never move to an inactive CPU. */ 786 KASSERT(pmc_cpu_is_active(cpu), ("[pmc,%d] selecting inactive " 787 "CPU %d", __LINE__, cpu)); 788 789 PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d", cpu); 790 thread_lock(curthread); 791 sched_prio(curthread, PRI_MIN); 792 sched_bind(curthread, cpu); 793 thread_unlock(curthread); 794 795 KASSERT(curthread->td_oncpu == cpu, 796 ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__, 797 cpu, curthread->td_oncpu)); 798 799 PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d ok", cpu); 800 } 801 802 /* 803 * Force a context switch. 804 * 805 * We do this by pause'ing for 1 tick -- invoking mi_switch() is not 806 * guaranteed to force a context switch. 807 */ 808 static void 809 pmc_force_context_switch(void) 810 { 811 812 pause("pmcctx", 1); 813 } 814 815 uint64_t 816 pmc_rdtsc(void) 817 { 818 #if defined(__i386__) || defined(__amd64__) 819 if (__predict_true(amd_feature & AMDID_RDTSCP)) 820 return (rdtscp()); 821 else 822 return (rdtsc()); 823 #else 824 return (get_cyclecount()); 825 #endif 826 } 827 828 /* 829 * Get the file name for an executable. This is a simple wrapper 830 * around vn_fullpath(9). 831 */ 832 static void 833 pmc_getfilename(struct vnode *v, char **fullpath, char **freepath) 834 { 835 836 *fullpath = "unknown"; 837 *freepath = NULL; 838 vn_fullpath(v, fullpath, freepath); 839 } 840 841 /* 842 * Remove a process owning PMCs. 843 */ 844 void 845 pmc_remove_owner(struct pmc_owner *po) 846 { 847 struct pmc *pm, *tmp; 848 849 sx_assert(&pmc_sx, SX_XLOCKED); 850 851 PMCDBG1(OWN,ORM,1, "remove-owner po=%p", po); 852 853 /* Remove descriptor from the owner hash table */ 854 LIST_REMOVE(po, po_next); 855 856 /* release all owned PMC descriptors */ 857 LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) { 858 PMCDBG1(OWN,ORM,2, "pmc=%p", pm); 859 KASSERT(pm->pm_owner == po, 860 ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po)); 861 862 pmc_release_pmc_descriptor(pm); /* will unlink from the list */ 863 pmc_destroy_pmc_descriptor(pm); 864 } 865 866 KASSERT(po->po_sscount == 0, 867 ("[pmc,%d] SS count not zero", __LINE__)); 868 KASSERT(LIST_EMPTY(&po->po_pmcs), 869 ("[pmc,%d] PMC list not empty", __LINE__)); 870 871 /* de-configure the log file if present */ 872 if (po->po_flags & PMC_PO_OWNS_LOGFILE) 873 pmclog_deconfigure_log(po); 874 } 875 876 /* 877 * Remove an owner process record if all conditions are met. 
878 */ 879 static void 880 pmc_maybe_remove_owner(struct pmc_owner *po) 881 { 882 883 PMCDBG1(OWN,OMR,1, "maybe-remove-owner po=%p", po); 884 885 /* 886 * Remove owner record if 887 * - this process does not own any PMCs 888 * - this process has not allocated a system-wide sampling buffer 889 */ 890 if (LIST_EMPTY(&po->po_pmcs) && 891 ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) { 892 pmc_remove_owner(po); 893 pmc_destroy_owner_descriptor(po); 894 } 895 } 896 897 /* 898 * Add an association between a target process and a PMC. 899 */ 900 static void 901 pmc_link_target_process(struct pmc *pm, struct pmc_process *pp) 902 { 903 struct pmc_target *pt; 904 struct pmc_thread *pt_td __diagused; 905 int ri; 906 907 sx_assert(&pmc_sx, SX_XLOCKED); 908 KASSERT(pm != NULL && pp != NULL, 909 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp)); 910 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)), 911 ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d", 912 __LINE__, pm, pp->pp_proc->p_pid)); 913 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= ((int) md->pmd_npmc - 1), 914 ("[pmc,%d] Illegal reference count %d for process record %p", 915 __LINE__, pp->pp_refcnt, (void *) pp)); 916 917 ri = PMC_TO_ROWINDEX(pm); 918 919 PMCDBG3(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p", 920 pm, ri, pp); 921 922 #ifdef HWPMC_DEBUG 923 LIST_FOREACH(pt, &pm->pm_targets, pt_next) { 924 if (pt->pt_process == pp) 925 KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets", 926 __LINE__, pp, pm)); 927 } 928 #endif 929 pt = malloc(sizeof(struct pmc_target), M_PMC, M_WAITOK | M_ZERO); 930 pt->pt_process = pp; 931 932 LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next); 933 934 atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc, 935 (uintptr_t)pm); 936 937 if (pm->pm_owner->po_owner == pp->pp_proc) 938 pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER; 939 940 /* 941 * Initialize the per-process values at this row index. 942 */ 943 pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ? 944 pm->pm_sc.pm_reloadcount : 0; 945 pp->pp_refcnt++; 946 947 #ifdef INVARIANTS 948 /* Confirm that the per-thread values at this row index are cleared. */ 949 if (PMC_TO_MODE(pm) == PMC_MODE_TS) { 950 mtx_lock_spin(pp->pp_tdslock); 951 LIST_FOREACH(pt_td, &pp->pp_tds, pt_next) { 952 KASSERT(pt_td->pt_pmcs[ri].pt_pmcval == (pmc_value_t) 0, 953 ("[pmc,%d] pt_pmcval not cleared for pid=%d at " 954 "ri=%d", __LINE__, pp->pp_proc->p_pid, ri)); 955 } 956 mtx_unlock_spin(pp->pp_tdslock); 957 } 958 #endif 959 } 960 961 /* 962 * Removes the association between a target process and a PMC. 963 */ 964 static void 965 pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp) 966 { 967 int ri; 968 struct proc *p; 969 struct pmc_target *ptgt; 970 struct pmc_thread *pt; 971 972 sx_assert(&pmc_sx, SX_XLOCKED); 973 974 KASSERT(pm != NULL && pp != NULL, 975 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp)); 976 977 KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc, 978 ("[pmc,%d] Illegal ref count %d on process record %p", 979 __LINE__, pp->pp_refcnt, (void *) pp)); 980 981 ri = PMC_TO_ROWINDEX(pm); 982 983 PMCDBG3(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p", 984 pm, ri, pp); 985 986 KASSERT(pp->pp_pmcs[ri].pp_pmc == pm, 987 ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__, 988 ri, pm, pp->pp_pmcs[ri].pp_pmc)); 989 990 pp->pp_pmcs[ri].pp_pmc = NULL; 991 pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t)0; 992 993 /* Clear the per-thread values at this row index. 
	if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
		mtx_lock_spin(pp->pp_tdslock);
		LIST_FOREACH(pt, &pp->pp_tds, pt_next)
			pt->pt_pmcs[ri].pt_pmcval = (pmc_value_t)0;
		mtx_unlock_spin(pp->pp_tdslock);
	}

	/* Remove owner-specific flags */
	if (pm->pm_owner->po_owner == pp->pp_proc) {
		pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
		pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
	}

	pp->pp_refcnt--;

	/* Remove the target process from the PMC structure */
	LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
		if (ptgt->pt_process == pp)
			break;

	KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
	    "in pmc %p", __LINE__, pp->pp_proc, pp, pm));

	LIST_REMOVE(ptgt, pt_next);
	free(ptgt, M_PMC);

	/* if the PMC now lacks targets, send the owner a SIGIO */
	if (LIST_EMPTY(&pm->pm_targets)) {
		p = pm->pm_owner->po_owner;
		PROC_LOCK(p);
		kern_psignal(p, SIGIO);
		PROC_UNLOCK(p);

		PMCDBG2(PRC,SIG,2, "signalling proc=%p signal=%d", p, SIGIO);
	}
}

/*
 * Check if PMC 'pm' may be attached to target process 't'.
 */
static int
pmc_can_attach(struct pmc *pm, struct proc *t)
{
	struct proc *o;		/* pmc owner */
	struct ucred *oc, *tc;	/* owner, target credentials */
	int decline_attach, i;

	/*
	 * A PMC's owner can always attach that PMC to itself.
	 */
	if ((o = pm->pm_owner->po_owner) == t)
		return (0);

	PROC_LOCK(o);
	oc = o->p_ucred;
	crhold(oc);
	PROC_UNLOCK(o);

	PROC_LOCK(t);
	tc = t->p_ucred;
	crhold(tc);
	PROC_UNLOCK(t);

	/*
	 * The effective uid of the PMC owner should match at least one
	 * of the {effective,real,saved} uids of the target process.
	 */
	decline_attach = oc->cr_uid != tc->cr_uid &&
	    oc->cr_uid != tc->cr_svuid &&
	    oc->cr_uid != tc->cr_ruid;

	/*
	 * Every one of the target's group ids must be in the owner's
	 * group list.
	 */
	for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
		decline_attach = !groupmember(tc->cr_groups[i], oc);

	/* check the real and saved gids too */
	if (decline_attach == 0)
		decline_attach = !groupmember(tc->cr_rgid, oc) ||
		    !groupmember(tc->cr_svgid, oc);

	crfree(tc);
	crfree(oc);

	return (!decline_attach);
}

/*
 * Attach a process to a PMC.
 */
static int
pmc_attach_one_process(struct proc *p, struct pmc *pm)
{
	int ri, error;
	char *fullpath, *freepath;
	struct pmc_process *pp;

	sx_assert(&pmc_sx, SX_XLOCKED);

	PMCDBG5(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
	    PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);

	/*
	 * Locate the process descriptor corresponding to process 'p',
	 * allocating space as needed.
	 *
	 * Verify that rowindex 'pm_rowindex' is free in the process
	 * descriptor.
	 *
	 * If not, allocate space for a descriptor and link the
	 * process descriptor and PMC.
	 */
1110 */ 1111 ri = PMC_TO_ROWINDEX(pm); 1112 1113 /* mark process as using HWPMCs */ 1114 PROC_LOCK(p); 1115 p->p_flag |= P_HWPMC; 1116 PROC_UNLOCK(p); 1117 1118 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL) { 1119 error = ENOMEM; 1120 goto fail; 1121 } 1122 1123 if (pp->pp_pmcs[ri].pp_pmc == pm) {/* already present at slot [ri] */ 1124 error = EEXIST; 1125 goto fail; 1126 } 1127 1128 if (pp->pp_pmcs[ri].pp_pmc != NULL) { 1129 error = EBUSY; 1130 goto fail; 1131 } 1132 1133 pmc_link_target_process(pm, pp); 1134 1135 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) && 1136 (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0) 1137 pm->pm_flags |= PMC_F_NEEDS_LOGFILE; 1138 1139 pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */ 1140 1141 /* issue an attach event to a configured log file */ 1142 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) { 1143 if (p->p_flag & P_KPROC) { 1144 fullpath = kernelname; 1145 freepath = NULL; 1146 } else { 1147 pmc_getfilename(p->p_textvp, &fullpath, &freepath); 1148 pmclog_process_pmcattach(pm, p->p_pid, fullpath); 1149 } 1150 free(freepath, M_TEMP); 1151 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) 1152 pmc_log_process_mappings(pm->pm_owner, p); 1153 } 1154 1155 return (0); 1156 fail: 1157 PROC_LOCK(p); 1158 p->p_flag &= ~P_HWPMC; 1159 PROC_UNLOCK(p); 1160 return (error); 1161 } 1162 1163 /* 1164 * Attach a process and optionally its children 1165 */ 1166 static int 1167 pmc_attach_process(struct proc *p, struct pmc *pm) 1168 { 1169 int error; 1170 struct proc *top; 1171 1172 sx_assert(&pmc_sx, SX_XLOCKED); 1173 1174 PMCDBG5(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm, 1175 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); 1176 1177 /* 1178 * If this PMC successfully allowed a GETMSR operation 1179 * in the past, disallow further ATTACHes. 1180 */ 1181 if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0) 1182 return (EPERM); 1183 1184 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0) 1185 return (pmc_attach_one_process(p, pm)); 1186 1187 /* 1188 * Traverse all child processes, attaching them to 1189 * this PMC. 1190 */ 1191 sx_slock(&proctree_lock); 1192 1193 top = p; 1194 for (;;) { 1195 if ((error = pmc_attach_one_process(p, pm)) != 0) 1196 break; 1197 if (!LIST_EMPTY(&p->p_children)) 1198 p = LIST_FIRST(&p->p_children); 1199 else for (;;) { 1200 if (p == top) 1201 goto done; 1202 if (LIST_NEXT(p, p_sibling)) { 1203 p = LIST_NEXT(p, p_sibling); 1204 break; 1205 } 1206 p = p->p_pptr; 1207 } 1208 } 1209 1210 if (error != 0) 1211 (void)pmc_detach_process(top, pm); 1212 1213 done: 1214 sx_sunlock(&proctree_lock); 1215 return (error); 1216 } 1217 1218 /* 1219 * Detach a process from a PMC. If there are no other PMCs tracking 1220 * this process, remove the process structure from its hash table. If 1221 * 'flags' contains PMC_FLAG_REMOVE, then free the process structure. 
1222 */ 1223 static int 1224 pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags) 1225 { 1226 int ri; 1227 struct pmc_process *pp; 1228 1229 sx_assert(&pmc_sx, SX_XLOCKED); 1230 1231 KASSERT(pm != NULL, 1232 ("[pmc,%d] null pm pointer", __LINE__)); 1233 1234 ri = PMC_TO_ROWINDEX(pm); 1235 1236 PMCDBG6(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x", 1237 pm, ri, p, p->p_pid, p->p_comm, flags); 1238 1239 if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) 1240 return (ESRCH); 1241 1242 if (pp->pp_pmcs[ri].pp_pmc != pm) 1243 return (EINVAL); 1244 1245 pmc_unlink_target_process(pm, pp); 1246 1247 /* Issue a detach entry if a log file is configured */ 1248 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) 1249 pmclog_process_pmcdetach(pm, p->p_pid); 1250 1251 /* 1252 * If there are no PMCs targeting this process, we remove its 1253 * descriptor from the target hash table and unset the P_HWPMC 1254 * flag in the struct proc. 1255 */ 1256 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc, 1257 ("[pmc,%d] Illegal refcnt %d for process struct %p", 1258 __LINE__, pp->pp_refcnt, pp)); 1259 1260 if (pp->pp_refcnt != 0) /* still a target of some PMC */ 1261 return (0); 1262 1263 pmc_remove_process_descriptor(pp); 1264 1265 if (flags & PMC_FLAG_REMOVE) 1266 pmc_destroy_process_descriptor(pp); 1267 1268 PROC_LOCK(p); 1269 p->p_flag &= ~P_HWPMC; 1270 PROC_UNLOCK(p); 1271 1272 return (0); 1273 } 1274 1275 /* 1276 * Detach a process and optionally its descendants from a PMC. 1277 */ 1278 static int 1279 pmc_detach_process(struct proc *p, struct pmc *pm) 1280 { 1281 struct proc *top; 1282 1283 sx_assert(&pmc_sx, SX_XLOCKED); 1284 1285 PMCDBG5(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm, 1286 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); 1287 1288 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0) 1289 return (pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE)); 1290 1291 /* 1292 * Traverse all children, detaching them from this PMC. We 1293 * ignore errors since we could be detaching a PMC from a 1294 * partially attached proc tree. 1295 */ 1296 sx_slock(&proctree_lock); 1297 1298 top = p; 1299 for (;;) { 1300 (void)pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE); 1301 1302 if (!LIST_EMPTY(&p->p_children)) { 1303 p = LIST_FIRST(&p->p_children); 1304 } else { 1305 for (;;) { 1306 if (p == top) 1307 goto done; 1308 if (LIST_NEXT(p, p_sibling)) { 1309 p = LIST_NEXT(p, p_sibling); 1310 break; 1311 } 1312 p = p->p_pptr; 1313 } 1314 } 1315 } 1316 done: 1317 sx_sunlock(&proctree_lock); 1318 if (LIST_EMPTY(&pm->pm_targets)) 1319 pm->pm_flags &= ~PMC_F_ATTACH_DONE; 1320 1321 return (0); 1322 } 1323 1324 /* 1325 * Handle events after an exec() for a process: 1326 * - Inform log owners of the new exec() event 1327 * - Release any PMCs owned by the process before the exec() 1328 * - Detach PMCs from the target if required 1329 */ 1330 static void 1331 pmc_process_exec(struct thread *td, struct pmckern_procexec *pk) 1332 { 1333 struct pmc *pm; 1334 struct pmc_owner *po; 1335 struct pmc_process *pp; 1336 struct proc *p; 1337 char *fullpath, *freepath; 1338 u_int ri; 1339 bool is_using_hwpmcs; 1340 1341 sx_assert(&pmc_sx, SX_XLOCKED); 1342 1343 p = td->td_proc; 1344 pmc_getfilename(p->p_textvp, &fullpath, &freepath); 1345 1346 PMC_EPOCH_ENTER(); 1347 /* Inform owners of SS mode PMCs of the exec event. 
	CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) {
		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) {
			pmclog_process_procexec(po, PMC_ID_INVALID, p->p_pid,
			    pk->pm_baseaddr, pk->pm_dynaddr, fullpath);
		}
	}
	PMC_EPOCH_EXIT();

	PROC_LOCK(p);
	is_using_hwpmcs = (p->p_flag & P_HWPMC) != 0;
	PROC_UNLOCK(p);

	if (!is_using_hwpmcs) {
		if (freepath != NULL)
			free(freepath, M_TEMP);
		return;
	}

	/*
	 * PMCs are not inherited across an exec(): remove any PMCs that this
	 * process is the owner of.
	 */
	if ((po = pmc_find_owner_descriptor(p)) != NULL) {
		pmc_remove_owner(po);
		pmc_destroy_owner_descriptor(po);
	}

	/*
	 * If the process being exec'ed is not the target of any PMC, we are
	 * done.
	 */
	if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) {
		if (freepath != NULL)
			free(freepath, M_TEMP);
		return;
	}

	/*
	 * Log the exec event to all monitoring owners.  Skip owners who have
	 * already received the event because they had system sampling PMCs
	 * active.
	 */
	for (ri = 0; ri < md->pmd_npmc; ri++) {
		if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
			continue;

		po = pm->pm_owner;
		if (po->po_sscount == 0 &&
		    (po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) {
			pmclog_process_procexec(po, pm->pm_id, p->p_pid,
			    pk->pm_baseaddr, pk->pm_dynaddr, fullpath);
		}
	}

	if (freepath != NULL)
		free(freepath, M_TEMP);

	PMCDBG4(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
	    p, p->p_pid, p->p_comm, pk->pm_credentialschanged);

	if (pk->pm_credentialschanged == 0) /* no change */
		return;

	/*
	 * If the newly exec()'ed process has a different credential
	 * than before, allow it to be the target of a PMC only if
	 * the PMC's owner has sufficient privilege.
	 */
	for (ri = 0; ri < md->pmd_npmc; ri++) {
		if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
			if (pmc_can_attach(pm, td->td_proc) != 0) {
				pmc_detach_one_process(td->td_proc, pm,
				    PMC_FLAG_NONE);
			}
		}
	}

	KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= md->pmd_npmc,
	    ("[pmc,%d] Illegal ref count %u on pp %p", __LINE__,
	    pp->pp_refcnt, pp));

	/*
	 * If this process is no longer the target of any
	 * PMCs, we can remove the process entry and free
	 * up space.
	 */
	if (pp->pp_refcnt == 0) {
		pmc_remove_process_descriptor(pp);
		pmc_destroy_process_descriptor(pp);
	}
}

/*
 * Thread context switch IN.
 */
1442 */ 1443 static void 1444 pmc_process_csw_in(struct thread *td) 1445 { 1446 struct pmc *pm; 1447 struct pmc_classdep *pcd; 1448 struct pmc_cpu *pc; 1449 struct pmc_hw *phw __diagused; 1450 struct pmc_process *pp; 1451 struct pmc_thread *pt; 1452 struct proc *p; 1453 pmc_value_t newvalue; 1454 int cpu; 1455 u_int adjri, ri; 1456 1457 p = td->td_proc; 1458 pt = NULL; 1459 if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL) 1460 return; 1461 1462 KASSERT(pp->pp_proc == td->td_proc, 1463 ("[pmc,%d] not my thread state", __LINE__)); 1464 1465 critical_enter(); /* no preemption from this point */ 1466 1467 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ 1468 1469 PMCDBG5(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p, 1470 p->p_pid, p->p_comm, pp); 1471 1472 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 1473 ("[pmc,%d] weird CPU id %d", __LINE__, cpu)); 1474 1475 pc = pmc_pcpu[cpu]; 1476 for (ri = 0; ri < md->pmd_npmc; ri++) { 1477 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL) 1478 continue; 1479 1480 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)), 1481 ("[pmc,%d] Target PMC in non-virtual mode (%d)", 1482 __LINE__, PMC_TO_MODE(pm))); 1483 KASSERT(PMC_TO_ROWINDEX(pm) == ri, 1484 ("[pmc,%d] Row index mismatch pmc %d != ri %d", 1485 __LINE__, PMC_TO_ROWINDEX(pm), ri)); 1486 1487 /* 1488 * Only PMCs that are marked as 'RUNNING' need 1489 * be placed on hardware. 1490 */ 1491 if (pm->pm_state != PMC_STATE_RUNNING) 1492 continue; 1493 1494 KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0, 1495 ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm, 1496 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); 1497 1498 /* increment PMC runcount */ 1499 counter_u64_add(pm->pm_runcount, 1); 1500 1501 /* configure the HWPMC we are going to use. */ 1502 pcd = pmc_ri_to_classdep(md, ri, &adjri); 1503 (void)pcd->pcd_config_pmc(cpu, adjri, pm); 1504 1505 phw = pc->pc_hwpmcs[ri]; 1506 1507 KASSERT(phw != NULL, 1508 ("[pmc,%d] null hw pointer", __LINE__)); 1509 1510 KASSERT(phw->phw_pmc == pm, 1511 ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__, 1512 phw->phw_pmc, pm)); 1513 1514 /* 1515 * Write out saved value and start the PMC. 1516 * 1517 * Sampling PMCs use a per-thread value, while 1518 * counting mode PMCs use a per-pmc value that is 1519 * inherited across descendants. 1520 */ 1521 if (PMC_TO_MODE(pm) == PMC_MODE_TS) { 1522 if (pt == NULL) 1523 pt = pmc_find_thread_descriptor(pp, td, 1524 PMC_FLAG_NONE); 1525 1526 KASSERT(pt != NULL, 1527 ("[pmc,%d] No thread found for td=%p", __LINE__, 1528 td)); 1529 1530 mtx_pool_lock_spin(pmc_mtxpool, pm); 1531 1532 /* 1533 * If we have a thread descriptor, use the per-thread 1534 * counter in the descriptor. If not, we will use 1535 * a per-process counter. 1536 * 1537 * TODO: Remove the per-process "safety net" once 1538 * we have thoroughly tested that we don't hit the 1539 * above assert. 1540 */ 1541 if (pt != NULL) { 1542 if (pt->pt_pmcs[ri].pt_pmcval > 0) 1543 newvalue = pt->pt_pmcs[ri].pt_pmcval; 1544 else 1545 newvalue = pm->pm_sc.pm_reloadcount; 1546 } else { 1547 /* 1548 * Use the saved value calculated after the most 1549 * recent time a thread using the shared counter 1550 * switched out. Reset the saved count in case 1551 * another thread from this process switches in 1552 * before any threads switch out. 
1553 */ 1554 newvalue = pp->pp_pmcs[ri].pp_pmcval; 1555 pp->pp_pmcs[ri].pp_pmcval = 1556 pm->pm_sc.pm_reloadcount; 1557 } 1558 mtx_pool_unlock_spin(pmc_mtxpool, pm); 1559 KASSERT(newvalue > 0 && newvalue <= 1560 pm->pm_sc.pm_reloadcount, 1561 ("[pmc,%d] pmcval outside of expected range cpu=%d " 1562 "ri=%d pmcval=%jx pm_reloadcount=%jx", __LINE__, 1563 cpu, ri, newvalue, pm->pm_sc.pm_reloadcount)); 1564 } else { 1565 KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC, 1566 ("[pmc,%d] illegal mode=%d", __LINE__, 1567 PMC_TO_MODE(pm))); 1568 mtx_pool_lock_spin(pmc_mtxpool, pm); 1569 newvalue = PMC_PCPU_SAVED(cpu, ri) = 1570 pm->pm_gv.pm_savedvalue; 1571 mtx_pool_unlock_spin(pmc_mtxpool, pm); 1572 } 1573 1574 PMCDBG3(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue); 1575 1576 (void)pcd->pcd_write_pmc(cpu, adjri, pm, newvalue); 1577 1578 /* If a sampling mode PMC, reset stalled state. */ 1579 if (PMC_TO_MODE(pm) == PMC_MODE_TS) 1580 pm->pm_pcpu_state[cpu].pps_stalled = 0; 1581 1582 /* Indicate that we desire this to run. */ 1583 pm->pm_pcpu_state[cpu].pps_cpustate = 1; 1584 1585 /* Start the PMC. */ 1586 (void)pcd->pcd_start_pmc(cpu, adjri, pm); 1587 } 1588 1589 /* 1590 * Perform any other architecture/cpu dependent thread 1591 * switch-in actions. 1592 */ 1593 (void)(*md->pmd_switch_in)(pc, pp); 1594 1595 critical_exit(); 1596 } 1597 1598 /* 1599 * Thread context switch OUT. 1600 */ 1601 static void 1602 pmc_process_csw_out(struct thread *td) 1603 { 1604 struct pmc *pm; 1605 struct pmc_classdep *pcd; 1606 struct pmc_cpu *pc; 1607 struct pmc_process *pp; 1608 struct pmc_thread *pt = NULL; 1609 struct proc *p; 1610 pmc_value_t newvalue; 1611 int64_t tmp; 1612 enum pmc_mode mode; 1613 int cpu; 1614 u_int adjri, ri; 1615 1616 /* 1617 * Locate our process descriptor; this may be NULL if 1618 * this process is exiting and we have already removed 1619 * the process from the target process table. 1620 * 1621 * Note that due to kernel preemption, multiple 1622 * context switches may happen while the process is 1623 * exiting. 1624 * 1625 * Note also that if the target process cannot be 1626 * found we still need to deconfigure any PMCs that 1627 * are currently running on hardware. 1628 */ 1629 p = td->td_proc; 1630 pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE); 1631 1632 critical_enter(); 1633 1634 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ 1635 1636 PMCDBG5(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p, 1637 p->p_pid, p->p_comm, pp); 1638 1639 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 1640 ("[pmc,%d weird CPU id %d", __LINE__, cpu)); 1641 1642 pc = pmc_pcpu[cpu]; 1643 1644 /* 1645 * When a PMC gets unlinked from a target PMC, it will 1646 * be removed from the target's pp_pmc[] array. 1647 * 1648 * However, on a MP system, the target could have been 1649 * executing on another CPU at the time of the unlink. 1650 * So, at context switch OUT time, we need to look at 1651 * the hardware to determine if a PMC is scheduled on 1652 * it. 1653 */ 1654 for (ri = 0; ri < md->pmd_npmc; ri++) { 1655 pcd = pmc_ri_to_classdep(md, ri, &adjri); 1656 pm = NULL; 1657 (void)(*pcd->pcd_get_config)(cpu, adjri, &pm); 1658 1659 if (pm == NULL) /* nothing at this row index */ 1660 continue; 1661 1662 mode = PMC_TO_MODE(pm); 1663 if (!PMC_IS_VIRTUAL_MODE(mode)) 1664 continue; /* not a process virtual PMC */ 1665 1666 KASSERT(PMC_TO_ROWINDEX(pm) == ri, 1667 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)", 1668 __LINE__, PMC_TO_ROWINDEX(pm), ri)); 1669 1670 /* 1671 * Change desired state, and then stop if not stalled. 
		 * This two-step dance should avoid race conditions where
		 * an interrupt re-enables the PMC after this code has
		 * already checked the pm_stalled flag.
		 */
		pm->pm_pcpu_state[cpu].pps_cpustate = 0;
		if (pm->pm_pcpu_state[cpu].pps_stalled == 0)
			(void)pcd->pcd_stop_pmc(cpu, adjri, pm);

		KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
		    ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm,
		    (uintmax_t)counter_u64_fetch(pm->pm_runcount)));

		/* reduce this PMC's runcount */
		counter_u64_add(pm->pm_runcount, -1);

		/*
		 * If this PMC is associated with this process,
		 * save the reading.
		 */
		if (pm->pm_state != PMC_STATE_DELETED && pp != NULL &&
		    pp->pp_pmcs[ri].pp_pmc != NULL) {
			KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
			    ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__,
			    pm, ri, pp->pp_pmcs[ri].pp_pmc));
			KASSERT(pp->pp_refcnt > 0,
			    ("[pmc,%d] pp refcnt = %d", __LINE__,
			    pp->pp_refcnt));

			(void)pcd->pcd_read_pmc(cpu, adjri, pm, &newvalue);

			if (mode == PMC_MODE_TS) {
				PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d val=%jd (samp)",
				    cpu, ri, newvalue);

				if (pt == NULL)
					pt = pmc_find_thread_descriptor(pp, td,
					    PMC_FLAG_NONE);

				KASSERT(pt != NULL,
				    ("[pmc,%d] No thread found for td=%p",
				    __LINE__, td));

				mtx_pool_lock_spin(pmc_mtxpool, pm);

				/*
				 * If we have a thread descriptor, save the
				 * per-thread counter in the descriptor.  If
				 * not, we will update the per-process counter.
				 *
				 * TODO: Remove the per-process "safety net"
				 * once we have thoroughly tested that we
				 * don't hit the above assert.
				 */
				if (pt != NULL) {
					pt->pt_pmcs[ri].pt_pmcval = newvalue;
				} else {
					/*
					 * For sampling process-virtual PMCs,
					 * newvalue is the number of events to
					 * be seen until the next sampling
					 * interrupt.  We can just add the
					 * events left from this invocation
					 * to the counter, then adjust in case
					 * we overflow our range.
					 *
					 * (Recall that we reload the counter
					 * every time we use it.)
					 */
					pp->pp_pmcs[ri].pp_pmcval += newvalue;
					if (pp->pp_pmcs[ri].pp_pmcval >
					    pm->pm_sc.pm_reloadcount) {
						pp->pp_pmcs[ri].pp_pmcval -=
						    pm->pm_sc.pm_reloadcount;
					}
				}
				mtx_pool_unlock_spin(pmc_mtxpool, pm);
			} else {
				tmp = newvalue - PMC_PCPU_SAVED(cpu, ri);

				PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd (count)",
				    cpu, ri, tmp);

				/*
				 * For counting process-virtual PMCs,
				 * we expect the count to be
				 * increasing monotonically, modulo a 64
				 * bit wraparound.
				 */
				KASSERT(tmp >= 0,
				    ("[pmc,%d] negative increment cpu=%d "
				    "ri=%d newvalue=%jx saved=%jx "
				    "incr=%jx", __LINE__, cpu, ri,
				    newvalue, PMC_PCPU_SAVED(cpu, ri), tmp));

				mtx_pool_lock_spin(pmc_mtxpool, pm);
				pm->pm_gv.pm_savedvalue += tmp;
				pp->pp_pmcs[ri].pp_pmcval += tmp;
				mtx_pool_unlock_spin(pmc_mtxpool, pm);

				if (pm->pm_flags & PMC_F_LOG_PROCCSW)
					pmclog_process_proccsw(pm, pp, tmp, td);
			}
		}

		/* Mark hardware as free. */
		(void)pcd->pcd_config_pmc(cpu, adjri, NULL);
	}

	/*
	 * Perform any other architecture/cpu dependent thread
	 * switch out functions.
	 */
	(void)(*md->pmd_switch_out)(pc, pp);

	critical_exit();
}

/*
 * A new thread for a process.
 */
1791 */ 1792 static void 1793 pmc_process_thread_add(struct thread *td) 1794 { 1795 struct pmc_process *pmc; 1796 1797 pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE); 1798 if (pmc != NULL) 1799 pmc_find_thread_descriptor(pmc, td, PMC_FLAG_ALLOCATE); 1800 } 1801 1802 /* 1803 * A thread delete for a process. 1804 */ 1805 static void 1806 pmc_process_thread_delete(struct thread *td) 1807 { 1808 struct pmc_process *pmc; 1809 1810 pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE); 1811 if (pmc != NULL) 1812 pmc_thread_descriptor_pool_free(pmc_find_thread_descriptor(pmc, 1813 td, PMC_FLAG_REMOVE)); 1814 } 1815 1816 /* 1817 * A userret() call for a thread. 1818 */ 1819 static void 1820 pmc_process_thread_userret(struct thread *td) 1821 { 1822 sched_pin(); 1823 pmc_capture_user_callchain(curcpu, PMC_UR, td->td_frame); 1824 sched_unpin(); 1825 } 1826 1827 /* 1828 * A mapping change for a process. 1829 */ 1830 static void 1831 pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm) 1832 { 1833 const struct pmc *pm; 1834 const struct pmc_process *pp; 1835 struct pmc_owner *po; 1836 char *fullpath, *freepath; 1837 pid_t pid; 1838 int ri; 1839 1840 MPASS(!in_epoch(global_epoch_preempt)); 1841 1842 freepath = fullpath = NULL; 1843 pmc_getfilename((struct vnode *)pkm->pm_file, &fullpath, &freepath); 1844 1845 pid = td->td_proc->p_pid; 1846 1847 PMC_EPOCH_ENTER(); 1848 /* Inform owners of all system-wide sampling PMCs. */ 1849 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) { 1850 if (po->po_flags & PMC_PO_OWNS_LOGFILE) 1851 pmclog_process_map_in(po, pid, pkm->pm_address, 1852 fullpath); 1853 } 1854 1855 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL) 1856 goto done; 1857 1858 /* 1859 * Inform sampling PMC owners tracking this process. 1860 */ 1861 for (ri = 0; ri < md->pmd_npmc; ri++) { 1862 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL && 1863 PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { 1864 pmclog_process_map_in(pm->pm_owner, 1865 pid, pkm->pm_address, fullpath); 1866 } 1867 } 1868 1869 done: 1870 if (freepath != NULL) 1871 free(freepath, M_TEMP); 1872 PMC_EPOCH_EXIT(); 1873 } 1874 1875 /* 1876 * Log an munmap request. 1877 */ 1878 static void 1879 pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm) 1880 { 1881 const struct pmc *pm; 1882 const struct pmc_process *pp; 1883 struct pmc_owner *po; 1884 pid_t pid; 1885 int ri; 1886 1887 pid = td->td_proc->p_pid; 1888 1889 PMC_EPOCH_ENTER(); 1890 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) { 1891 if (po->po_flags & PMC_PO_OWNS_LOGFILE) 1892 pmclog_process_map_out(po, pid, pkm->pm_address, 1893 pkm->pm_address + pkm->pm_size); 1894 } 1895 PMC_EPOCH_EXIT(); 1896 1897 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL) 1898 return; 1899 1900 for (ri = 0; ri < md->pmd_npmc; ri++) { 1901 pm = pp->pp_pmcs[ri].pp_pmc; 1902 if (pm != NULL && PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { 1903 pmclog_process_map_out(pm->pm_owner, pid, 1904 pkm->pm_address, pkm->pm_address + pkm->pm_size); 1905 } 1906 } 1907 } 1908 1909 /* 1910 * Log mapping information about the kernel. 
1911 */ 1912 static void 1913 pmc_log_kernel_mappings(struct pmc *pm) 1914 { 1915 struct pmc_owner *po; 1916 struct pmckern_map_in *km, *kmbase; 1917 1918 MPASS(in_epoch(global_epoch_preempt) || sx_xlocked(&pmc_sx)); 1919 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)), 1920 ("[pmc,%d] non-sampling PMC (%p) desires mapping information", 1921 __LINE__, (void *) pm)); 1922 1923 po = pm->pm_owner; 1924 if ((po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE) != 0) 1925 return; 1926 1927 if (PMC_TO_MODE(pm) == PMC_MODE_SS) 1928 pmc_process_allproc(pm); 1929 1930 /* 1931 * Log the current set of kernel modules. 1932 */ 1933 kmbase = linker_hwpmc_list_objects(); 1934 for (km = kmbase; km->pm_file != NULL; km++) { 1935 PMCDBG2(LOG,REG,1,"%s %p", (char *)km->pm_file, 1936 (void *)km->pm_address); 1937 pmclog_process_map_in(po, (pid_t)-1, km->pm_address, 1938 km->pm_file); 1939 } 1940 free(kmbase, M_LINKER); 1941 1942 po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE; 1943 } 1944 1945 /* 1946 * Log the mappings for a single process. 1947 */ 1948 static void 1949 pmc_log_process_mappings(struct pmc_owner *po, struct proc *p) 1950 { 1951 vm_map_t map; 1952 vm_map_entry_t entry; 1953 vm_object_t obj, lobj, tobj; 1954 vm_offset_t last_end; 1955 vm_offset_t start_addr; 1956 struct vnode *vp, *last_vp; 1957 struct vmspace *vm; 1958 char *fullpath, *freepath; 1959 u_int last_timestamp; 1960 1961 last_vp = NULL; 1962 last_end = (vm_offset_t)0; 1963 fullpath = freepath = NULL; 1964 1965 if ((vm = vmspace_acquire_ref(p)) == NULL) 1966 return; 1967 1968 map = &vm->vm_map; 1969 vm_map_lock_read(map); 1970 VM_MAP_ENTRY_FOREACH(entry, map) { 1971 if (entry == NULL) { 1972 PMCDBG2(LOG,OPS,2, "hwpmc: vm_map entry unexpectedly " 1973 "NULL! pid=%d vm_map=%p\n", p->p_pid, map); 1974 break; 1975 } 1976 1977 /* 1978 * We only care about executable map entries. 1979 */ 1980 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || 1981 (entry->protection & VM_PROT_EXECUTE) == 0 || 1982 entry->object.vm_object == NULL) { 1983 continue; 1984 } 1985 1986 obj = entry->object.vm_object; 1987 VM_OBJECT_RLOCK(obj); 1988 1989 /* 1990 * Walk the backing_object list to find the base (non-shadowed) 1991 * vm_object. 1992 */ 1993 for (lobj = tobj = obj; tobj != NULL; 1994 tobj = tobj->backing_object) { 1995 if (tobj != obj) 1996 VM_OBJECT_RLOCK(tobj); 1997 if (lobj != obj) 1998 VM_OBJECT_RUNLOCK(lobj); 1999 lobj = tobj; 2000 } 2001 2002 /* 2003 * At this point lobj is the base vm_object and it is locked. 2004 */ 2005 if (lobj == NULL) { 2006 PMCDBG3(LOG,OPS,2, 2007 "hwpmc: lobj unexpectedly NULL! pid=%d " 2008 "vm_map=%p vm_obj=%p\n", p->p_pid, map, obj); 2009 VM_OBJECT_RUNLOCK(obj); 2010 continue; 2011 } 2012 2013 vp = vm_object_vnode(lobj); 2014 if (vp == NULL) { 2015 if (lobj != obj) 2016 VM_OBJECT_RUNLOCK(lobj); 2017 VM_OBJECT_RUNLOCK(obj); 2018 continue; 2019 } 2020 2021 /* 2022 * Skip contiguous regions that point to the same vnode, so we 2023 * don't emit redundant MAP-IN directives. 2024 */ 2025 if (entry->start == last_end && vp == last_vp) { 2026 last_end = entry->end; 2027 if (lobj != obj) 2028 VM_OBJECT_RUNLOCK(lobj); 2029 VM_OBJECT_RUNLOCK(obj); 2030 continue; 2031 } 2032 2033 /* 2034 * We don't want to keep the proc's vm_map or this vm_object 2035 * locked while we walk the pathname, since vn_fullpath() can 2036 * sleep. However, if we drop the lock, it's possible for 2037 * concurrent activity to modify the vm_map list. 
To protect 2038 * against this, we save the vm_map timestamp before we release 2039 * the lock, and check it after we reacquire the lock below. 2040 */ 2041 start_addr = entry->start; 2042 last_end = entry->end; 2043 last_timestamp = map->timestamp; 2044 vm_map_unlock_read(map); 2045 2046 vref(vp); 2047 if (lobj != obj) 2048 VM_OBJECT_RUNLOCK(lobj); 2049 VM_OBJECT_RUNLOCK(obj); 2050 2051 freepath = NULL; 2052 pmc_getfilename(vp, &fullpath, &freepath); 2053 last_vp = vp; 2054 2055 vrele(vp); 2056 2057 vp = NULL; 2058 pmclog_process_map_in(po, p->p_pid, start_addr, fullpath); 2059 if (freepath != NULL) 2060 free(freepath, M_TEMP); 2061 2062 vm_map_lock_read(map); 2063 2064 /* 2065 * If our saved timestamp doesn't match, this means 2066 * that the vm_map was modified out from under us and 2067 * we can't trust our current "entry" pointer. Do a 2068 * new lookup for this entry. If there is no entry 2069 * for this address range, vm_map_lookup_entry() will 2070 * return the previous one, so we always want to go to 2071 * the next entry on the next loop iteration. 2072 * 2073 * There is an edge condition here that can occur if 2074 * there is no entry at or before this address. In 2075 * this situation, vm_map_lookup_entry returns 2076 * &map->header, which would cause our loop to abort 2077 * without processing the rest of the map. However, 2078 * in practice this will never happen for process 2079 * vm_map. This is because the executable's text 2080 * segment is the first mapping in the proc's address 2081 * space, and this mapping is never removed until the 2082 * process exits, so there will always be a non-header 2083 * entry at or before the requested address for 2084 * vm_map_lookup_entry to return. 2085 */ 2086 if (map->timestamp != last_timestamp) 2087 vm_map_lookup_entry(map, last_end - 1, &entry); 2088 } 2089 2090 vm_map_unlock_read(map); 2091 vmspace_free(vm); 2092 return; 2093 } 2094 2095 /* 2096 * Log mappings for all processes in the system. 
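 * The loop below performs a depth-first walk of the process tree rooted
 * at init (pid 1), following the p_children, p_sibling and p_pptr links
 * while holding proctree_lock shared.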
2097  */
2098 static void
2099 pmc_log_all_process_mappings(struct pmc_owner *po)
2100 {
2101 	struct proc *p, *top;
2102 
2103 	sx_assert(&pmc_sx, SX_XLOCKED);
2104 
2105 	if ((p = pfind(1)) == NULL)
2106 		panic("[pmc,%d] Cannot find init", __LINE__);
2107 
2108 	PROC_UNLOCK(p);
2109 
2110 	sx_slock(&proctree_lock);
2111 
2112 	top = p;
2113 	for (;;) {
2114 		pmc_log_process_mappings(po, p);
2115 		if (!LIST_EMPTY(&p->p_children))
2116 			p = LIST_FIRST(&p->p_children);
2117 		else for (;;) {
2118 			if (p == top)
2119 				goto done;
2120 			if (LIST_NEXT(p, p_sibling)) {
2121 				p = LIST_NEXT(p, p_sibling);
2122 				break;
2123 			}
2124 			p = p->p_pptr;
2125 		}
2126 	}
2127 done:
2128 	sx_sunlock(&proctree_lock);
2129 }
2130 
2131 #ifdef HWPMC_DEBUG
2132 const char *pmc_hooknames[] = {
2133 	/* these strings correspond to PMC_FN_* in <sys/pmckern.h> */
2134 	"",
2135 	"EXEC",
2136 	"CSW-IN",
2137 	"CSW-OUT",
2138 	"SAMPLE",
2139 	"UNUSED1",
2140 	"UNUSED2",
2141 	"MMAP",
2142 	"MUNMAP",
2143 	"CALLCHAIN-NMI",
2144 	"CALLCHAIN-SOFT",
2145 	"SOFTSAMPLING",
2146 	"THR-CREATE",
2147 	"THR-EXIT",
2148 	"THR-USERRET",
2149 	"THR-CREATE-LOG",
2150 	"THR-EXIT-LOG",
2151 	"PROC-CREATE-LOG"
2152 };
2153 #endif
2154 
2155 /*
2156  * The 'hook' invoked from the kernel proper.
2157  */
2158 static int
2159 pmc_hook_handler(struct thread *td, int function, void *arg)
2160 {
2161 	int cpu;
2162 
2163 	PMCDBG4(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
2164 	    pmc_hooknames[function], arg);
2165 
2166 	switch (function) {
2167 	case PMC_FN_PROCESS_EXEC:
2168 		pmc_process_exec(td, (struct pmckern_procexec *)arg);
2169 		break;
2170 
2171 	case PMC_FN_CSW_IN:
2172 		pmc_process_csw_in(td);
2173 		break;
2174 
2175 	case PMC_FN_CSW_OUT:
2176 		pmc_process_csw_out(td);
2177 		break;
2178 
2179 	/*
2180 	 * Process accumulated PC samples.
2181 	 *
2182 	 * This function is expected to be called by hardclock() for
2183 	 * each CPU that has accumulated PC samples.
2184 	 *
2185 	 * This function is to be executed on the CPU whose samples
2186 	 * are being processed.
2187 	 */
2188 	case PMC_FN_DO_SAMPLES:
2189 		/*
2190 		 * Clear the CPU-specific bit in the CPU mask before
2191 		 * doing the rest of the processing.  If the NMI handler
2192 		 * gets invoked after the "DPCPU_SET()" call
2193 		 * below but before "pmc_process_samples()" gets
2194 		 * around to processing the interrupt, then we will
2195 		 * come back here at the next hardclock() tick (and
2196 		 * may find nothing to do if "pmc_process_samples()"
2197 		 * had already processed the interrupt).  We don't
2198 		 * lose the interrupt sample.
2199 		 */
2200 		DPCPU_SET(pmc_sampled, 0);
2201 		cpu = PCPU_GET(cpuid);
2202 		pmc_process_samples(cpu, PMC_HR);
2203 		pmc_process_samples(cpu, PMC_SR);
2204 		pmc_process_samples(cpu, PMC_UR);
2205 		break;
2206 
2207 	case PMC_FN_MMAP:
2208 		pmc_process_mmap(td, (struct pmckern_map_in *)arg);
2209 		break;
2210 
2211 	case PMC_FN_MUNMAP:
2212 		MPASS(in_epoch(global_epoch_preempt) || sx_xlocked(&pmc_sx));
2213 		pmc_process_munmap(td, (struct pmckern_map_out *)arg);
2214 		break;
2215 
2216 	case PMC_FN_PROC_CREATE_LOG:
2217 		pmc_process_proccreate((struct proc *)arg);
2218 		break;
2219 
2220 	case PMC_FN_USER_CALLCHAIN:
2221 		/*
2222 		 * Record a call chain.
2223 		 */
2224 		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2225 		    __LINE__));
2226 
2227 		pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_HR,
2228 		    (struct trapframe *)arg);
2229 
2230 		KASSERT(td->td_pinned == 1,
2231 		    ("[pmc,%d] invalid td_pinned value", __LINE__));
2232 		sched_unpin();	/* Can migrate safely now.
 */
2233 
2234 		td->td_pflags &= ~TDP_CALLCHAIN;
2235 		break;
2236 
2237 	case PMC_FN_USER_CALLCHAIN_SOFT:
2238 		/*
2239 		 * Record a call chain.
2240 		 */
2241 		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2242 		    __LINE__));
2243 
2244 		cpu = PCPU_GET(cpuid);
2245 		pmc_capture_user_callchain(cpu, PMC_SR,
2246 		    (struct trapframe *)arg);
2247 
2248 		KASSERT(td->td_pinned == 1,
2249 		    ("[pmc,%d] invalid td_pinned value", __LINE__));
2250 
2251 		sched_unpin();	/* Can migrate safely now. */
2252 
2253 		td->td_pflags &= ~TDP_CALLCHAIN;
2254 		break;
2255 
2256 	case PMC_FN_SOFT_SAMPLING:
2257 		/*
2258 		 * Call soft PMC sampling intr.
2259 		 */
2260 		pmc_soft_intr((struct pmckern_soft *)arg);
2261 		break;
2262 
2263 	case PMC_FN_THR_CREATE:
2264 		pmc_process_thread_add(td);
2265 		pmc_process_threadcreate(td);
2266 		break;
2267 
2268 	case PMC_FN_THR_CREATE_LOG:
2269 		pmc_process_threadcreate(td);
2270 		break;
2271 
2272 	case PMC_FN_THR_EXIT:
2273 		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2274 		    __LINE__));
2275 		pmc_process_thread_delete(td);
2276 		pmc_process_threadexit(td);
2277 		break;
2278 	case PMC_FN_THR_EXIT_LOG:
2279 		pmc_process_threadexit(td);
2280 		break;
2281 	case PMC_FN_THR_USERRET:
2282 		KASSERT(td == curthread, ("[pmc,%d] td != curthread",
2283 		    __LINE__));
2284 		pmc_process_thread_userret(td);
2285 		break;
2286 	default:
2287 #ifdef HWPMC_DEBUG
2288 		KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
2289 #endif
2290 		break;
2291 	}
2292 
2293 	return (0);
2294 }
2295 
2296 /*
2297  * Allocate a 'struct pmc_owner' descriptor in the owner hash table.
2298  */
2299 static struct pmc_owner *
2300 pmc_allocate_owner_descriptor(struct proc *p)
2301 {
2302 	struct pmc_owner *po;
2303 	struct pmc_ownerhash *poh;
2304 	uint32_t hindex;
2305 
2306 	hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
2307 	poh = &pmc_ownerhash[hindex];
2308 
2309 	/* Allocate a zeroed owner descriptor. */
2310 	po = malloc(sizeof(struct pmc_owner), M_PMC, M_WAITOK | M_ZERO);
2311 	po->po_owner = p;
2312 	LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
2313 
2314 	TAILQ_INIT(&po->po_logbuffers);
2315 	mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN);
2316 
2317 	PMCDBG4(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
2318 	    p, p->p_pid, p->p_comm, po);
2319 
2320 	return (po);
2321 }
2322 
2323 static void
2324 pmc_destroy_owner_descriptor(struct pmc_owner *po)
2325 {
2326 
2327 	PMCDBG4(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)",
2328 	    po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm);
2329 
2330 	mtx_destroy(&po->po_mtx);
2331 	free(po, M_PMC);
2332 }
2333 
2334 /*
2335  * Allocate a thread descriptor from the free pool.
2336  *
2337  * NOTE: This *can* return NULL.
2338  */
2339 static struct pmc_thread *
2340 pmc_thread_descriptor_pool_alloc(void)
2341 {
2342 	struct pmc_thread *pt;
2343 
2344 	mtx_lock_spin(&pmc_threadfreelist_mtx);
2345 	if ((pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) {
2346 		LIST_REMOVE(pt, pt_next);
2347 		pmc_threadfreelist_entries--;
2348 	}
2349 	mtx_unlock_spin(&pmc_threadfreelist_mtx);
2350 
2351 	return (pt);
2352 }
2353 
2354 /*
2355  * Add a thread descriptor to the free pool.  We use this instead of free()
2356  * to maintain a cache of free entries.  Additionally, we can safely call
2357  * this function when we cannot call free(), such as in a critical section.
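 *
 * If the pool grows past pmc_threadfreelist_max, the excess entries are
 * not freed here; free_task is enqueued instead, so that the trimming
 * happens in a context that is allowed to call free().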
2358 */ 2359 static void 2360 pmc_thread_descriptor_pool_free(struct pmc_thread *pt) 2361 { 2362 2363 if (pt == NULL) 2364 return; 2365 2366 memset(pt, 0, THREADENTRY_SIZE); 2367 mtx_lock_spin(&pmc_threadfreelist_mtx); 2368 LIST_INSERT_HEAD(&pmc_threadfreelist, pt, pt_next); 2369 pmc_threadfreelist_entries++; 2370 if (pmc_threadfreelist_entries > pmc_threadfreelist_max) 2371 taskqueue_enqueue(taskqueue_fast, &free_task); 2372 mtx_unlock_spin(&pmc_threadfreelist_mtx); 2373 } 2374 2375 /* 2376 * An asynchronous task to manage the free list. 2377 */ 2378 static void 2379 pmc_thread_descriptor_pool_free_task(void *arg __unused, int pending __unused) 2380 { 2381 struct pmc_thread *pt; 2382 LIST_HEAD(, pmc_thread) tmplist; 2383 int delta; 2384 2385 LIST_INIT(&tmplist); 2386 2387 /* Determine what changes, if any, we need to make. */ 2388 mtx_lock_spin(&pmc_threadfreelist_mtx); 2389 delta = pmc_threadfreelist_entries - pmc_threadfreelist_max; 2390 while (delta > 0 && (pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) { 2391 delta--; 2392 pmc_threadfreelist_entries--; 2393 LIST_REMOVE(pt, pt_next); 2394 LIST_INSERT_HEAD(&tmplist, pt, pt_next); 2395 } 2396 mtx_unlock_spin(&pmc_threadfreelist_mtx); 2397 2398 /* If there are entries to free, free them. */ 2399 while (!LIST_EMPTY(&tmplist)) { 2400 pt = LIST_FIRST(&tmplist); 2401 LIST_REMOVE(pt, pt_next); 2402 free(pt, M_PMC); 2403 } 2404 } 2405 2406 /* 2407 * Drain the thread free pool, freeing all allocations. 2408 */ 2409 static void 2410 pmc_thread_descriptor_pool_drain(void) 2411 { 2412 struct pmc_thread *pt, *next; 2413 2414 LIST_FOREACH_SAFE(pt, &pmc_threadfreelist, pt_next, next) { 2415 LIST_REMOVE(pt, pt_next); 2416 free(pt, M_PMC); 2417 } 2418 } 2419 2420 /* 2421 * find the descriptor corresponding to thread 'td', adding or removing it 2422 * as specified by 'mode'. 2423 * 2424 * Note that this supports additional mode flags in addition to those 2425 * supported by pmc_find_process_descriptor(): 2426 * PMC_FLAG_NOWAIT: Causes the function to not wait for mallocs. 2427 * This makes it safe to call while holding certain other locks. 2428 */ 2429 static struct pmc_thread * 2430 pmc_find_thread_descriptor(struct pmc_process *pp, struct thread *td, 2431 uint32_t mode) 2432 { 2433 struct pmc_thread *pt = NULL, *ptnew = NULL; 2434 int wait_flag; 2435 2436 KASSERT(td != NULL, ("[pmc,%d] called to add NULL td", __LINE__)); 2437 2438 /* 2439 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case prior to 2440 * acquiring the lock. 2441 */ 2442 if ((mode & PMC_FLAG_ALLOCATE) != 0) { 2443 if ((ptnew = pmc_thread_descriptor_pool_alloc()) == NULL) { 2444 wait_flag = M_WAITOK; 2445 if ((mode & PMC_FLAG_NOWAIT) != 0 || 2446 in_epoch(global_epoch_preempt)) 2447 wait_flag = M_NOWAIT; 2448 2449 ptnew = malloc(THREADENTRY_SIZE, M_PMC, 2450 wait_flag | M_ZERO); 2451 } 2452 } 2453 2454 mtx_lock_spin(pp->pp_tdslock); 2455 LIST_FOREACH(pt, &pp->pp_tds, pt_next) { 2456 if (pt->pt_td == td) 2457 break; 2458 } 2459 2460 if ((mode & PMC_FLAG_REMOVE) != 0 && pt != NULL) 2461 LIST_REMOVE(pt, pt_next); 2462 2463 if ((mode & PMC_FLAG_ALLOCATE) != 0 && pt == NULL && ptnew != NULL) { 2464 pt = ptnew; 2465 ptnew = NULL; 2466 pt->pt_td = td; 2467 LIST_INSERT_HEAD(&pp->pp_tds, pt, pt_next); 2468 } 2469 2470 mtx_unlock_spin(pp->pp_tdslock); 2471 2472 if (ptnew != NULL) { 2473 free(ptnew, M_PMC); 2474 } 2475 2476 return (pt); 2477 } 2478 2479 /* 2480 * Try to add thread descriptors for each thread in a process. 
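 *
 * The thread count can change once the proc lock is dropped, so the
 * code below sizes a scratch array, re-counts the threads under the
 * lock, and restarts whenever the array turns out to be too small or a
 * non-sleeping descriptor allocation fails.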
2481 */ 2482 static void 2483 pmc_add_thread_descriptors_from_proc(struct proc *p, struct pmc_process *pp) 2484 { 2485 struct pmc_thread **tdlist; 2486 struct thread *curtd; 2487 int i, tdcnt, tdlistsz; 2488 2489 KASSERT(!PROC_LOCKED(p), ("[pmc,%d] proc unexpectedly locked", 2490 __LINE__)); 2491 tdcnt = 32; 2492 restart: 2493 tdlistsz = roundup2(tdcnt, 32); 2494 2495 tdcnt = 0; 2496 tdlist = malloc(sizeof(struct pmc_thread *) * tdlistsz, M_TEMP, 2497 M_WAITOK); 2498 2499 PROC_LOCK(p); 2500 FOREACH_THREAD_IN_PROC(p, curtd) 2501 tdcnt++; 2502 if (tdcnt >= tdlistsz) { 2503 PROC_UNLOCK(p); 2504 free(tdlist, M_TEMP); 2505 goto restart; 2506 } 2507 2508 /* 2509 * Try to add each thread to the list without sleeping. If unable, 2510 * add to a queue to retry after dropping the process lock. 2511 */ 2512 tdcnt = 0; 2513 FOREACH_THREAD_IN_PROC(p, curtd) { 2514 tdlist[tdcnt] = pmc_find_thread_descriptor(pp, curtd, 2515 PMC_FLAG_ALLOCATE | PMC_FLAG_NOWAIT); 2516 if (tdlist[tdcnt] == NULL) { 2517 PROC_UNLOCK(p); 2518 for (i = 0; i <= tdcnt; i++) 2519 pmc_thread_descriptor_pool_free(tdlist[i]); 2520 free(tdlist, M_TEMP); 2521 goto restart; 2522 } 2523 tdcnt++; 2524 } 2525 PROC_UNLOCK(p); 2526 free(tdlist, M_TEMP); 2527 } 2528 2529 /* 2530 * Find the descriptor corresponding to process 'p', adding or removing it 2531 * as specified by 'mode'. 2532 */ 2533 static struct pmc_process * 2534 pmc_find_process_descriptor(struct proc *p, uint32_t mode) 2535 { 2536 struct pmc_process *pp, *ppnew; 2537 struct pmc_processhash *pph; 2538 uint32_t hindex; 2539 2540 hindex = PMC_HASH_PTR(p, pmc_processhashmask); 2541 pph = &pmc_processhash[hindex]; 2542 2543 ppnew = NULL; 2544 2545 /* 2546 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we 2547 * cannot call malloc(9) once we hold a spin lock. 2548 */ 2549 if ((mode & PMC_FLAG_ALLOCATE) != 0) 2550 ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc * 2551 sizeof(struct pmc_targetstate), M_PMC, M_WAITOK | M_ZERO); 2552 2553 mtx_lock_spin(&pmc_processhash_mtx); 2554 LIST_FOREACH(pp, pph, pp_next) { 2555 if (pp->pp_proc == p) 2556 break; 2557 } 2558 2559 if ((mode & PMC_FLAG_REMOVE) != 0 && pp != NULL) 2560 LIST_REMOVE(pp, pp_next); 2561 2562 if ((mode & PMC_FLAG_ALLOCATE) != 0 && pp == NULL && ppnew != NULL) { 2563 ppnew->pp_proc = p; 2564 LIST_INIT(&ppnew->pp_tds); 2565 ppnew->pp_tdslock = mtx_pool_find(pmc_mtxpool, ppnew); 2566 LIST_INSERT_HEAD(pph, ppnew, pp_next); 2567 mtx_unlock_spin(&pmc_processhash_mtx); 2568 pp = ppnew; 2569 ppnew = NULL; 2570 2571 /* Add thread descriptors for this process' current threads. */ 2572 pmc_add_thread_descriptors_from_proc(p, pp); 2573 } else 2574 mtx_unlock_spin(&pmc_processhash_mtx); 2575 2576 if (ppnew != NULL) 2577 free(ppnew, M_PMC); 2578 return (pp); 2579 } 2580 2581 /* 2582 * Remove a process descriptor from the process hash table. 2583 */ 2584 static void 2585 pmc_remove_process_descriptor(struct pmc_process *pp) 2586 { 2587 KASSERT(pp->pp_refcnt == 0, 2588 ("[pmc,%d] Removing process descriptor %p with count %d", 2589 __LINE__, pp, pp->pp_refcnt)); 2590 2591 mtx_lock_spin(&pmc_processhash_mtx); 2592 LIST_REMOVE(pp, pp_next); 2593 mtx_unlock_spin(&pmc_processhash_mtx); 2594 } 2595 2596 /* 2597 * Destroy a process descriptor. 
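 * Any thread descriptors still linked to the process are returned to
 * the thread descriptor free pool before the descriptor itself is
 * freed.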
2598 */ 2599 static void 2600 pmc_destroy_process_descriptor(struct pmc_process *pp) 2601 { 2602 struct pmc_thread *pmc_td; 2603 2604 while ((pmc_td = LIST_FIRST(&pp->pp_tds)) != NULL) { 2605 LIST_REMOVE(pmc_td, pt_next); 2606 pmc_thread_descriptor_pool_free(pmc_td); 2607 } 2608 free(pp, M_PMC); 2609 } 2610 2611 /* 2612 * Find an owner descriptor corresponding to proc 'p'. 2613 */ 2614 static struct pmc_owner * 2615 pmc_find_owner_descriptor(struct proc *p) 2616 { 2617 struct pmc_owner *po; 2618 struct pmc_ownerhash *poh; 2619 uint32_t hindex; 2620 2621 hindex = PMC_HASH_PTR(p, pmc_ownerhashmask); 2622 poh = &pmc_ownerhash[hindex]; 2623 2624 po = NULL; 2625 LIST_FOREACH(po, poh, po_next) { 2626 if (po->po_owner == p) 2627 break; 2628 } 2629 2630 PMCDBG5(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> " 2631 "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po); 2632 2633 return (po); 2634 } 2635 2636 /* 2637 * Allocate a pmc descriptor and initialize its fields. 2638 */ 2639 static struct pmc * 2640 pmc_allocate_pmc_descriptor(void) 2641 { 2642 struct pmc *pmc; 2643 2644 pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK | M_ZERO); 2645 pmc->pm_runcount = counter_u64_alloc(M_WAITOK); 2646 pmc->pm_pcpu_state = malloc(sizeof(struct pmc_pcpu_state) * mp_ncpus, 2647 M_PMC, M_WAITOK | M_ZERO); 2648 PMCDBG1(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc); 2649 2650 return (pmc); 2651 } 2652 2653 /* 2654 * Destroy a pmc descriptor. 2655 */ 2656 static void 2657 pmc_destroy_pmc_descriptor(struct pmc *pm) 2658 { 2659 2660 KASSERT(pm->pm_state == PMC_STATE_DELETED || 2661 pm->pm_state == PMC_STATE_FREE, 2662 ("[pmc,%d] destroying non-deleted PMC", __LINE__)); 2663 KASSERT(LIST_EMPTY(&pm->pm_targets), 2664 ("[pmc,%d] destroying pmc with targets", __LINE__)); 2665 KASSERT(pm->pm_owner == NULL, 2666 ("[pmc,%d] destroying pmc attached to an owner", __LINE__)); 2667 KASSERT(counter_u64_fetch(pm->pm_runcount) == 0, 2668 ("[pmc,%d] pmc has non-zero run count %ju", __LINE__, 2669 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); 2670 2671 counter_u64_free(pm->pm_runcount); 2672 free(pm->pm_pcpu_state, M_PMC); 2673 free(pm, M_PMC); 2674 } 2675 2676 static void 2677 pmc_wait_for_pmc_idle(struct pmc *pm) 2678 { 2679 #ifdef INVARIANTS 2680 volatile int maxloop; 2681 2682 maxloop = 100 * pmc_cpu_max(); 2683 #endif 2684 /* 2685 * Loop (with a forced context switch) till the PMC's runcount 2686 * comes down to zero. 2687 */ 2688 pmclog_flush(pm->pm_owner, 1); 2689 while (counter_u64_fetch(pm->pm_runcount) > 0) { 2690 pmclog_flush(pm->pm_owner, 1); 2691 #ifdef INVARIANTS 2692 maxloop--; 2693 KASSERT(maxloop > 0, 2694 ("[pmc,%d] (ri%d, rc%ju) waiting too long for " 2695 "pmc to be free", __LINE__, PMC_TO_ROWINDEX(pm), 2696 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); 2697 #endif 2698 pmc_force_context_switch(); 2699 } 2700 } 2701 2702 /* 2703 * This function does the following things: 2704 * 2705 * - detaches the PMC from hardware 2706 * - unlinks all target threads that were attached to it 2707 * - removes the PMC from its owner's list 2708 * - destroys the PMC private mutex 2709 * 2710 * Once this function completes, the given pmc pointer can be freed by 2711 * calling pmc_destroy_pmc_descriptor(). 
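 *
 * A typical release sequence, as in pmc_do_op_pmcrelease() below, is:
 *
 *	po = pm->pm_owner;
 *	pmc_release_pmc_descriptor(pm);
 *	pmc_maybe_remove_owner(po);
 *	pmc_destroy_pmc_descriptor(pm);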
2712 */ 2713 static void 2714 pmc_release_pmc_descriptor(struct pmc *pm) 2715 { 2716 struct pmc_binding pb; 2717 struct pmc_classdep *pcd; 2718 struct pmc_hw *phw __diagused; 2719 struct pmc_owner *po; 2720 struct pmc_process *pp; 2721 struct pmc_target *ptgt, *tmp; 2722 enum pmc_mode mode; 2723 u_int adjri, ri, cpu; 2724 2725 sx_assert(&pmc_sx, SX_XLOCKED); 2726 KASSERT(pm, ("[pmc,%d] null pmc", __LINE__)); 2727 2728 ri = PMC_TO_ROWINDEX(pm); 2729 pcd = pmc_ri_to_classdep(md, ri, &adjri); 2730 mode = PMC_TO_MODE(pm); 2731 2732 PMCDBG3(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri, 2733 mode); 2734 2735 /* 2736 * First, we take the PMC off hardware. 2737 */ 2738 cpu = 0; 2739 if (PMC_IS_SYSTEM_MODE(mode)) { 2740 /* 2741 * A system mode PMC runs on a specific CPU. Switch 2742 * to this CPU and turn hardware off. 2743 */ 2744 pmc_save_cpu_binding(&pb); 2745 cpu = PMC_TO_CPU(pm); 2746 pmc_select_cpu(cpu); 2747 2748 /* switch off non-stalled CPUs */ 2749 pm->pm_pcpu_state[cpu].pps_cpustate = 0; 2750 if (pm->pm_state == PMC_STATE_RUNNING && 2751 pm->pm_pcpu_state[cpu].pps_stalled == 0) { 2752 2753 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri]; 2754 2755 KASSERT(phw->phw_pmc == pm, 2756 ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)", 2757 __LINE__, ri, phw->phw_pmc, pm)); 2758 PMCDBG2(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri); 2759 2760 critical_enter(); 2761 (void)pcd->pcd_stop_pmc(cpu, adjri, pm); 2762 critical_exit(); 2763 } 2764 2765 PMCDBG2(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri); 2766 2767 critical_enter(); 2768 (void)pcd->pcd_config_pmc(cpu, adjri, NULL); 2769 critical_exit(); 2770 2771 /* adjust the global and process count of SS mode PMCs */ 2772 if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) { 2773 po = pm->pm_owner; 2774 po->po_sscount--; 2775 if (po->po_sscount == 0) { 2776 atomic_subtract_rel_int(&pmc_ss_count, 1); 2777 CK_LIST_REMOVE(po, po_ssnext); 2778 epoch_wait_preempt(global_epoch_preempt); 2779 } 2780 } 2781 pm->pm_state = PMC_STATE_DELETED; 2782 2783 pmc_restore_cpu_binding(&pb); 2784 2785 /* 2786 * We could have references to this PMC structure in the 2787 * per-cpu sample queues. Wait for the queue to drain. 2788 */ 2789 pmc_wait_for_pmc_idle(pm); 2790 2791 } else if (PMC_IS_VIRTUAL_MODE(mode)) { 2792 /* 2793 * A virtual PMC could be running on multiple CPUs at a given 2794 * instant. 2795 * 2796 * By marking its state as DELETED, we ensure that this PMC is 2797 * never further scheduled on hardware. 2798 * 2799 * Then we wait till all CPUs are done with this PMC. 2800 */ 2801 pm->pm_state = PMC_STATE_DELETED; 2802 2803 /* Wait for the PMCs runcount to come to zero. */ 2804 pmc_wait_for_pmc_idle(pm); 2805 2806 /* 2807 * At this point the PMC is off all CPUs and cannot be freshly 2808 * scheduled onto a CPU. It is now safe to unlink all targets 2809 * from this PMC. If a process-record's refcount falls to zero, 2810 * we remove it from the hash table. The module-wide SX lock 2811 * protects us from races. 2812 */ 2813 LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) { 2814 pp = ptgt->pt_process; 2815 pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */ 2816 2817 PMCDBG1(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt); 2818 2819 /* 2820 * If the target process record shows that no PMCs are 2821 * attached to it, reclaim its space. 
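		 * (pp_refcnt counts the PMCs attached to the target
		 * process, so a zero refcount means the descriptor is
		 * no longer referenced.)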
2822 */ 2823 if (pp->pp_refcnt == 0) { 2824 pmc_remove_process_descriptor(pp); 2825 pmc_destroy_process_descriptor(pp); 2826 } 2827 } 2828 2829 cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */ 2830 } 2831 2832 /* 2833 * Release any MD resources. 2834 */ 2835 (void)pcd->pcd_release_pmc(cpu, adjri, pm); 2836 2837 /* 2838 * Update row disposition. 2839 */ 2840 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) 2841 PMC_UNMARK_ROW_STANDALONE(ri); 2842 else 2843 PMC_UNMARK_ROW_THREAD(ri); 2844 2845 /* Unlink from the owner's list. */ 2846 if (pm->pm_owner != NULL) { 2847 LIST_REMOVE(pm, pm_next); 2848 pm->pm_owner = NULL; 2849 } 2850 } 2851 2852 /* 2853 * Register an owner and a pmc. 2854 */ 2855 static int 2856 pmc_register_owner(struct proc *p, struct pmc *pmc) 2857 { 2858 struct pmc_owner *po; 2859 2860 sx_assert(&pmc_sx, SX_XLOCKED); 2861 2862 if ((po = pmc_find_owner_descriptor(p)) == NULL) { 2863 if ((po = pmc_allocate_owner_descriptor(p)) == NULL) 2864 return (ENOMEM); 2865 } 2866 2867 KASSERT(pmc->pm_owner == NULL, 2868 ("[pmc,%d] attempting to own an initialized PMC", __LINE__)); 2869 pmc->pm_owner = po; 2870 2871 LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next); 2872 2873 PROC_LOCK(p); 2874 p->p_flag |= P_HWPMC; 2875 PROC_UNLOCK(p); 2876 2877 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) 2878 pmclog_process_pmcallocate(pmc); 2879 2880 PMCDBG2(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p", 2881 po, pmc); 2882 2883 return (0); 2884 } 2885 2886 /* 2887 * Return the current row disposition: 2888 * == 0 => FREE 2889 * > 0 => PROCESS MODE 2890 * < 0 => SYSTEM MODE 2891 */ 2892 int 2893 pmc_getrowdisp(int ri) 2894 { 2895 return (pmc_pmcdisp[ri]); 2896 } 2897 2898 /* 2899 * Check if a PMC at row index 'ri' can be allocated to the current 2900 * process. 2901 * 2902 * Allocation can fail if: 2903 * - the current process is already being profiled by a PMC at index 'ri', 2904 * attached to it via OP_PMCATTACH. 2905 * - the current process has already allocated a PMC at index 'ri' 2906 * via OP_ALLOCATE. 2907 */ 2908 static bool 2909 pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu) 2910 { 2911 struct pmc *pm; 2912 struct pmc_owner *po; 2913 struct pmc_process *pp; 2914 enum pmc_mode mode; 2915 2916 PMCDBG5(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d " 2917 "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu); 2918 2919 /* 2920 * We shouldn't have already allocated a process-mode PMC at 2921 * row index 'ri'. 2922 * 2923 * We shouldn't have allocated a system-wide PMC on the same 2924 * CPU and same RI. 2925 */ 2926 if ((po = pmc_find_owner_descriptor(p)) != NULL) { 2927 LIST_FOREACH(pm, &po->po_pmcs, pm_next) { 2928 if (PMC_TO_ROWINDEX(pm) == ri) { 2929 mode = PMC_TO_MODE(pm); 2930 if (PMC_IS_VIRTUAL_MODE(mode)) 2931 return (false); 2932 if (PMC_IS_SYSTEM_MODE(mode) && 2933 PMC_TO_CPU(pm) == cpu) 2934 return (false); 2935 } 2936 } 2937 } 2938 2939 /* 2940 * We also shouldn't be the target of any PMC at this index 2941 * since otherwise a PMC_ATTACH to ourselves will fail. 2942 */ 2943 if ((pp = pmc_find_process_descriptor(p, 0)) != NULL) 2944 if (pp->pp_pmcs[ri].pp_pmc != NULL) 2945 return (false); 2946 2947 PMCDBG4(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok", 2948 p, p->p_pid, p->p_comm, ri); 2949 return (true); 2950 } 2951 2952 /* 2953 * Check if a given PMC at row index 'ri' can be currently used in 2954 * mode 'mode'. 
2955  */
2956 static bool
2957 pmc_can_allocate_row(int ri, enum pmc_mode mode)
2958 {
2959 	enum pmc_disp disp;
2960 
2961 	sx_assert(&pmc_sx, SX_XLOCKED);
2962 
2963 	PMCDBG2(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
2964 
2965 	if (PMC_IS_SYSTEM_MODE(mode))
2966 		disp = PMC_DISP_STANDALONE;
2967 	else
2968 		disp = PMC_DISP_THREAD;
2969 
2970 	/*
2971 	 * Check the disposition for PMC row 'ri':
2972 	 *
2973 	 * Expected disposition	Row-disposition		Result
2974 	 *
2975 	 * STANDALONE		STANDALONE or FREE	proceed
2976 	 * STANDALONE		THREAD			fail
2977 	 * THREAD		THREAD or FREE		proceed
2978 	 * THREAD		STANDALONE		fail
2979 	 */
2980 	if (!PMC_ROW_DISP_IS_FREE(ri) &&
2981 	    !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
2982 	    !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
2983 		return (false);
2984 
2985 	/*
2986 	 * All OK.
2987 	 */
2988 	PMCDBG2(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
2989 	return (true);
2990 }
2991 
2992 /*
2993  * Find a PMC descriptor with user handle 'pmcid' for owner 'po'.
2994  */
2995 static struct pmc *
2996 pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
2997 {
2998 	struct pmc *pm;
2999 
3000 	KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
3001 	    ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
3002 	    PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));
3003 
3004 	LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
3005 		if (pm->pm_id == pmcid)
3006 			return (pm);
3007 	}
3008 
3009 	return (NULL);
3010 }
3011 
3012 static int
3013 pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
3014 {
3015 	struct pmc *pm, *opm;
3016 	struct pmc_owner *po;
3017 	struct pmc_process *pp;
3018 
3019 	PMCDBG1(PMC,FND,1, "find-pmc id=%d", pmcid);
3020 	if (PMC_ID_TO_ROWINDEX(pmcid) >= md->pmd_npmc)
3021 		return (EINVAL);
3022 
3023 	if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL) {
3024 		/*
3025 		 * In case of PMC_F_DESCENDANTS child processes we will not find
3026 		 * the current process in the owners hash list.  Find the owner
3027 		 * process first and from there lookup the po.
3028 		 */
3029 		pp = pmc_find_process_descriptor(curthread->td_proc,
3030 		    PMC_FLAG_NONE);
3031 		if (pp == NULL)
3032 			return (ESRCH);
3033 		opm = pp->pp_pmcs[PMC_ID_TO_ROWINDEX(pmcid)].pp_pmc;
3034 		if (opm == NULL)
3035 			return (ESRCH);
3036 		if ((opm->pm_flags &
3037 		    (PMC_F_ATTACHED_TO_OWNER | PMC_F_DESCENDANTS)) !=
3038 		    (PMC_F_ATTACHED_TO_OWNER | PMC_F_DESCENDANTS))
3039 			return (ESRCH);
3040 
3041 		po = opm->pm_owner;
3042 	}
3043 
3044 	if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
3045 		return (EINVAL);
3046 
3047 	PMCDBG2(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
3048 
3049 	*pmc = pm;
3050 	return (0);
3051 }
3052 
3053 /*
3054  * Start a PMC.
3055  */
3056 static int
3057 pmc_start(struct pmc *pm)
3058 {
3059 	struct pmc_binding pb;
3060 	struct pmc_classdep *pcd;
3061 	struct pmc_owner *po;
3062 	pmc_value_t v;
3063 	enum pmc_mode mode;
3064 	int adjri, error, cpu, ri;
3065 
3066 	KASSERT(pm != NULL,
3067 	    ("[pmc,%d] null pm", __LINE__));
3068 
3069 	mode = PMC_TO_MODE(pm);
3070 	ri = PMC_TO_ROWINDEX(pm);
3071 	pcd = pmc_ri_to_classdep(md, ri, &adjri);
3072 
3073 	error = 0;
3074 	po = pm->pm_owner;
3075 
3076 	PMCDBG3(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);
3077 
3078 
3079 
3080 	/*
3081 	 * Disallow PMCSTART if a logfile is required but has not been
3082 	 * configured yet.
3083 	 */
3084 	if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) != 0 &&
3085 	    (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
3086 		return (EDOOFUS);	/* programming error */
3087 
3088 	/*
3089 	 * If this is a sampling mode PMC, log mapping information for
3090 	 * the kernel modules that are currently loaded.
3091 	 */
3092 	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
3093 		pmc_log_kernel_mappings(pm);
3094 
3095 	if (PMC_IS_VIRTUAL_MODE(mode)) {
3096 		/*
3097 		 * If a PMCATTACH has never been done on this PMC,
3098 		 * attach it to its owner process.
3099 		 */
3100 		if (LIST_EMPTY(&pm->pm_targets)) {
3101 			error = (pm->pm_flags & PMC_F_ATTACH_DONE) != 0 ?
3102 			    ESRCH : pmc_attach_process(po->po_owner, pm);
3103 		}
3104 
3105 		/*
3106 		 * If the PMC is attached to its owner, then force a context
3107 		 * switch to ensure that the MD state gets set correctly.
3108 		 */
3109 		if (error == 0) {
3110 			pm->pm_state = PMC_STATE_RUNNING;
3111 			if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) != 0)
3112 				pmc_force_context_switch();
3113 		}
3114 
3115 		return (error);
3116 	}
3117 
3118 	/*
3119 	 * A system-wide PMC.
3120 	 *
3121 	 * Add the owner to the global list if this is a system-wide
3122 	 * sampling PMC.
3123 	 */
3124 	if (mode == PMC_MODE_SS) {
3125 		/*
3126 		 * Log mapping information for all existing processes in the
3127 		 * system.  Subsequent mappings are logged as they happen;
3128 		 * see pmc_process_mmap().
3129 		 */
3130 		if (po->po_logprocmaps == 0) {
3131 			pmc_log_all_process_mappings(po);
3132 			po->po_logprocmaps = 1;
3133 		}
3134 		po->po_sscount++;
3135 		if (po->po_sscount == 1) {
3136 			atomic_add_rel_int(&pmc_ss_count, 1);
3137 			CK_LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
3138 			PMCDBG1(PMC,OPS,1, "po=%p in global list", po);
3139 		}
3140 	}
3141 
3142 	/*
3143 	 * Move to the CPU associated with this
3144 	 * PMC, and start the hardware.
3145 	 */
3146 	pmc_save_cpu_binding(&pb);
3147 	cpu = PMC_TO_CPU(pm);
3148 	if (!pmc_cpu_is_active(cpu))
3149 		return (ENXIO);
3150 	pmc_select_cpu(cpu);
3151 
3152 	/*
3153 	 * Global PMCs are configured at allocation time, so write out
3154 	 * the initial value and start the PMC.
3155 	 */
3156 	pm->pm_state = PMC_STATE_RUNNING;
3157 
3158 	critical_enter();
3159 	v = PMC_IS_SAMPLING_MODE(mode) ? pm->pm_sc.pm_reloadcount :
3160 	    pm->pm_sc.pm_initial;
3161 	if ((error = pcd->pcd_write_pmc(cpu, adjri, pm, v)) == 0) {
3162 		/* If a sampling mode PMC, reset stalled state. */
3163 		if (PMC_IS_SAMPLING_MODE(mode))
3164 			pm->pm_pcpu_state[cpu].pps_stalled = 0;
3165 
3166 		/* Indicate that we desire this to run.  Start it. */
3167 		pm->pm_pcpu_state[cpu].pps_cpustate = 1;
3168 		error = pcd->pcd_start_pmc(cpu, adjri, pm);
3169 	}
3170 	critical_exit();
3171 
3172 	pmc_restore_cpu_binding(&pb);
3173 	return (error);
3174 }
3175 
3176 /*
3177  * Stop a PMC.
3178  */
3179 static int
3180 pmc_stop(struct pmc *pm)
3181 {
3182 	struct pmc_binding pb;
3183 	struct pmc_classdep *pcd;
3184 	struct pmc_owner *po;
3185 	int adjri, cpu, error, ri;
3186 
3187 	KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
3188 
3189 	PMCDBG3(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm, PMC_TO_MODE(pm),
3190 	    PMC_TO_ROWINDEX(pm));
3191 
3192 	pm->pm_state = PMC_STATE_STOPPED;
3193 
3194 	/*
3195 	 * If the PMC is a virtual mode one, changing the state to non-RUNNING
3196 	 * is enough to ensure that the PMC never gets scheduled.
3197 	 *
3198 	 * If this PMC is currently running on a CPU, then it will be handled
3199 	 * correctly at the time its target process is context switched out.
3200 	 */
3201 	if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
3202 		return (0);
3203 
3204 	/*
3205 	 * A system-mode PMC.
Move to the CPU associated with this PMC, and 3206 * stop the hardware. We update the 'initial count' so that a 3207 * subsequent PMCSTART will resume counting from the current hardware 3208 * count. 3209 */ 3210 pmc_save_cpu_binding(&pb); 3211 3212 cpu = PMC_TO_CPU(pm); 3213 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), 3214 ("[pmc,%d] illegal cpu=%d", __LINE__, cpu)); 3215 if (!pmc_cpu_is_active(cpu)) 3216 return (ENXIO); 3217 3218 pmc_select_cpu(cpu); 3219 3220 ri = PMC_TO_ROWINDEX(pm); 3221 pcd = pmc_ri_to_classdep(md, ri, &adjri); 3222 3223 pm->pm_pcpu_state[cpu].pps_cpustate = 0; 3224 critical_enter(); 3225 if ((error = pcd->pcd_stop_pmc(cpu, adjri, pm)) == 0) { 3226 error = pcd->pcd_read_pmc(cpu, adjri, pm, 3227 &pm->pm_sc.pm_initial); 3228 } 3229 critical_exit(); 3230 3231 pmc_restore_cpu_binding(&pb); 3232 3233 /* Remove this owner from the global list of SS PMC owners. */ 3234 po = pm->pm_owner; 3235 if (PMC_TO_MODE(pm) == PMC_MODE_SS) { 3236 po->po_sscount--; 3237 if (po->po_sscount == 0) { 3238 atomic_subtract_rel_int(&pmc_ss_count, 1); 3239 CK_LIST_REMOVE(po, po_ssnext); 3240 epoch_wait_preempt(global_epoch_preempt); 3241 PMCDBG1(PMC,OPS,2,"po=%p removed from global list", po); 3242 } 3243 } 3244 3245 return (error); 3246 } 3247 3248 static struct pmc_classdep * 3249 pmc_class_to_classdep(enum pmc_class class) 3250 { 3251 int n; 3252 3253 for (n = 0; n < md->pmd_nclass; n++) { 3254 if (md->pmd_classdep[n].pcd_class == class) 3255 return (&md->pmd_classdep[n]); 3256 } 3257 return (NULL); 3258 } 3259 3260 #if defined(HWPMC_DEBUG) && defined(KTR) 3261 static const char *pmc_op_to_name[] = { 3262 #undef __PMC_OP 3263 #define __PMC_OP(N, D) #N , 3264 __PMC_OPS() 3265 NULL 3266 }; 3267 #endif 3268 3269 /* 3270 * The syscall interface 3271 */ 3272 3273 #define PMC_GET_SX_XLOCK(...) do { \ 3274 sx_xlock(&pmc_sx); \ 3275 if (pmc_hook == NULL) { \ 3276 sx_xunlock(&pmc_sx); \ 3277 return __VA_ARGS__; \ 3278 } \ 3279 } while (0) 3280 3281 #define PMC_DOWNGRADE_SX() do { \ 3282 sx_downgrade(&pmc_sx); \ 3283 is_sx_downgraded = true; \ 3284 } while (0) 3285 3286 /* 3287 * Main body of PMC_OP_PMCALLOCATE. 3288 */ 3289 static int 3290 pmc_do_op_pmcallocate(struct thread *td, struct pmc_op_pmcallocate *pa) 3291 { 3292 struct proc *p; 3293 struct pmc *pmc; 3294 struct pmc_binding pb; 3295 struct pmc_classdep *pcd; 3296 struct pmc_hw *phw; 3297 enum pmc_mode mode; 3298 enum pmc_class class; 3299 uint32_t caps; 3300 u_int cpu; 3301 int adjri, n; 3302 int error; 3303 3304 class = pa->pm_class; 3305 caps = pa->pm_caps; 3306 mode = pa->pm_mode; 3307 cpu = pa->pm_cpu; 3308 3309 p = td->td_proc; 3310 3311 /* Requested mode must exist. */ 3312 if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC && 3313 mode != PMC_MODE_TS && mode != PMC_MODE_TC)) 3314 return (EINVAL); 3315 3316 /* Requested CPU must be valid. */ 3317 if (cpu != PMC_CPU_ANY && cpu >= pmc_cpu_max()) 3318 return (EINVAL); 3319 3320 /* 3321 * Virtual PMCs should only ask for a default CPU. 3322 * System mode PMCs need to specify a non-default CPU. 3323 */ 3324 if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != PMC_CPU_ANY) || 3325 (PMC_IS_SYSTEM_MODE(mode) && cpu == PMC_CPU_ANY)) 3326 return (EINVAL); 3327 3328 /* 3329 * Check that an inactive CPU is not being asked for. 
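	 * (A system-mode PMC stays bound to a single CPU, so that CPU
	 * must be active at allocation time.)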
3330 */ 3331 if (PMC_IS_SYSTEM_MODE(mode) && !pmc_cpu_is_active(cpu)) 3332 return (ENXIO); 3333 3334 /* 3335 * Refuse an allocation for a system-wide PMC if this process has been 3336 * jailed, or if this process lacks super-user credentials and the 3337 * sysctl tunable 'security.bsd.unprivileged_syspmcs' is zero. 3338 */ 3339 if (PMC_IS_SYSTEM_MODE(mode)) { 3340 if (jailed(td->td_ucred)) 3341 return (EPERM); 3342 if (!pmc_unprivileged_syspmcs) { 3343 error = priv_check(td, PRIV_PMC_SYSTEM); 3344 if (error != 0) 3345 return (error); 3346 } 3347 } 3348 3349 /* 3350 * Look for valid values for 'pm_flags'. 3351 */ 3352 if ((pa->pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW | 3353 PMC_F_LOG_PROCEXIT | PMC_F_CALLCHAIN | PMC_F_USERCALLCHAIN)) != 0) 3354 return (EINVAL); 3355 3356 /* PMC_F_USERCALLCHAIN is only valid with PMC_F_CALLCHAIN. */ 3357 if ((pa->pm_flags & (PMC_F_CALLCHAIN | PMC_F_USERCALLCHAIN)) == 3358 PMC_F_USERCALLCHAIN) 3359 return (EINVAL); 3360 3361 /* PMC_F_USERCALLCHAIN is only valid for sampling mode. */ 3362 if ((pa->pm_flags & PMC_F_USERCALLCHAIN) != 0 && mode != PMC_MODE_TS && 3363 mode != PMC_MODE_SS) 3364 return (EINVAL); 3365 3366 /* Process logging options are not allowed for system PMCs. */ 3367 if (PMC_IS_SYSTEM_MODE(mode) && 3368 (pa->pm_flags & (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT)) != 0) 3369 return (EINVAL); 3370 3371 /* 3372 * All sampling mode PMCs need to be able to interrupt the CPU. 3373 */ 3374 if (PMC_IS_SAMPLING_MODE(mode)) 3375 caps |= PMC_CAP_INTERRUPT; 3376 3377 /* A valid class specifier should have been passed in. */ 3378 pcd = pmc_class_to_classdep(class); 3379 if (pcd == NULL) 3380 return (EINVAL); 3381 3382 /* The requested PMC capabilities should be feasible. */ 3383 if ((pcd->pcd_caps & caps) != caps) 3384 return (EOPNOTSUPP); 3385 3386 PMCDBG4(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d", pa->pm_ev, 3387 caps, mode, cpu); 3388 3389 pmc = pmc_allocate_pmc_descriptor(); 3390 pmc->pm_id = PMC_ID_MAKE_ID(cpu, pa->pm_mode, class, PMC_ID_INVALID); 3391 pmc->pm_event = pa->pm_ev; 3392 pmc->pm_state = PMC_STATE_FREE; 3393 pmc->pm_caps = caps; 3394 pmc->pm_flags = pa->pm_flags; 3395 3396 /* XXX set lower bound on sampling for process counters */ 3397 if (PMC_IS_SAMPLING_MODE(mode)) { 3398 /* 3399 * Don't permit requested sample rate to be less than 3400 * pmc_mincount. 3401 */ 3402 if (pa->pm_count < MAX(1, pmc_mincount)) 3403 log(LOG_WARNING, "pmcallocate: passed sample " 3404 "rate %ju - setting to %u\n", 3405 (uintmax_t)pa->pm_count, 3406 MAX(1, pmc_mincount)); 3407 pmc->pm_sc.pm_reloadcount = MAX(MAX(1, pmc_mincount), 3408 pa->pm_count); 3409 } else 3410 pmc->pm_sc.pm_initial = pa->pm_count; 3411 3412 /* switch thread to CPU 'cpu' */ 3413 pmc_save_cpu_binding(&pb); 3414 3415 #define PMC_IS_SHAREABLE_PMC(cpu, n) \ 3416 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \ 3417 PMC_PHW_FLAG_IS_SHAREABLE) 3418 #define PMC_IS_UNALLOCATED(cpu, n) \ 3419 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL) 3420 3421 if (PMC_IS_SYSTEM_MODE(mode)) { 3422 pmc_select_cpu(cpu); 3423 for (n = pcd->pcd_ri; n < md->pmd_npmc; n++) { 3424 pcd = pmc_ri_to_classdep(md, n, &adjri); 3425 3426 if (!pmc_can_allocate_row(n, mode) || 3427 !pmc_can_allocate_rowindex(p, n, cpu)) 3428 continue; 3429 if (!PMC_IS_UNALLOCATED(cpu, n) && 3430 !PMC_IS_SHAREABLE_PMC(cpu, n)) 3431 continue; 3432 3433 if (pcd->pcd_allocate_pmc(cpu, adjri, pmc, pa) == 0) { 3434 /* Success. 
*/ 3435 break; 3436 } 3437 } 3438 } else { 3439 /* Process virtual mode */ 3440 for (n = pcd->pcd_ri; n < md->pmd_npmc; n++) { 3441 pcd = pmc_ri_to_classdep(md, n, &adjri); 3442 3443 if (!pmc_can_allocate_row(n, mode) || 3444 !pmc_can_allocate_rowindex(p, n, PMC_CPU_ANY)) 3445 continue; 3446 3447 if (pcd->pcd_allocate_pmc(td->td_oncpu, adjri, pmc, 3448 pa) == 0) { 3449 /* Success. */ 3450 break; 3451 } 3452 } 3453 } 3454 3455 #undef PMC_IS_UNALLOCATED 3456 #undef PMC_IS_SHAREABLE_PMC 3457 3458 pmc_restore_cpu_binding(&pb); 3459 3460 if (n == md->pmd_npmc) { 3461 pmc_destroy_pmc_descriptor(pmc); 3462 return (EINVAL); 3463 } 3464 3465 /* Fill in the correct value in the ID field. */ 3466 pmc->pm_id = PMC_ID_MAKE_ID(cpu, mode, class, n); 3467 3468 PMCDBG5(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x", 3469 pmc->pm_event, class, mode, n, pmc->pm_id); 3470 3471 /* Process mode PMCs with logging enabled need log files. */ 3472 if ((pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW)) != 0) 3473 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE; 3474 3475 /* All system mode sampling PMCs require a log file. */ 3476 if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode)) 3477 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE; 3478 3479 /* 3480 * Configure global pmc's immediately. 3481 */ 3482 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) { 3483 pmc_save_cpu_binding(&pb); 3484 pmc_select_cpu(cpu); 3485 3486 phw = pmc_pcpu[cpu]->pc_hwpmcs[n]; 3487 pcd = pmc_ri_to_classdep(md, n, &adjri); 3488 3489 if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 || 3490 (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) { 3491 (void)pcd->pcd_release_pmc(cpu, adjri, pmc); 3492 pmc_destroy_pmc_descriptor(pmc); 3493 pmc_restore_cpu_binding(&pb); 3494 return (EPERM); 3495 } 3496 3497 pmc_restore_cpu_binding(&pb); 3498 } 3499 3500 pmc->pm_state = PMC_STATE_ALLOCATED; 3501 pmc->pm_class = class; 3502 3503 /* 3504 * Mark row disposition. 3505 */ 3506 if (PMC_IS_SYSTEM_MODE(mode)) 3507 PMC_MARK_ROW_STANDALONE(n); 3508 else 3509 PMC_MARK_ROW_THREAD(n); 3510 3511 /* 3512 * Register this PMC with the current thread as its owner. 3513 */ 3514 error = pmc_register_owner(p, pmc); 3515 if (error != 0) { 3516 pmc_release_pmc_descriptor(pmc); 3517 pmc_destroy_pmc_descriptor(pmc); 3518 return (error); 3519 } 3520 3521 /* 3522 * Return the allocated index. 3523 */ 3524 pa->pm_pmcid = pmc->pm_id; 3525 return (0); 3526 } 3527 3528 /* 3529 * Main body of PMC_OP_PMCATTACH. 3530 */ 3531 static int 3532 pmc_do_op_pmcattach(struct thread *td, struct pmc_op_pmcattach a) 3533 { 3534 struct pmc *pm; 3535 struct proc *p; 3536 int error; 3537 3538 sx_assert(&pmc_sx, SX_XLOCKED); 3539 3540 if (a.pm_pid < 0) { 3541 return (EINVAL); 3542 } else if (a.pm_pid == 0) { 3543 a.pm_pid = td->td_proc->p_pid; 3544 } 3545 3546 error = pmc_find_pmc(a.pm_pmc, &pm); 3547 if (error != 0) 3548 return (error); 3549 3550 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) 3551 return (EINVAL); 3552 3553 /* PMCs may be (re)attached only when allocated or stopped */ 3554 if (pm->pm_state == PMC_STATE_RUNNING) { 3555 return (EBUSY); 3556 } else if (pm->pm_state != PMC_STATE_ALLOCATED && 3557 pm->pm_state != PMC_STATE_STOPPED) { 3558 return (EINVAL); 3559 } 3560 3561 /* lookup pid */ 3562 if ((p = pfind(a.pm_pid)) == NULL) 3563 return (ESRCH); 3564 3565 /* 3566 * Ignore processes that are working on exiting. 
3567 */ 3568 if ((p->p_flag & P_WEXIT) != 0) { 3569 PROC_UNLOCK(p); /* pfind() returns a locked process */ 3570 return (ESRCH); 3571 } 3572 3573 /* 3574 * We are allowed to attach a PMC to a process if we can debug it. 3575 */ 3576 error = p_candebug(curthread, p); 3577 3578 PROC_UNLOCK(p); 3579 3580 if (error == 0) 3581 error = pmc_attach_process(p, pm); 3582 3583 return (error); 3584 } 3585 3586 /* 3587 * Main body of PMC_OP_PMCDETACH. 3588 */ 3589 static int 3590 pmc_do_op_pmcdetach(struct thread *td, struct pmc_op_pmcattach a) 3591 { 3592 struct pmc *pm; 3593 struct proc *p; 3594 int error; 3595 3596 if (a.pm_pid < 0) { 3597 return (EINVAL); 3598 } else if (a.pm_pid == 0) 3599 a.pm_pid = td->td_proc->p_pid; 3600 3601 error = pmc_find_pmc(a.pm_pmc, &pm); 3602 if (error != 0) 3603 return (error); 3604 3605 if ((p = pfind(a.pm_pid)) == NULL) 3606 return (ESRCH); 3607 3608 /* 3609 * Treat processes that are in the process of exiting as if they were 3610 * not present. 3611 */ 3612 if ((p->p_flag & P_WEXIT) != 0) { 3613 PROC_UNLOCK(p); 3614 return (ESRCH); 3615 } 3616 3617 PROC_UNLOCK(p); /* pfind() returns a locked process */ 3618 3619 if (error == 0) 3620 error = pmc_detach_process(p, pm); 3621 3622 return (error); 3623 } 3624 3625 /* 3626 * Main body of PMC_OP_PMCRELEASE. 3627 */ 3628 static int 3629 pmc_do_op_pmcrelease(pmc_id_t pmcid) 3630 { 3631 struct pmc_owner *po; 3632 struct pmc *pm; 3633 int error; 3634 3635 /* 3636 * Find PMC pointer for the named PMC. 3637 * 3638 * Use pmc_release_pmc_descriptor() to switch off the 3639 * PMC, remove all its target threads, and remove the 3640 * PMC from its owner's list. 3641 * 3642 * Remove the owner record if this is the last PMC 3643 * owned. 3644 * 3645 * Free up space. 3646 */ 3647 error = pmc_find_pmc(pmcid, &pm); 3648 if (error != 0) 3649 return (error); 3650 3651 po = pm->pm_owner; 3652 pmc_release_pmc_descriptor(pm); 3653 pmc_maybe_remove_owner(po); 3654 pmc_destroy_pmc_descriptor(pm); 3655 3656 return (error); 3657 } 3658 3659 /* 3660 * Main body of PMC_OP_PMCRW. 3661 */ 3662 static int 3663 pmc_do_op_pmcrw(const struct pmc_op_pmcrw *prw, pmc_value_t *valp) 3664 { 3665 struct pmc_binding pb; 3666 struct pmc_classdep *pcd; 3667 struct pmc *pm; 3668 u_int cpu, ri, adjri; 3669 int error; 3670 3671 PMCDBG2(PMC,OPS,1, "rw id=%d flags=0x%x", prw->pm_pmcid, prw->pm_flags); 3672 3673 /* Must have at least one flag set. */ 3674 if ((prw->pm_flags & (PMC_F_OLDVALUE | PMC_F_NEWVALUE)) == 0) 3675 return (EINVAL); 3676 3677 /* Locate PMC descriptor. */ 3678 error = pmc_find_pmc(prw->pm_pmcid, &pm); 3679 if (error != 0) 3680 return (error); 3681 3682 /* Can't read a PMC that hasn't been started. */ 3683 if (pm->pm_state != PMC_STATE_ALLOCATED && 3684 pm->pm_state != PMC_STATE_STOPPED && 3685 pm->pm_state != PMC_STATE_RUNNING) 3686 return (EINVAL); 3687 3688 /* Writing a new value is allowed only for 'STOPPED' PMCs. */ 3689 if (pm->pm_state == PMC_STATE_RUNNING && 3690 (prw->pm_flags & PMC_F_NEWVALUE) != 0) 3691 return (EBUSY); 3692 3693 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) { 3694 /* 3695 * If this PMC is attached to its owner (i.e., the process 3696 * requesting this operation) and is running, then attempt to 3697 * get an upto-date reading from hardware for a READ. Writes 3698 * are only allowed when the PMC is stopped, so only update the 3699 * saved value field. 3700 * 3701 * If the PMC is not running, or is not attached to its owner, 3702 * read/write to the savedvalue field. 
3703 */ 3704 3705 ri = PMC_TO_ROWINDEX(pm); 3706 pcd = pmc_ri_to_classdep(md, ri, &adjri); 3707 3708 mtx_pool_lock_spin(pmc_mtxpool, pm); 3709 cpu = curthread->td_oncpu; 3710 3711 if ((prw->pm_flags & PMC_F_OLDVALUE) != 0) { 3712 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) && 3713 (pm->pm_state == PMC_STATE_RUNNING)) { 3714 error = (*pcd->pcd_read_pmc)(cpu, adjri, pm, 3715 valp); 3716 } else { 3717 *valp = pm->pm_gv.pm_savedvalue; 3718 } 3719 } 3720 3721 if ((prw->pm_flags & PMC_F_NEWVALUE) != 0) 3722 pm->pm_gv.pm_savedvalue = prw->pm_value; 3723 3724 mtx_pool_unlock_spin(pmc_mtxpool, pm); 3725 } else { /* System mode PMCs */ 3726 cpu = PMC_TO_CPU(pm); 3727 ri = PMC_TO_ROWINDEX(pm); 3728 pcd = pmc_ri_to_classdep(md, ri, &adjri); 3729 3730 if (!pmc_cpu_is_active(cpu)) 3731 return (ENXIO); 3732 3733 /* Move this thread to CPU 'cpu'. */ 3734 pmc_save_cpu_binding(&pb); 3735 pmc_select_cpu(cpu); 3736 critical_enter(); 3737 3738 /* Save old value. */ 3739 if ((prw->pm_flags & PMC_F_OLDVALUE) != 0) 3740 error = (*pcd->pcd_read_pmc)(cpu, adjri, pm, valp); 3741 3742 /* Write out new value. */ 3743 if (error == 0 && (prw->pm_flags & PMC_F_NEWVALUE) != 0) 3744 error = (*pcd->pcd_write_pmc)(cpu, adjri, pm, 3745 prw->pm_value); 3746 3747 critical_exit(); 3748 pmc_restore_cpu_binding(&pb); 3749 if (error != 0) 3750 return (error); 3751 } 3752 3753 #ifdef HWPMC_DEBUG 3754 if ((prw->pm_flags & PMC_F_NEWVALUE) != 0) 3755 PMCDBG3(PMC,OPS,2, "rw id=%d new %jx -> old %jx", 3756 ri, prw->pm_value, *valp); 3757 else 3758 PMCDBG2(PMC,OPS,2, "rw id=%d -> old %jx", ri, *valp); 3759 #endif 3760 return (error); 3761 } 3762 3763 static int 3764 pmc_syscall_handler(struct thread *td, void *syscall_args) 3765 { 3766 struct pmc_syscall_args *c; 3767 void *pmclog_proc_handle; 3768 void *arg; 3769 int error, op; 3770 bool is_sx_downgraded; 3771 3772 c = (struct pmc_syscall_args *)syscall_args; 3773 op = c->pmop_code; 3774 arg = c->pmop_data; 3775 3776 /* PMC isn't set up yet */ 3777 if (pmc_hook == NULL) 3778 return (EINVAL); 3779 3780 if (op == PMC_OP_CONFIGURELOG) { 3781 /* 3782 * We cannot create the logging process inside 3783 * pmclog_configure_log() because there is a LOR 3784 * between pmc_sx and process structure locks. 3785 * Instead, pre-create the process and ignite the loop 3786 * if everything is fine, otherwise direct the process 3787 * to exit. 3788 */ 3789 error = pmclog_proc_create(td, &pmclog_proc_handle); 3790 if (error != 0) 3791 goto done_syscall; 3792 } 3793 3794 PMC_GET_SX_XLOCK(ENOSYS); 3795 is_sx_downgraded = false; 3796 PMCDBG3(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op, 3797 pmc_op_to_name[op], arg); 3798 3799 error = 0; 3800 counter_u64_add(pmc_stats.pm_syscalls, 1); 3801 3802 switch (op) { 3803 3804 3805 /* 3806 * Configure a log file. 3807 * 3808 * XXX This OP will be reworked. 
3809 */ 3810 3811 case PMC_OP_CONFIGURELOG: 3812 { 3813 struct proc *p; 3814 struct pmc *pm; 3815 struct pmc_owner *po; 3816 struct pmc_op_configurelog cl; 3817 3818 if ((error = copyin(arg, &cl, sizeof(cl))) != 0) { 3819 pmclog_proc_ignite(pmclog_proc_handle, NULL); 3820 break; 3821 } 3822 3823 /* No flags currently implemented */ 3824 if (cl.pm_flags != 0) { 3825 error = EINVAL; 3826 break; 3827 } 3828 3829 /* mark this process as owning a log file */ 3830 p = td->td_proc; 3831 if ((po = pmc_find_owner_descriptor(p)) == NULL) 3832 if ((po = pmc_allocate_owner_descriptor(p)) == NULL) { 3833 pmclog_proc_ignite(pmclog_proc_handle, NULL); 3834 error = ENOMEM; 3835 break; 3836 } 3837 3838 /* 3839 * If a valid fd was passed in, try to configure that, 3840 * otherwise if 'fd' was less than zero and there was 3841 * a log file configured, flush its buffers and 3842 * de-configure it. 3843 */ 3844 if (cl.pm_logfd >= 0) { 3845 error = pmclog_configure_log(md, po, cl.pm_logfd); 3846 pmclog_proc_ignite(pmclog_proc_handle, error == 0 ? 3847 po : NULL); 3848 } else if (po->po_flags & PMC_PO_OWNS_LOGFILE) { 3849 pmclog_proc_ignite(pmclog_proc_handle, NULL); 3850 error = pmclog_close(po); 3851 if (error == 0) { 3852 LIST_FOREACH(pm, &po->po_pmcs, pm_next) 3853 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE && 3854 pm->pm_state == PMC_STATE_RUNNING) 3855 pmc_stop(pm); 3856 error = pmclog_deconfigure_log(po); 3857 } 3858 } else { 3859 pmclog_proc_ignite(pmclog_proc_handle, NULL); 3860 error = EINVAL; 3861 } 3862 } 3863 break; 3864 3865 /* 3866 * Flush a log file. 3867 */ 3868 3869 case PMC_OP_FLUSHLOG: 3870 { 3871 struct pmc_owner *po; 3872 3873 sx_assert(&pmc_sx, SX_XLOCKED); 3874 3875 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { 3876 error = EINVAL; 3877 break; 3878 } 3879 3880 error = pmclog_flush(po, 0); 3881 } 3882 break; 3883 3884 /* 3885 * Close a log file. 3886 */ 3887 3888 case PMC_OP_CLOSELOG: 3889 { 3890 struct pmc_owner *po; 3891 3892 sx_assert(&pmc_sx, SX_XLOCKED); 3893 3894 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { 3895 error = EINVAL; 3896 break; 3897 } 3898 3899 error = pmclog_close(po); 3900 } 3901 break; 3902 3903 /* 3904 * Retrieve hardware configuration. 3905 */ 3906 3907 case PMC_OP_GETCPUINFO: /* CPU information */ 3908 { 3909 struct pmc_op_getcpuinfo gci; 3910 struct pmc_classinfo *pci; 3911 struct pmc_classdep *pcd; 3912 int cl; 3913 3914 memset(&gci, 0, sizeof(gci)); 3915 gci.pm_cputype = md->pmd_cputype; 3916 gci.pm_ncpu = pmc_cpu_max(); 3917 gci.pm_npmc = md->pmd_npmc; 3918 gci.pm_nclass = md->pmd_nclass; 3919 pci = gci.pm_classes; 3920 pcd = md->pmd_classdep; 3921 for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) { 3922 pci->pm_caps = pcd->pcd_caps; 3923 pci->pm_class = pcd->pcd_class; 3924 pci->pm_width = pcd->pcd_width; 3925 pci->pm_num = pcd->pcd_num; 3926 } 3927 error = copyout(&gci, arg, sizeof(gci)); 3928 } 3929 break; 3930 3931 /* 3932 * Retrieve soft events list. 3933 */ 3934 case PMC_OP_GETDYNEVENTINFO: 3935 { 3936 enum pmc_class cl; 3937 enum pmc_event ev; 3938 struct pmc_op_getdyneventinfo *gei; 3939 struct pmc_dyn_event_descr dev; 3940 struct pmc_soft *ps; 3941 uint32_t nevent; 3942 3943 sx_assert(&pmc_sx, SX_LOCKED); 3944 3945 gei = (struct pmc_op_getdyneventinfo *) arg; 3946 3947 if ((error = copyin(&gei->pm_class, &cl, sizeof(cl))) != 0) 3948 break; 3949 3950 /* Only SOFT class is dynamic. 
*/ 3951 if (cl != PMC_CLASS_SOFT) { 3952 error = EINVAL; 3953 break; 3954 } 3955 3956 nevent = 0; 3957 for (ev = PMC_EV_SOFT_FIRST; (int)ev <= PMC_EV_SOFT_LAST; ev++) { 3958 ps = pmc_soft_ev_acquire(ev); 3959 if (ps == NULL) 3960 continue; 3961 bcopy(&ps->ps_ev, &dev, sizeof(dev)); 3962 pmc_soft_ev_release(ps); 3963 3964 error = copyout(&dev, 3965 &gei->pm_events[nevent], 3966 sizeof(struct pmc_dyn_event_descr)); 3967 if (error != 0) 3968 break; 3969 nevent++; 3970 } 3971 if (error != 0) 3972 break; 3973 3974 error = copyout(&nevent, &gei->pm_nevent, 3975 sizeof(nevent)); 3976 } 3977 break; 3978 3979 /* 3980 * Get module statistics 3981 */ 3982 3983 case PMC_OP_GETDRIVERSTATS: 3984 { 3985 struct pmc_op_getdriverstats gms; 3986 #define CFETCH(a, b, field) a.field = counter_u64_fetch(b.field) 3987 CFETCH(gms, pmc_stats, pm_intr_ignored); 3988 CFETCH(gms, pmc_stats, pm_intr_processed); 3989 CFETCH(gms, pmc_stats, pm_intr_bufferfull); 3990 CFETCH(gms, pmc_stats, pm_syscalls); 3991 CFETCH(gms, pmc_stats, pm_syscall_errors); 3992 CFETCH(gms, pmc_stats, pm_buffer_requests); 3993 CFETCH(gms, pmc_stats, pm_buffer_requests_failed); 3994 CFETCH(gms, pmc_stats, pm_log_sweeps); 3995 #undef CFETCH 3996 error = copyout(&gms, arg, sizeof(gms)); 3997 } 3998 break; 3999 4000 4001 /* 4002 * Retrieve module version number 4003 */ 4004 4005 case PMC_OP_GETMODULEVERSION: 4006 { 4007 uint32_t cv, modv; 4008 4009 /* retrieve the client's idea of the ABI version */ 4010 if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0) 4011 break; 4012 /* don't service clients newer than our driver */ 4013 modv = PMC_VERSION; 4014 if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) { 4015 error = EPROGMISMATCH; 4016 break; 4017 } 4018 error = copyout(&modv, arg, sizeof(int)); 4019 } 4020 break; 4021 4022 4023 /* 4024 * Retrieve the state of all the PMCs on a given 4025 * CPU. 
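	 *
	 * (This request only reads driver state, so the handler below
	 * downgrades the big sx lock to read mode via PMC_DOWNGRADE_SX()
	 * before doing the per-row walk.)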
4026 */ 4027 4028 case PMC_OP_GETPMCINFO: 4029 { 4030 int ari; 4031 struct pmc *pm; 4032 size_t pmcinfo_size; 4033 uint32_t cpu, n, npmc; 4034 struct pmc_owner *po; 4035 struct pmc_binding pb; 4036 struct pmc_classdep *pcd; 4037 struct pmc_info *p, *pmcinfo; 4038 struct pmc_op_getpmcinfo *gpi; 4039 4040 PMC_DOWNGRADE_SX(); 4041 4042 gpi = (struct pmc_op_getpmcinfo *) arg; 4043 4044 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0) 4045 break; 4046 4047 if (cpu >= pmc_cpu_max()) { 4048 error = EINVAL; 4049 break; 4050 } 4051 4052 if (!pmc_cpu_is_active(cpu)) { 4053 error = ENXIO; 4054 break; 4055 } 4056 4057 /* switch to CPU 'cpu' */ 4058 pmc_save_cpu_binding(&pb); 4059 pmc_select_cpu(cpu); 4060 4061 npmc = md->pmd_npmc; 4062 4063 pmcinfo_size = npmc * sizeof(struct pmc_info); 4064 pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK | M_ZERO); 4065 4066 p = pmcinfo; 4067 4068 for (n = 0; n < md->pmd_npmc; n++, p++) { 4069 4070 pcd = pmc_ri_to_classdep(md, n, &ari); 4071 4072 KASSERT(pcd != NULL, 4073 ("[pmc,%d] null pcd ri=%d", __LINE__, n)); 4074 4075 if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0) 4076 break; 4077 4078 if (PMC_ROW_DISP_IS_STANDALONE(n)) 4079 p->pm_rowdisp = PMC_DISP_STANDALONE; 4080 else if (PMC_ROW_DISP_IS_THREAD(n)) 4081 p->pm_rowdisp = PMC_DISP_THREAD; 4082 else 4083 p->pm_rowdisp = PMC_DISP_FREE; 4084 4085 p->pm_ownerpid = -1; 4086 4087 if (pm == NULL) /* no PMC associated */ 4088 continue; 4089 4090 po = pm->pm_owner; 4091 4092 KASSERT(po->po_owner != NULL, 4093 ("[pmc,%d] pmc_owner had a null proc pointer", 4094 __LINE__)); 4095 4096 p->pm_ownerpid = po->po_owner->p_pid; 4097 p->pm_mode = PMC_TO_MODE(pm); 4098 p->pm_event = pm->pm_event; 4099 p->pm_flags = pm->pm_flags; 4100 4101 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) 4102 p->pm_reloadcount = 4103 pm->pm_sc.pm_reloadcount; 4104 } 4105 4106 pmc_restore_cpu_binding(&pb); 4107 4108 /* now copy out the PMC info collected */ 4109 if (error == 0) 4110 error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size); 4111 4112 free(pmcinfo, M_PMC); 4113 } 4114 break; 4115 4116 4117 /* 4118 * Set the administrative state of a PMC. I.e. whether 4119 * the PMC is to be used or not. 4120 */ 4121 4122 case PMC_OP_PMCADMIN: 4123 { 4124 int cpu, ri; 4125 enum pmc_state request; 4126 struct pmc_cpu *pc; 4127 struct pmc_hw *phw; 4128 struct pmc_op_pmcadmin pma; 4129 struct pmc_binding pb; 4130 4131 sx_assert(&pmc_sx, SX_XLOCKED); 4132 4133 KASSERT(td == curthread, 4134 ("[pmc,%d] td != curthread", __LINE__)); 4135 4136 error = priv_check(td, PRIV_PMC_MANAGE); 4137 if (error) 4138 break; 4139 4140 if ((error = copyin(arg, &pma, sizeof(pma))) != 0) 4141 break; 4142 4143 cpu = pma.pm_cpu; 4144 4145 if (cpu < 0 || cpu >= (int) pmc_cpu_max()) { 4146 error = EINVAL; 4147 break; 4148 } 4149 4150 if (!pmc_cpu_is_active(cpu)) { 4151 error = ENXIO; 4152 break; 4153 } 4154 4155 request = pma.pm_state; 4156 4157 if (request != PMC_STATE_DISABLED && 4158 request != PMC_STATE_FREE) { 4159 error = EINVAL; 4160 break; 4161 } 4162 4163 ri = pma.pm_pmc; /* pmc id == row index */ 4164 if (ri < 0 || ri >= (int) md->pmd_npmc) { 4165 error = EINVAL; 4166 break; 4167 } 4168 4169 /* 4170 * We can't disable a PMC with a row-index allocated 4171 * for process virtual PMCs. 4172 */ 4173 4174 if (PMC_ROW_DISP_IS_THREAD(ri) && 4175 request == PMC_STATE_DISABLED) { 4176 error = EBUSY; 4177 break; 4178 } 4179 4180 /* 4181 * otherwise, this PMC on this CPU is either free or 4182 * in system-wide mode. 
4183 */ 4184 4185 pmc_save_cpu_binding(&pb); 4186 pmc_select_cpu(cpu); 4187 4188 pc = pmc_pcpu[cpu]; 4189 phw = pc->pc_hwpmcs[ri]; 4190 4191 /* 4192 * XXX do we need some kind of 'forced' disable? 4193 */ 4194 4195 if (phw->phw_pmc == NULL) { 4196 if (request == PMC_STATE_DISABLED && 4197 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) { 4198 phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED; 4199 PMC_MARK_ROW_STANDALONE(ri); 4200 } else if (request == PMC_STATE_FREE && 4201 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) { 4202 phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED; 4203 PMC_UNMARK_ROW_STANDALONE(ri); 4204 } 4205 /* other cases are a no-op */ 4206 } else 4207 error = EBUSY; 4208 4209 pmc_restore_cpu_binding(&pb); 4210 } 4211 break; 4212 4213 4214 /* 4215 * Allocate a PMC. 4216 */ 4217 case PMC_OP_PMCALLOCATE: 4218 { 4219 struct pmc_op_pmcallocate pa; 4220 4221 error = copyin(arg, &pa, sizeof(pa)); 4222 if (error != 0) 4223 break; 4224 4225 error = pmc_do_op_pmcallocate(td, &pa); 4226 if (error != 0) 4227 break; 4228 4229 error = copyout(&pa, arg, sizeof(pa)); 4230 } 4231 break; 4232 4233 /* 4234 * Attach a PMC to a process. 4235 */ 4236 case PMC_OP_PMCATTACH: 4237 { 4238 struct pmc_op_pmcattach a; 4239 4240 error = copyin(arg, &a, sizeof(a)); 4241 if (error != 0) 4242 break; 4243 4244 error = pmc_do_op_pmcattach(td, a); 4245 } 4246 break; 4247 4248 /* 4249 * Detach an attached PMC from a process. 4250 */ 4251 case PMC_OP_PMCDETACH: 4252 { 4253 struct pmc_op_pmcattach a; 4254 4255 error = copyin(arg, &a, sizeof(a)); 4256 if (error != 0) 4257 break; 4258 4259 error = pmc_do_op_pmcdetach(td, a); 4260 } 4261 break; 4262 4263 4264 /* 4265 * Retrieve the MSR number associated with the counter 4266 * 'pmc_id'. This allows processes to directly use RDPMC 4267 * instructions to read their PMCs, without the overhead of a 4268 * system call. 4269 */ 4270 4271 case PMC_OP_PMCGETMSR: 4272 { 4273 int adjri, ri; 4274 struct pmc *pm; 4275 struct pmc_target *pt; 4276 struct pmc_op_getmsr gm; 4277 struct pmc_classdep *pcd; 4278 4279 PMC_DOWNGRADE_SX(); 4280 4281 if ((error = copyin(arg, &gm, sizeof(gm))) != 0) 4282 break; 4283 4284 if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0) 4285 break; 4286 4287 /* 4288 * The allocated PMC has to be a process virtual PMC, 4289 * i.e., of type MODE_T[CS]. Global PMCs can only be 4290 * read using the PMCREAD operation since they may be 4291 * allocated on a different CPU than the one we could 4292 * be running on at the time of the RDPMC instruction. 4293 * 4294 * The GETMSR operation is not allowed for PMCs that 4295 * are inherited across processes. 4296 */ 4297 4298 if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) || 4299 (pm->pm_flags & PMC_F_DESCENDANTS)) { 4300 error = EINVAL; 4301 break; 4302 } 4303 4304 /* 4305 * It only makes sense to use a RDPMC (or its 4306 * equivalent instruction on non-x86 architectures) on 4307 * a process that has allocated and attached a PMC to 4308 * itself. Conversely the PMC is only allowed to have 4309 * one process attached to it -- its owner. 
4310 */ 4311 4312 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL || 4313 LIST_NEXT(pt, pt_next) != NULL || 4314 pt->pt_process->pp_proc != pm->pm_owner->po_owner) { 4315 error = EINVAL; 4316 break; 4317 } 4318 4319 ri = PMC_TO_ROWINDEX(pm); 4320 pcd = pmc_ri_to_classdep(md, ri, &adjri); 4321 4322 /* PMC class has no 'GETMSR' support */ 4323 if (pcd->pcd_get_msr == NULL) { 4324 error = ENOSYS; 4325 break; 4326 } 4327 4328 if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) < 0) 4329 break; 4330 4331 if ((error = copyout(&gm, arg, sizeof(gm))) < 0) 4332 break; 4333 4334 /* 4335 * Mark our process as using MSRs. Update machine 4336 * state using a forced context switch. 4337 */ 4338 4339 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS; 4340 pmc_force_context_switch(); 4341 4342 } 4343 break; 4344 4345 /* 4346 * Release an allocated PMC. 4347 */ 4348 case PMC_OP_PMCRELEASE: 4349 { 4350 struct pmc_op_simple sp; 4351 4352 error = copyin(arg, &sp, sizeof(sp)); 4353 if (error != 0) 4354 break; 4355 4356 error = pmc_do_op_pmcrelease(sp.pm_pmcid); 4357 } 4358 break; 4359 4360 /* 4361 * Read and/or write a PMC. 4362 */ 4363 case PMC_OP_PMCRW: 4364 { 4365 struct pmc_op_pmcrw prw; 4366 struct pmc_op_pmcrw *pprw; 4367 pmc_value_t oldvalue; 4368 4369 PMC_DOWNGRADE_SX(); 4370 4371 error = copyin(arg, &prw, sizeof(prw)); 4372 if (error != 0) 4373 break; 4374 4375 error = pmc_do_op_pmcrw(&prw, &oldvalue); 4376 if (error != 0) 4377 break; 4378 4379 /* Return old value if requested. */ 4380 if ((prw.pm_flags & PMC_F_OLDVALUE) != 0) { 4381 pprw = arg; 4382 error = copyout(&oldvalue, &pprw->pm_value, 4383 sizeof(prw.pm_value)); 4384 } 4385 } 4386 break; 4387 4388 4389 /* 4390 * Set the sampling rate for a sampling mode PMC and the 4391 * initial count for a counting mode PMC. 4392 */ 4393 4394 case PMC_OP_PMCSETCOUNT: 4395 { 4396 struct pmc *pm; 4397 struct pmc_op_pmcsetcount sc; 4398 4399 PMC_DOWNGRADE_SX(); 4400 4401 if ((error = copyin(arg, &sc, sizeof(sc))) != 0) 4402 break; 4403 4404 if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0) 4405 break; 4406 4407 if (pm->pm_state == PMC_STATE_RUNNING) { 4408 error = EBUSY; 4409 break; 4410 } 4411 4412 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { 4413 /* 4414 * Don't permit requested sample rate to be 4415 * less than pmc_mincount. 4416 */ 4417 if (sc.pm_count < MAX(1, pmc_mincount)) 4418 log(LOG_WARNING, "pmcsetcount: passed sample " 4419 "rate %ju - setting to %u\n", 4420 (uintmax_t)sc.pm_count, 4421 MAX(1, pmc_mincount)); 4422 pm->pm_sc.pm_reloadcount = MAX(MAX(1, pmc_mincount), 4423 sc.pm_count); 4424 } else 4425 pm->pm_sc.pm_initial = sc.pm_count; 4426 } 4427 break; 4428 4429 4430 /* 4431 * Start a PMC. 4432 */ 4433 4434 case PMC_OP_PMCSTART: 4435 { 4436 pmc_id_t pmcid; 4437 struct pmc *pm; 4438 struct pmc_op_simple sp; 4439 4440 sx_assert(&pmc_sx, SX_XLOCKED); 4441 4442 if ((error = copyin(arg, &sp, sizeof(sp))) != 0) 4443 break; 4444 4445 pmcid = sp.pm_pmcid; 4446 4447 if ((error = pmc_find_pmc(pmcid, &pm)) != 0) 4448 break; 4449 4450 KASSERT(pmcid == pm->pm_id, 4451 ("[pmc,%d] pmcid %x != id %x", __LINE__, 4452 pm->pm_id, pmcid)); 4453 4454 if (pm->pm_state == PMC_STATE_RUNNING) /* already running */ 4455 break; 4456 else if (pm->pm_state != PMC_STATE_STOPPED && 4457 pm->pm_state != PMC_STATE_ALLOCATED) { 4458 error = EINVAL; 4459 break; 4460 } 4461 4462 error = pmc_start(pm); 4463 } 4464 break; 4465 4466 4467 /* 4468 * Stop a PMC. 
4469 */ 4470 4471 case PMC_OP_PMCSTOP: 4472 { 4473 pmc_id_t pmcid; 4474 struct pmc *pm; 4475 struct pmc_op_simple sp; 4476 4477 PMC_DOWNGRADE_SX(); 4478 4479 if ((error = copyin(arg, &sp, sizeof(sp))) != 0) 4480 break; 4481 4482 pmcid = sp.pm_pmcid; 4483 4484 /* 4485 * Mark the PMC as inactive and invoke the MD stop 4486 * routines if needed. 4487 */ 4488 4489 if ((error = pmc_find_pmc(pmcid, &pm)) != 0) 4490 break; 4491 4492 KASSERT(pmcid == pm->pm_id, 4493 ("[pmc,%d] pmc id %x != pmcid %x", __LINE__, 4494 pm->pm_id, pmcid)); 4495 4496 if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */ 4497 break; 4498 else if (pm->pm_state != PMC_STATE_RUNNING) { 4499 error = EINVAL; 4500 break; 4501 } 4502 4503 error = pmc_stop(pm); 4504 } 4505 break; 4506 4507 4508 /* 4509 * Write a user supplied value to the log file. 4510 */ 4511 4512 case PMC_OP_WRITELOG: 4513 { 4514 struct pmc_op_writelog wl; 4515 struct pmc_owner *po; 4516 4517 PMC_DOWNGRADE_SX(); 4518 4519 if ((error = copyin(arg, &wl, sizeof(wl))) != 0) 4520 break; 4521 4522 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { 4523 error = EINVAL; 4524 break; 4525 } 4526 4527 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) { 4528 error = EINVAL; 4529 break; 4530 } 4531 4532 error = pmclog_process_userlog(po, &wl); 4533 } 4534 break; 4535 4536 4537 default: 4538 error = EINVAL; 4539 break; 4540 } 4541 4542 if (is_sx_downgraded) 4543 sx_sunlock(&pmc_sx); 4544 else 4545 sx_xunlock(&pmc_sx); 4546 done_syscall: 4547 if (error) 4548 counter_u64_add(pmc_stats.pm_syscall_errors, 1); 4549 4550 return (error); 4551 } 4552 4553 /* 4554 * Helper functions 4555 */ 4556 4557 /* 4558 * Mark the thread as needing callchain capture and post an AST. The 4559 * actual callchain capture will be done in a context where it is safe 4560 * to take page faults. 4561 */ 4562 static void 4563 pmc_post_callchain_callback(void) 4564 { 4565 struct thread *td; 4566 4567 td = curthread; 4568 4569 /* 4570 * If there is multiple PMCs for the same interrupt ignore new post 4571 */ 4572 if ((td->td_pflags & TDP_CALLCHAIN) != 0) 4573 return; 4574 4575 /* 4576 * Mark this thread as needing callchain capture. 4577 * `td->td_pflags' will be safe to touch because this thread 4578 * was in user space when it was interrupted. 4579 */ 4580 td->td_pflags |= TDP_CALLCHAIN; 4581 4582 /* 4583 * Don't let this thread migrate between CPUs until callchain 4584 * capture completes. 4585 */ 4586 sched_pin(); 4587 4588 return; 4589 } 4590 4591 /* 4592 * Find a free slot in the per-cpu array of samples and capture the 4593 * current callchain there. If a sample was successfully added, a bit 4594 * is set in mask 'pmc_cpumask' denoting that the DO_SAMPLES hook 4595 * needs to be invoked from the clock handler. 4596 * 4597 * This function is meant to be called from an NMI handler. It cannot 4598 * use any of the locking primitives supplied by the OS. 4599 */ 4600 static int 4601 pmc_add_sample(ring_type_t ring, struct pmc *pm, struct trapframe *tf) 4602 { 4603 struct pmc_sample *ps; 4604 struct pmc_samplebuffer *psb; 4605 struct thread *td; 4606 int error, cpu, callchaindepth; 4607 bool inuserspace; 4608 4609 error = 0; 4610 4611 /* 4612 * Allocate space for a sample buffer. 
4613 */ 4614 cpu = curcpu; 4615 psb = pmc_pcpu[cpu]->pc_sb[ring]; 4616 inuserspace = TRAPF_USERMODE(tf); 4617 ps = PMC_PROD_SAMPLE(psb); 4618 if (psb->ps_considx != psb->ps_prodidx && 4619 ps->ps_nsamples) { /* in use, reader hasn't caught up */ 4620 pm->pm_pcpu_state[cpu].pps_stalled = 1; 4621 counter_u64_add(pmc_stats.pm_intr_bufferfull, 1); 4622 PMCDBG6(SAM,INT,1,"(spc) cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", 4623 cpu, pm, tf, inuserspace, 4624 (int)(psb->ps_prodidx & pmc_sample_mask), 4625 (int)(psb->ps_considx & pmc_sample_mask)); 4626 callchaindepth = 1; 4627 error = ENOMEM; 4628 goto done; 4629 } 4630 4631 /* Fill in entry. */ 4632 PMCDBG6(SAM,INT,1,"cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm, tf, 4633 inuserspace, (int)(psb->ps_prodidx & pmc_sample_mask), 4634 (int)(psb->ps_considx & pmc_sample_mask)); 4635 4636 td = curthread; 4637 ps->ps_pmc = pm; 4638 ps->ps_td = td; 4639 ps->ps_pid = td->td_proc->p_pid; 4640 ps->ps_tid = td->td_tid; 4641 ps->ps_tsc = pmc_rdtsc(); 4642 ps->ps_ticks = ticks; 4643 ps->ps_cpu = cpu; 4644 ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0; 4645 4646 callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ? 4647 pmc_callchaindepth : 1; 4648 4649 MPASS(ps->ps_pc != NULL); 4650 if (callchaindepth == 1) { 4651 ps->ps_pc[0] = PMC_TRAPFRAME_TO_PC(tf); 4652 } else { 4653 /* 4654 * Kernel stack traversals can be done immediately, while we 4655 * defer to an AST for user space traversals. 4656 */ 4657 if (!inuserspace) { 4658 callchaindepth = pmc_save_kernel_callchain(ps->ps_pc, 4659 callchaindepth, tf); 4660 } else { 4661 pmc_post_callchain_callback(); 4662 callchaindepth = PMC_USER_CALLCHAIN_PENDING; 4663 } 4664 } 4665 4666 ps->ps_nsamples = callchaindepth; /* mark entry as in-use */ 4667 if (ring == PMC_UR) { 4668 ps->ps_nsamples_actual = callchaindepth; 4669 ps->ps_nsamples = PMC_USER_CALLCHAIN_PENDING; 4670 } 4671 4672 KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0, 4673 ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm, 4674 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); 4675 4676 counter_u64_add(pm->pm_runcount, 1); /* hold onto PMC */ 4677 /* increment write pointer */ 4678 psb->ps_prodidx++; 4679 done: 4680 /* mark CPU as needing processing */ 4681 if (callchaindepth != PMC_USER_CALLCHAIN_PENDING) 4682 DPCPU_SET(pmc_sampled, 1); 4683 4684 return (error); 4685 } 4686 4687 /* 4688 * Interrupt processing. 4689 * 4690 * This function may be called from an NMI handler. It cannot use any of the 4691 * locking primitives supplied by the OS. 4692 */ 4693 int 4694 pmc_process_interrupt(int ring, struct pmc *pm, struct trapframe *tf) 4695 { 4696 struct thread *td; 4697 4698 td = curthread; 4699 if ((pm->pm_flags & PMC_F_USERCALLCHAIN) && 4700 (td->td_proc->p_flag & P_KPROC) == 0 && !TRAPF_USERMODE(tf)) { 4701 atomic_add_int(&td->td_pmcpend, 1); 4702 return (pmc_add_sample(PMC_UR, pm, tf)); 4703 } 4704 return (pmc_add_sample(ring, pm, tf)); 4705 } 4706 4707 /* 4708 * Capture a user call chain. This function will be called from ast() 4709 * before control returns to userland and before the process gets 4710 * rescheduled. 
4711 */ 4712 static void 4713 pmc_capture_user_callchain(int cpu, int ring, struct trapframe *tf) 4714 { 4715 struct pmc *pm; 4716 struct pmc_sample *ps; 4717 struct pmc_samplebuffer *psb; 4718 struct thread *td; 4719 uint64_t considx, prodidx; 4720 int nsamples, nrecords, pass, iter; 4721 int start_ticks __diagused; 4722 4723 psb = pmc_pcpu[cpu]->pc_sb[ring]; 4724 td = curthread; 4725 nrecords = INT_MAX; 4726 pass = 0; 4727 start_ticks = ticks; 4728 4729 KASSERT(td->td_pflags & TDP_CALLCHAIN, 4730 ("[pmc,%d] Retrieving callchain for thread that doesn't want it", 4731 __LINE__)); 4732 restart: 4733 if (ring == PMC_UR) 4734 nrecords = atomic_readandclear_32(&td->td_pmcpend); 4735 4736 for (iter = 0, considx = psb->ps_considx, prodidx = psb->ps_prodidx; 4737 considx < prodidx && iter < pmc_nsamples; considx++, iter++) { 4738 ps = PMC_CONS_SAMPLE_OFF(psb, considx); 4739 4740 /* 4741 * Iterate through all deferred callchain requests. Walk from 4742 * the current read pointer to the current write pointer. 4743 */ 4744 #ifdef INVARIANTS 4745 if (ps->ps_nsamples == PMC_SAMPLE_FREE) { 4746 continue; 4747 } 4748 #endif 4749 if (ps->ps_td != td || 4750 ps->ps_nsamples != PMC_USER_CALLCHAIN_PENDING || 4751 ps->ps_pmc->pm_state != PMC_STATE_RUNNING) 4752 continue; 4753 4754 KASSERT(ps->ps_cpu == cpu, 4755 ("[pmc,%d] cpu mismatch ps_cpu=%d pcpu=%d", __LINE__, 4756 ps->ps_cpu, PCPU_GET(cpuid))); 4757 4758 pm = ps->ps_pmc; 4759 KASSERT(pm->pm_flags & PMC_F_CALLCHAIN, 4760 ("[pmc,%d] Retrieving callchain for PMC that doesn't " 4761 "want it", __LINE__)); 4762 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, 4763 ("[pmc,%d] runcount %ju", __LINE__, 4764 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); 4765 4766 if (ring == PMC_UR) { 4767 nsamples = ps->ps_nsamples_actual; 4768 counter_u64_add(pmc_stats.pm_merges, 1); 4769 } else 4770 nsamples = 0; 4771 4772 /* 4773 * Retrieve the callchain and mark the sample buffer 4774 * as 'processable' by the timer tick sweep code. 4775 */ 4776 if (__predict_true(nsamples < pmc_callchaindepth - 1)) 4777 nsamples += pmc_save_user_callchain(ps->ps_pc + nsamples, 4778 pmc_callchaindepth - nsamples - 1, tf); 4779 4780 /* 4781 * We have to prevent hardclock from potentially overwriting 4782 * this sample between when we read the value and when we set 4783 * it. 4784 */ 4785 spinlock_enter(); 4786 4787 /* 4788 * Verify that the sample hasn't been dropped in the meantime. 4789 */ 4790 if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) { 4791 ps->ps_nsamples = nsamples; 4792 /* 4793 * If we couldn't get a sample, simply drop the 4794 * reference. 4795 */ 4796 if (nsamples == 0) 4797 counter_u64_add(pm->pm_runcount, -1); 4798 } 4799 spinlock_exit(); 4800 if (nrecords-- == 1) 4801 break; 4802 } 4803 if (__predict_false(ring == PMC_UR && td->td_pmcpend)) { 4804 if (pass == 0) { 4805 pass = 1; 4806 goto restart; 4807 } 4808 /* only collect samples for this part once */ 4809 td->td_pmcpend = 0; 4810 } 4811 4812 #ifdef INVARIANTS 4813 if ((ticks - start_ticks) > hz) 4814 log(LOG_ERR, "%s took %d ticks\n", __func__, (ticks - start_ticks)); 4815 #endif 4816 /* mark CPU as needing processing */ 4817 DPCPU_SET(pmc_sampled, 1); 4818 } 4819 4820 /* 4821 * Process saved PC samples. 
4822 */ 4823 static void 4824 pmc_process_samples(int cpu, ring_type_t ring) 4825 { 4826 struct pmc *pm; 4827 struct thread *td; 4828 struct pmc_owner *po; 4829 struct pmc_sample *ps; 4830 struct pmc_classdep *pcd; 4831 struct pmc_samplebuffer *psb; 4832 uint64_t delta __diagused; 4833 int adjri, n; 4834 4835 KASSERT(PCPU_GET(cpuid) == cpu, 4836 ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__, 4837 PCPU_GET(cpuid), cpu)); 4838 4839 psb = pmc_pcpu[cpu]->pc_sb[ring]; 4840 delta = psb->ps_prodidx - psb->ps_considx; 4841 MPASS(delta <= pmc_nsamples); 4842 MPASS(psb->ps_considx <= psb->ps_prodidx); 4843 for (n = 0; psb->ps_considx < psb->ps_prodidx; psb->ps_considx++, n++) { 4844 ps = PMC_CONS_SAMPLE(psb); 4845 4846 if (__predict_false(ps->ps_nsamples == PMC_SAMPLE_FREE)) 4847 continue; 4848 4849 /* skip non-running samples */ 4850 pm = ps->ps_pmc; 4851 if (pm->pm_state != PMC_STATE_RUNNING) 4852 goto entrydone; 4853 4854 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, 4855 ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm, 4856 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); 4857 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)), 4858 ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__, 4859 pm, PMC_TO_MODE(pm))); 4860 4861 po = pm->pm_owner; 4862 4863 /* If there is a pending AST wait for completion */ 4864 if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) { 4865 /* 4866 * If we've been waiting more than 1 tick to 4867 * collect a callchain for this record then 4868 * drop it and move on. 4869 */ 4870 if (ticks - ps->ps_ticks > 1) { 4871 /* 4872 * Track how often we hit this as it will 4873 * preferentially lose user samples 4874 * for long running system calls. 4875 */ 4876 counter_u64_add(pmc_stats.pm_overwrites, 1); 4877 goto entrydone; 4878 } 4879 /* Need a rescan at a later time. */ 4880 DPCPU_SET(pmc_sampled, 1); 4881 break; 4882 } 4883 4884 PMCDBG6(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu, 4885 pm, ps->ps_nsamples, ps->ps_flags, 4886 (int)(psb->ps_prodidx & pmc_sample_mask), 4887 (int)(psb->ps_considx & pmc_sample_mask)); 4888 4889 /* 4890 * If this is a process-mode PMC that is attached to 4891 * its owner, and if the PC is in user mode, update 4892 * profiling statistics like timer-based profiling 4893 * would have done. 4894 * 4895 * Otherwise, this is either a sampling-mode PMC that 4896 * is attached to a different process than its owner, 4897 * or a system-wide sampling PMC. Dispatch a log 4898 * entry to the PMC's owner process. 4899 */ 4900 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) { 4901 if (ps->ps_flags & PMC_CC_F_USERSPACE) { 4902 td = FIRST_THREAD_IN_PROC(po->po_owner); 4903 addupc_intr(td, ps->ps_pc[0], 1); 4904 } 4905 } else 4906 pmclog_process_callchain(pm, ps); 4907 4908 entrydone: 4909 ps->ps_nsamples = 0; /* mark entry as free */ 4910 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, 4911 ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm, 4912 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); 4913 4914 counter_u64_add(pm->pm_runcount, -1); 4915 } 4916 4917 counter_u64_add(pmc_stats.pm_log_sweeps, 1); 4918 4919 /* Do not re-enable stalled PMCs if we failed to process any samples */ 4920 if (n == 0) 4921 return; 4922 4923 /* 4924 * Restart any stalled sampling PMCs on this CPU. 4925 * 4926 * If the NMI handler sets the pm_stalled field of a PMC after 4927 * the check below, we'll end up processing the stalled PMC at 4928 * the next hardclock tick. 
4929 */ 4930 for (n = 0; n < md->pmd_npmc; n++) { 4931 pcd = pmc_ri_to_classdep(md, n, &adjri); 4932 KASSERT(pcd != NULL, 4933 ("[pmc,%d] null pcd ri=%d", __LINE__, n)); 4934 (void)(*pcd->pcd_get_config)(cpu, adjri, &pm); 4935 4936 if (pm == NULL || /* !cfg'ed */ 4937 pm->pm_state != PMC_STATE_RUNNING || /* !active */ 4938 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */ 4939 !pm->pm_pcpu_state[cpu].pps_cpustate || /* !desired */ 4940 !pm->pm_pcpu_state[cpu].pps_stalled) /* !stalled */ 4941 continue; 4942 4943 pm->pm_pcpu_state[cpu].pps_stalled = 0; 4944 (void)(*pcd->pcd_start_pmc)(cpu, adjri, pm); 4945 } 4946 } 4947 4948 /* 4949 * Event handlers. 4950 */ 4951 4952 /* 4953 * Handle a process exit. 4954 * 4955 * Remove this process from all hash tables. If this process 4956 * owned any PMCs, turn off those PMCs and deallocate them, 4957 * removing any associations with target processes. 4958 * 4959 * This function will be called by the last 'thread' of a 4960 * process. 4961 * 4962 * XXX This eventhandler gets called early in the exit process. 4963 * Consider using a 'hook' invocation from thread_exit() or equivalent 4964 * spot. Another negative is that kse_exit doesn't seem to call 4965 * exit1() [??]. 4966 */ 4967 static void 4968 pmc_process_exit(void *arg __unused, struct proc *p) 4969 { 4970 struct pmc *pm; 4971 struct pmc_owner *po; 4972 struct pmc_process *pp; 4973 struct pmc_classdep *pcd; 4974 pmc_value_t newvalue, tmp; 4975 int ri, adjri, cpu; 4976 bool is_using_hwpmcs; 4977 4978 PROC_LOCK(p); 4979 is_using_hwpmcs = (p->p_flag & P_HWPMC) != 0; 4980 PROC_UNLOCK(p); 4981 4982 /* 4983 * Log a sysexit event to all SS PMC owners. 4984 */ 4985 PMC_EPOCH_ENTER(); 4986 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) { 4987 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) 4988 pmclog_process_sysexit(po, p->p_pid); 4989 } 4990 PMC_EPOCH_EXIT(); 4991 4992 if (!is_using_hwpmcs) 4993 return; 4994 4995 PMC_GET_SX_XLOCK(); 4996 PMCDBG3(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid, 4997 p->p_comm); 4998 4999 /* 5000 * Since this code is invoked by the last thread in an exiting process, 5001 * we would have context switched IN at some prior point. However, with 5002 * PREEMPTION, kernel mode context switches may happen any time, so we 5003 * want to disable a context switch OUT till we get any PMCs targeting 5004 * this process off the hardware. 5005 * 5006 * We also need to atomically remove this process' entry from our 5007 * target process hash table, using PMC_FLAG_REMOVE. 5008 */ 5009 PMCDBG3(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid, 5010 p->p_comm); 5011 5012 critical_enter(); /* no preemption */ 5013 5014 cpu = curthread->td_oncpu; 5015 5016 pp = pmc_find_process_descriptor(p, PMC_FLAG_REMOVE); 5017 if (pp == NULL) { 5018 critical_exit(); 5019 goto out; 5020 } 5021 5022 PMCDBG2(PRC,EXT,2, "process-exit proc=%p pmc-process=%p", p, pp); 5023 5024 /* 5025 * The exiting process could be the target of some PMCs which will be 5026 * running on currently executing CPU. 5027 * 5028 * We need to turn these PMCs off like we would do at context switch 5029 * OUT time. 5030 */ 5031 for (ri = 0; ri < md->pmd_npmc; ri++) { 5032 /* 5033 * Pick up the pmc pointer from hardware state similar to the 5034 * CSW_OUT code. 
5035 */ 5036 pm = NULL; 5037 pcd = pmc_ri_to_classdep(md, ri, &adjri); 5038 5039 (void)(*pcd->pcd_get_config)(cpu, adjri, &pm); 5040 5041 PMCDBG2(PRC,EXT,2, "ri=%d pm=%p", ri, pm); 5042 5043 if (pm == NULL || !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) 5044 continue; 5045 5046 PMCDBG4(PRC,EXT,2, "ppmcs[%d]=%p pm=%p state=%d", ri, 5047 pp->pp_pmcs[ri].pp_pmc, pm, pm->pm_state); 5048 5049 KASSERT(PMC_TO_ROWINDEX(pm) == ri, 5050 ("[pmc,%d] ri mismatch pmc(%d) ri(%d)", __LINE__, 5051 PMC_TO_ROWINDEX(pm), ri)); 5052 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc, 5053 ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__, pm, ri, 5054 pp->pp_pmcs[ri].pp_pmc)); 5055 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, 5056 ("[pmc,%d] bad runcount ri %d rc %ju", __LINE__, ri, 5057 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); 5058 5059 /* 5060 * Change desired state, and then stop if not stalled. This 5061 * two-step dance should avoid race conditions where an 5062 * interrupt re-enables the PMC after this code has already 5063 * checked the pm_stalled flag. 5064 */ 5065 if (pm->pm_pcpu_state[cpu].pps_cpustate) { 5066 pm->pm_pcpu_state[cpu].pps_cpustate = 0; 5067 if (!pm->pm_pcpu_state[cpu].pps_stalled) { 5068 (void)pcd->pcd_stop_pmc(cpu, adjri, pm); 5069 5070 if (PMC_TO_MODE(pm) == PMC_MODE_TC) { 5071 pcd->pcd_read_pmc(cpu, adjri, pm, 5072 &newvalue); 5073 tmp = newvalue - PMC_PCPU_SAVED(cpu, ri); 5074 5075 mtx_pool_lock_spin(pmc_mtxpool, pm); 5076 pm->pm_gv.pm_savedvalue += tmp; 5077 pp->pp_pmcs[ri].pp_pmcval += tmp; 5078 mtx_pool_unlock_spin(pmc_mtxpool, pm); 5079 } 5080 } 5081 } 5082 5083 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, 5084 ("[pmc,%d] runcount is %d", __LINE__, ri)); 5085 5086 counter_u64_add(pm->pm_runcount, -1); 5087 (void)pcd->pcd_config_pmc(cpu, adjri, NULL); 5088 } 5089 5090 /* 5091 * Inform the MD layer of this pseudo "context switch out". 5092 */ 5093 (void)md->pmd_switch_out(pmc_pcpu[cpu], pp); 5094 5095 critical_exit(); /* ok to be pre-empted now */ 5096 5097 /* 5098 * Unlink this process from the PMCs that are targeting it. This will 5099 * send a signal to all PMC owner's whose PMCs are orphaned. 5100 * 5101 * Log PMC value at exit time if requested. 5102 */ 5103 for (ri = 0; ri < md->pmd_npmc; ri++) { 5104 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) { 5105 if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) != 0 && 5106 PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm))) { 5107 pmclog_process_procexit(pm, pp); 5108 } 5109 pmc_unlink_target_process(pm, pp); 5110 } 5111 } 5112 free(pp, M_PMC); 5113 5114 out: 5115 /* 5116 * If the process owned PMCs, free them up and free up memory. 5117 */ 5118 if ((po = pmc_find_owner_descriptor(p)) != NULL) { 5119 pmc_remove_owner(po); 5120 pmc_destroy_owner_descriptor(po); 5121 } 5122 5123 sx_xunlock(&pmc_sx); 5124 } 5125 5126 /* 5127 * Handle a process fork. 5128 * 5129 * If the parent process 'p1' is under HWPMC monitoring, then copy 5130 * over any attached PMCs that have 'do_descendants' semantics. 5131 */ 5132 static void 5133 pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc, 5134 int flags __unused) 5135 { 5136 struct pmc *pm; 5137 struct pmc_owner *po; 5138 struct pmc_process *ppnew, *ppold; 5139 unsigned int ri; 5140 bool is_using_hwpmcs, do_descendants; 5141 5142 PROC_LOCK(p1); 5143 is_using_hwpmcs = (p1->p_flag & P_HWPMC) != 0; 5144 PROC_UNLOCK(p1); 5145 5146 /* 5147 * If there are system-wide sampling PMCs active, we need to 5148 * log all fork events to their owner's logs. 
5149 */ 5150 PMC_EPOCH_ENTER(); 5151 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) { 5152 if (po->po_flags & PMC_PO_OWNS_LOGFILE) { 5153 pmclog_process_procfork(po, p1->p_pid, newproc->p_pid); 5154 pmclog_process_proccreate(po, newproc, 1); 5155 } 5156 } 5157 PMC_EPOCH_EXIT(); 5158 5159 if (!is_using_hwpmcs) 5160 return; 5161 5162 PMC_GET_SX_XLOCK(); 5163 PMCDBG4(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1, 5164 p1->p_pid, p1->p_comm, newproc); 5165 5166 /* 5167 * If the parent process (curthread->td_proc) is a 5168 * target of any PMCs, look for PMCs that are to be 5169 * inherited, and link these into the new process 5170 * descriptor. 5171 */ 5172 ppold = pmc_find_process_descriptor(curthread->td_proc, PMC_FLAG_NONE); 5173 if (ppold == NULL) 5174 goto done; /* nothing to do */ 5175 5176 do_descendants = false; 5177 for (ri = 0; ri < md->pmd_npmc; ri++) { 5178 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL && 5179 (pm->pm_flags & PMC_F_DESCENDANTS) != 0) { 5180 do_descendants = true; 5181 break; 5182 } 5183 } 5184 if (!do_descendants) /* nothing to do */ 5185 goto done; 5186 5187 /* 5188 * Now mark the new process as being tracked by this driver. 5189 */ 5190 PROC_LOCK(newproc); 5191 newproc->p_flag |= P_HWPMC; 5192 PROC_UNLOCK(newproc); 5193 5194 /* Allocate a descriptor for the new process. */ 5195 ppnew = pmc_find_process_descriptor(newproc, PMC_FLAG_ALLOCATE); 5196 if (ppnew == NULL) 5197 goto done; 5198 5199 /* 5200 * Run through all PMCs that were targeting the old process 5201 * and which specified F_DESCENDANTS and attach them to the 5202 * new process. 5203 * 5204 * Log the fork event to all owners of PMCs attached to this 5205 * process, if not already logged. 5206 */ 5207 for (ri = 0; ri < md->pmd_npmc; ri++) { 5208 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL && 5209 (pm->pm_flags & PMC_F_DESCENDANTS) != 0) { 5210 pmc_link_target_process(pm, ppnew); 5211 po = pm->pm_owner; 5212 if (po->po_sscount == 0 && 5213 (po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) { 5214 pmclog_process_procfork(po, p1->p_pid, 5215 newproc->p_pid); 5216 } 5217 } 5218 } 5219 5220 done: 5221 sx_xunlock(&pmc_sx); 5222 } 5223 5224 static void 5225 pmc_process_threadcreate(struct thread *td) 5226 { 5227 struct pmc_owner *po; 5228 5229 PMC_EPOCH_ENTER(); 5230 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) { 5231 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) 5232 pmclog_process_threadcreate(po, td, 1); 5233 } 5234 PMC_EPOCH_EXIT(); 5235 } 5236 5237 static void 5238 pmc_process_threadexit(struct thread *td) 5239 { 5240 struct pmc_owner *po; 5241 5242 PMC_EPOCH_ENTER(); 5243 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) { 5244 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) 5245 pmclog_process_threadexit(po, td); 5246 } 5247 PMC_EPOCH_EXIT(); 5248 } 5249 5250 static void 5251 pmc_process_proccreate(struct proc *p) 5252 { 5253 struct pmc_owner *po; 5254 5255 PMC_EPOCH_ENTER(); 5256 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) { 5257 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) 5258 pmclog_process_proccreate(po, p, 1 /* sync */); 5259 } 5260 PMC_EPOCH_EXIT(); 5261 } 5262 5263 static void 5264 pmc_process_allproc(struct pmc *pm) 5265 { 5266 struct pmc_owner *po; 5267 struct thread *td; 5268 struct proc *p; 5269 5270 po = pm->pm_owner; 5271 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) 5272 return; 5273 5274 sx_slock(&allproc_lock); 5275 FOREACH_PROC_IN_SYSTEM(p) { 5276 pmclog_process_proccreate(po, p, 0 /* sync */); 5277 PROC_LOCK(p); 5278 FOREACH_THREAD_IN_PROC(p, td) 5279 
pmclog_process_threadcreate(po, td, 0 /* sync */); 5280 PROC_UNLOCK(p); 5281 } 5282 sx_sunlock(&allproc_lock); 5283 pmclog_flush(po, 0); 5284 } 5285 5286 static void 5287 pmc_kld_load(void *arg __unused, linker_file_t lf) 5288 { 5289 struct pmc_owner *po; 5290 5291 /* 5292 * Notify owners of system sampling PMCs about KLD operations. 5293 */ 5294 PMC_EPOCH_ENTER(); 5295 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) { 5296 if (po->po_flags & PMC_PO_OWNS_LOGFILE) 5297 pmclog_process_map_in(po, (pid_t) -1, 5298 (uintfptr_t) lf->address, lf->pathname); 5299 } 5300 PMC_EPOCH_EXIT(); 5301 5302 /* 5303 * TODO: Notify owners of (all) process-sampling PMCs too. 5304 */ 5305 } 5306 5307 static void 5308 pmc_kld_unload(void *arg __unused, const char *filename __unused, 5309 caddr_t address, size_t size) 5310 { 5311 struct pmc_owner *po; 5312 5313 PMC_EPOCH_ENTER(); 5314 CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) { 5315 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) { 5316 pmclog_process_map_out(po, (pid_t)-1, 5317 (uintfptr_t)address, (uintfptr_t)address + size); 5318 } 5319 } 5320 PMC_EPOCH_EXIT(); 5321 5322 /* 5323 * TODO: Notify owners of process-sampling PMCs. 5324 */ 5325 } 5326 5327 /* 5328 * initialization 5329 */ 5330 static const char * 5331 pmc_name_of_pmcclass(enum pmc_class class) 5332 { 5333 5334 switch (class) { 5335 #undef __PMC_CLASS 5336 #define __PMC_CLASS(S,V,D) \ 5337 case PMC_CLASS_##S: \ 5338 return #S; 5339 __PMC_CLASSES(); 5340 default: 5341 return ("<unknown>"); 5342 } 5343 } 5344 5345 /* 5346 * Base class initializer: allocate structure and set default classes. 5347 */ 5348 struct pmc_mdep * 5349 pmc_mdep_alloc(int nclasses) 5350 { 5351 struct pmc_mdep *md; 5352 int n; 5353 5354 /* SOFT + md classes */ 5355 n = 1 + nclasses; 5356 md = malloc(sizeof(struct pmc_mdep) + n * sizeof(struct pmc_classdep), 5357 M_PMC, M_WAITOK | M_ZERO); 5358 md->pmd_nclass = n; 5359 5360 /* Default methods */ 5361 md->pmd_switch_in = generic_switch_in; 5362 md->pmd_switch_out = generic_switch_out; 5363 5364 /* Add base class. 
*/ 5365 pmc_soft_initialize(md); 5366 return (md); 5367 } 5368 5369 void 5370 pmc_mdep_free(struct pmc_mdep *md) 5371 { 5372 pmc_soft_finalize(md); 5373 free(md, M_PMC); 5374 } 5375 5376 static int 5377 generic_switch_in(struct pmc_cpu *pc __unused, struct pmc_process *pp __unused) 5378 { 5379 5380 return (0); 5381 } 5382 5383 static int 5384 generic_switch_out(struct pmc_cpu *pc __unused, struct pmc_process *pp __unused) 5385 { 5386 5387 return (0); 5388 } 5389 5390 static struct pmc_mdep * 5391 pmc_generic_cpu_initialize(void) 5392 { 5393 struct pmc_mdep *md; 5394 5395 md = pmc_mdep_alloc(0); 5396 5397 md->pmd_cputype = PMC_CPU_GENERIC; 5398 5399 return (md); 5400 } 5401 5402 static void 5403 pmc_generic_cpu_finalize(struct pmc_mdep *md __unused) 5404 { 5405 5406 } 5407 5408 static int 5409 pmc_initialize(void) 5410 { 5411 struct pcpu *pc; 5412 struct pmc_binding pb; 5413 struct pmc_classdep *pcd; 5414 struct pmc_sample *ps; 5415 struct pmc_samplebuffer *sb; 5416 int c, cpu, error, n, ri; 5417 u_int maxcpu, domain; 5418 5419 md = NULL; 5420 error = 0; 5421 5422 pmc_stats.pm_intr_ignored = counter_u64_alloc(M_WAITOK); 5423 pmc_stats.pm_intr_processed = counter_u64_alloc(M_WAITOK); 5424 pmc_stats.pm_intr_bufferfull = counter_u64_alloc(M_WAITOK); 5425 pmc_stats.pm_syscalls = counter_u64_alloc(M_WAITOK); 5426 pmc_stats.pm_syscall_errors = counter_u64_alloc(M_WAITOK); 5427 pmc_stats.pm_buffer_requests = counter_u64_alloc(M_WAITOK); 5428 pmc_stats.pm_buffer_requests_failed = counter_u64_alloc(M_WAITOK); 5429 pmc_stats.pm_log_sweeps = counter_u64_alloc(M_WAITOK); 5430 pmc_stats.pm_merges = counter_u64_alloc(M_WAITOK); 5431 pmc_stats.pm_overwrites = counter_u64_alloc(M_WAITOK); 5432 5433 #ifdef HWPMC_DEBUG 5434 /* parse debug flags first */ 5435 if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags", 5436 pmc_debugstr, sizeof(pmc_debugstr))) { 5437 pmc_debugflags_parse(pmc_debugstr, pmc_debugstr + 5438 strlen(pmc_debugstr)); 5439 } 5440 #endif 5441 5442 PMCDBG1(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION); 5443 5444 /* check kernel version */ 5445 if (pmc_kernel_version != PMC_VERSION) { 5446 if (pmc_kernel_version == 0) 5447 printf("hwpmc: this kernel has not been compiled with " 5448 "'options HWPMC_HOOKS'.\n"); 5449 else 5450 printf("hwpmc: kernel version (0x%x) does not match " 5451 "module version (0x%x).\n", pmc_kernel_version, 5452 PMC_VERSION); 5453 return (EPROGMISMATCH); 5454 } 5455 5456 /* 5457 * check sysctl parameters 5458 */ 5459 if (pmc_hashsize <= 0) { 5460 printf("hwpmc: tunable \"hashsize\"=%d must be " 5461 "greater than zero.\n", pmc_hashsize); 5462 pmc_hashsize = PMC_HASH_SIZE; 5463 } 5464 5465 if (pmc_nsamples <= 0 || pmc_nsamples > 65535) { 5466 printf("hwpmc: tunable \"nsamples\"=%d out of " 5467 "range.\n", pmc_nsamples); 5468 pmc_nsamples = PMC_NSAMPLES; 5469 } 5470 pmc_sample_mask = pmc_nsamples - 1; 5471 5472 if (pmc_callchaindepth <= 0 || 5473 pmc_callchaindepth > PMC_CALLCHAIN_DEPTH_MAX) { 5474 printf("hwpmc: tunable \"callchaindepth\"=%d out of " 5475 "range - using %d.\n", pmc_callchaindepth, 5476 PMC_CALLCHAIN_DEPTH_MAX); 5477 pmc_callchaindepth = PMC_CALLCHAIN_DEPTH_MAX; 5478 } 5479 5480 md = pmc_md_initialize(); 5481 if (md == NULL) { 5482 /* Default to generic CPU. */ 5483 md = pmc_generic_cpu_initialize(); 5484 if (md == NULL) 5485 return (ENOSYS); 5486 } 5487 5488 /* 5489 * Refresh classes base ri. Optional classes may come in different 5490 * order. 
5491 */ 5492 for (ri = c = 0; c < md->pmd_nclass; c++) { 5493 pcd = &md->pmd_classdep[c]; 5494 pcd->pcd_ri = ri; 5495 ri += pcd->pcd_num; 5496 } 5497 5498 KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1, 5499 ("[pmc,%d] no classes or pmcs", __LINE__)); 5500 5501 /* Compute the map from row-indices to classdep pointers. */ 5502 pmc_rowindex_to_classdep = malloc(sizeof(struct pmc_classdep *) * 5503 md->pmd_npmc, M_PMC, M_WAITOK | M_ZERO); 5504 5505 for (n = 0; n < md->pmd_npmc; n++) 5506 pmc_rowindex_to_classdep[n] = NULL; 5507 5508 for (ri = c = 0; c < md->pmd_nclass; c++) { 5509 pcd = &md->pmd_classdep[c]; 5510 for (n = 0; n < pcd->pcd_num; n++, ri++) 5511 pmc_rowindex_to_classdep[ri] = pcd; 5512 } 5513 5514 KASSERT(ri == md->pmd_npmc, 5515 ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__, 5516 ri, md->pmd_npmc)); 5517 5518 maxcpu = pmc_cpu_max(); 5519 5520 /* allocate space for the per-cpu array */ 5521 pmc_pcpu = malloc(maxcpu * sizeof(struct pmc_cpu *), M_PMC, 5522 M_WAITOK | M_ZERO); 5523 5524 /* per-cpu 'saved values' for managing process-mode PMCs */ 5525 pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc, 5526 M_PMC, M_WAITOK); 5527 5528 /* Perform CPU-dependent initialization. */ 5529 pmc_save_cpu_binding(&pb); 5530 error = 0; 5531 for (cpu = 0; error == 0 && cpu < maxcpu; cpu++) { 5532 if (!pmc_cpu_is_active(cpu)) 5533 continue; 5534 pmc_select_cpu(cpu); 5535 pmc_pcpu[cpu] = malloc(sizeof(struct pmc_cpu) + 5536 md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC, 5537 M_WAITOK | M_ZERO); 5538 for (n = 0; error == 0 && n < md->pmd_nclass; n++) 5539 if (md->pmd_classdep[n].pcd_num > 0) 5540 error = md->pmd_classdep[n].pcd_pcpu_init(md, 5541 cpu); 5542 } 5543 pmc_restore_cpu_binding(&pb); 5544 5545 if (error != 0) 5546 return (error); 5547 5548 /* allocate space for the sample array */ 5549 for (cpu = 0; cpu < maxcpu; cpu++) { 5550 if (!pmc_cpu_is_active(cpu)) 5551 continue; 5552 pc = pcpu_find(cpu); 5553 domain = pc->pc_domain; 5554 sb = malloc_domainset(sizeof(struct pmc_samplebuffer) + 5555 pmc_nsamples * sizeof(struct pmc_sample), M_PMC, 5556 DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); 5557 5558 KASSERT(pmc_pcpu[cpu] != NULL, 5559 ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu)); 5560 5561 sb->ps_callchains = malloc_domainset(pmc_callchaindepth * 5562 pmc_nsamples * sizeof(uintptr_t), M_PMC, 5563 DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); 5564 5565 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) 5566 ps->ps_pc = sb->ps_callchains + 5567 (n * pmc_callchaindepth); 5568 5569 pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb; 5570 5571 sb = malloc_domainset(sizeof(struct pmc_samplebuffer) + 5572 pmc_nsamples * sizeof(struct pmc_sample), M_PMC, 5573 DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); 5574 5575 sb->ps_callchains = malloc_domainset(pmc_callchaindepth * 5576 pmc_nsamples * sizeof(uintptr_t), M_PMC, 5577 DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); 5578 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) 5579 ps->ps_pc = sb->ps_callchains + 5580 (n * pmc_callchaindepth); 5581 5582 pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb; 5583 5584 sb = malloc_domainset(sizeof(struct pmc_samplebuffer) + 5585 pmc_nsamples * sizeof(struct pmc_sample), M_PMC, 5586 DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); 5587 sb->ps_callchains = malloc_domainset(pmc_callchaindepth * 5588 pmc_nsamples * sizeof(uintptr_t), M_PMC, 5589 DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); 5590 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) 5591 ps->ps_pc = sb->ps_callchains + n * 
pmc_callchaindepth; 5592 5593 pmc_pcpu[cpu]->pc_sb[PMC_UR] = sb; 5594 } 5595 5596 /* allocate space for the row disposition array */ 5597 pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc, 5598 M_PMC, M_WAITOK | M_ZERO); 5599 5600 /* mark all PMCs as available */ 5601 for (n = 0; n < md->pmd_npmc; n++) 5602 PMC_MARK_ROW_FREE(n); 5603 5604 /* allocate thread hash tables */ 5605 pmc_ownerhash = hashinit(pmc_hashsize, M_PMC, 5606 &pmc_ownerhashmask); 5607 5608 pmc_processhash = hashinit(pmc_hashsize, M_PMC, 5609 &pmc_processhashmask); 5610 mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf", 5611 MTX_SPIN); 5612 5613 CK_LIST_INIT(&pmc_ss_owners); 5614 pmc_ss_count = 0; 5615 5616 /* allocate a pool of spin mutexes */ 5617 pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size, 5618 MTX_SPIN); 5619 5620 PMCDBG4(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx " 5621 "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask, 5622 pmc_processhash, pmc_processhashmask); 5623 5624 /* Initialize a spin mutex for the thread free list. */ 5625 mtx_init(&pmc_threadfreelist_mtx, "pmc-threadfreelist", "pmc-leaf", 5626 MTX_SPIN); 5627 5628 /* Initialize the task to prune the thread free list. */ 5629 TASK_INIT(&free_task, 0, pmc_thread_descriptor_pool_free_task, NULL); 5630 5631 /* register process {exit,fork,exec} handlers */ 5632 pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit, 5633 pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY); 5634 pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork, 5635 pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY); 5636 5637 /* register kld event handlers */ 5638 pmc_kld_load_tag = EVENTHANDLER_REGISTER(kld_load, pmc_kld_load, 5639 NULL, EVENTHANDLER_PRI_ANY); 5640 pmc_kld_unload_tag = EVENTHANDLER_REGISTER(kld_unload, pmc_kld_unload, 5641 NULL, EVENTHANDLER_PRI_ANY); 5642 5643 /* initialize logging */ 5644 pmclog_initialize(); 5645 5646 /* set hook functions */ 5647 pmc_intr = md->pmd_intr; 5648 wmb(); 5649 pmc_hook = pmc_hook_handler; 5650 5651 if (error == 0) { 5652 printf(PMC_MODULE_NAME ":"); 5653 for (n = 0; n < md->pmd_nclass; n++) { 5654 if (md->pmd_classdep[n].pcd_num == 0) 5655 continue; 5656 pcd = &md->pmd_classdep[n]; 5657 printf(" %s/%d/%d/0x%b", 5658 pmc_name_of_pmcclass(pcd->pcd_class), 5659 pcd->pcd_num, 5660 pcd->pcd_width, 5661 pcd->pcd_caps, 5662 "\20" 5663 "\1INT\2USR\3SYS\4EDG\5THR" 5664 "\6REA\7WRI\10INV\11QUA\12PRC" 5665 "\13TAG\14CSC"); 5666 } 5667 printf("\n"); 5668 } 5669 5670 return (error); 5671 } 5672 5673 /* prepare to be unloaded */ 5674 static void 5675 pmc_cleanup(void) 5676 { 5677 struct pmc_binding pb; 5678 struct pmc_owner *po, *tmp; 5679 struct pmc_ownerhash *ph; 5680 struct pmc_processhash *prh __pmcdbg_used; 5681 u_int maxcpu; 5682 int cpu, c; 5683 5684 PMCDBG0(MOD,INI,0, "cleanup"); 5685 5686 /* switch off sampling */ 5687 CPU_FOREACH(cpu) 5688 DPCPU_ID_SET(cpu, pmc_sampled, 0); 5689 pmc_intr = NULL; 5690 5691 sx_xlock(&pmc_sx); 5692 if (pmc_hook == NULL) { /* being unloaded already */ 5693 sx_xunlock(&pmc_sx); 5694 return; 5695 } 5696 5697 pmc_hook = NULL; /* prevent new threads from entering module */ 5698 5699 /* deregister event handlers */ 5700 EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag); 5701 EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag); 5702 EVENTHANDLER_DEREGISTER(kld_load, pmc_kld_load_tag); 5703 EVENTHANDLER_DEREGISTER(kld_unload, pmc_kld_unload_tag); 5704 5705 /* send SIGBUS to all owner threads, free up allocations */ 5706 if (pmc_ownerhash != NULL) { 5707 for (ph = pmc_ownerhash; 5708 ph <= 
&pmc_ownerhash[pmc_ownerhashmask]; 5709 ph++) { 5710 LIST_FOREACH_SAFE(po, ph, po_next, tmp) { 5711 pmc_remove_owner(po); 5712 5713 PMCDBG3(MOD,INI,2, 5714 "cleanup signal proc=%p (%d, %s)", 5715 po->po_owner, po->po_owner->p_pid, 5716 po->po_owner->p_comm); 5717 5718 PROC_LOCK(po->po_owner); 5719 kern_psignal(po->po_owner, SIGBUS); 5720 PROC_UNLOCK(po->po_owner); 5721 5722 pmc_destroy_owner_descriptor(po); 5723 } 5724 } 5725 } 5726 5727 /* reclaim allocated data structures */ 5728 taskqueue_drain(taskqueue_fast, &free_task); 5729 mtx_destroy(&pmc_threadfreelist_mtx); 5730 pmc_thread_descriptor_pool_drain(); 5731 5732 if (pmc_mtxpool != NULL) 5733 mtx_pool_destroy(&pmc_mtxpool); 5734 5735 mtx_destroy(&pmc_processhash_mtx); 5736 if (pmc_processhash != NULL) { 5737 #ifdef HWPMC_DEBUG 5738 struct pmc_process *pp; 5739 5740 PMCDBG0(MOD,INI,3, "destroy process hash"); 5741 for (prh = pmc_processhash; 5742 prh <= &pmc_processhash[pmc_processhashmask]; 5743 prh++) 5744 LIST_FOREACH(pp, prh, pp_next) 5745 PMCDBG1(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid); 5746 #endif 5747 5748 hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask); 5749 pmc_processhash = NULL; 5750 } 5751 5752 if (pmc_ownerhash != NULL) { 5753 PMCDBG0(MOD,INI,3, "destroy owner hash"); 5754 hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask); 5755 pmc_ownerhash = NULL; 5756 } 5757 5758 KASSERT(CK_LIST_EMPTY(&pmc_ss_owners), 5759 ("[pmc,%d] Global SS owner list not empty", __LINE__)); 5760 KASSERT(pmc_ss_count == 0, 5761 ("[pmc,%d] Global SS count not empty", __LINE__)); 5762 5763 /* do processor and pmc-class dependent cleanup */ 5764 maxcpu = pmc_cpu_max(); 5765 5766 PMCDBG0(MOD,INI,3, "md cleanup"); 5767 if (md) { 5768 pmc_save_cpu_binding(&pb); 5769 for (cpu = 0; cpu < maxcpu; cpu++) { 5770 PMCDBG2(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p", 5771 cpu, pmc_pcpu[cpu]); 5772 if (!pmc_cpu_is_active(cpu) || pmc_pcpu[cpu] == NULL) 5773 continue; 5774 5775 pmc_select_cpu(cpu); 5776 for (c = 0; c < md->pmd_nclass; c++) { 5777 if (md->pmd_classdep[c].pcd_num > 0) { 5778 md->pmd_classdep[c].pcd_pcpu_fini(md, 5779 cpu); 5780 } 5781 } 5782 } 5783 5784 if (md->pmd_cputype == PMC_CPU_GENERIC) 5785 pmc_generic_cpu_finalize(md); 5786 else 5787 pmc_md_finalize(md); 5788 5789 pmc_mdep_free(md); 5790 md = NULL; 5791 pmc_restore_cpu_binding(&pb); 5792 } 5793 5794 /* Free per-cpu descriptors. 
*/ 5795 for (cpu = 0; cpu < maxcpu; cpu++) { 5796 if (!pmc_cpu_is_active(cpu)) 5797 continue; 5798 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_HR] != NULL, 5799 ("[pmc,%d] Null hw cpu sample buffer cpu=%d", __LINE__, 5800 cpu)); 5801 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_SR] != NULL, 5802 ("[pmc,%d] Null sw cpu sample buffer cpu=%d", __LINE__, 5803 cpu)); 5804 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_UR] != NULL, 5805 ("[pmc,%d] Null userret cpu sample buffer cpu=%d", __LINE__, 5806 cpu)); 5807 free(pmc_pcpu[cpu]->pc_sb[PMC_HR]->ps_callchains, M_PMC); 5808 free(pmc_pcpu[cpu]->pc_sb[PMC_HR], M_PMC); 5809 free(pmc_pcpu[cpu]->pc_sb[PMC_SR]->ps_callchains, M_PMC); 5810 free(pmc_pcpu[cpu]->pc_sb[PMC_SR], M_PMC); 5811 free(pmc_pcpu[cpu]->pc_sb[PMC_UR]->ps_callchains, M_PMC); 5812 free(pmc_pcpu[cpu]->pc_sb[PMC_UR], M_PMC); 5813 free(pmc_pcpu[cpu], M_PMC); 5814 } 5815 5816 free(pmc_pcpu, M_PMC); 5817 pmc_pcpu = NULL; 5818 5819 free(pmc_pcpu_saved, M_PMC); 5820 pmc_pcpu_saved = NULL; 5821 5822 if (pmc_pmcdisp != NULL) { 5823 free(pmc_pmcdisp, M_PMC); 5824 pmc_pmcdisp = NULL; 5825 } 5826 5827 if (pmc_rowindex_to_classdep != NULL) { 5828 free(pmc_rowindex_to_classdep, M_PMC); 5829 pmc_rowindex_to_classdep = NULL; 5830 } 5831 5832 pmclog_shutdown(); 5833 counter_u64_free(pmc_stats.pm_intr_ignored); 5834 counter_u64_free(pmc_stats.pm_intr_processed); 5835 counter_u64_free(pmc_stats.pm_intr_bufferfull); 5836 counter_u64_free(pmc_stats.pm_syscalls); 5837 counter_u64_free(pmc_stats.pm_syscall_errors); 5838 counter_u64_free(pmc_stats.pm_buffer_requests); 5839 counter_u64_free(pmc_stats.pm_buffer_requests_failed); 5840 counter_u64_free(pmc_stats.pm_log_sweeps); 5841 counter_u64_free(pmc_stats.pm_merges); 5842 counter_u64_free(pmc_stats.pm_overwrites); 5843 sx_xunlock(&pmc_sx); /* we are done */ 5844 } 5845 5846 /* 5847 * The function called at load/unload. 5848 */ 5849 static int 5850 load(struct module *module __unused, int cmd, void *arg __unused) 5851 { 5852 int error; 5853 5854 error = 0; 5855 5856 switch (cmd) { 5857 case MOD_LOAD: 5858 /* initialize the subsystem */ 5859 error = pmc_initialize(); 5860 if (error != 0) 5861 break; 5862 PMCDBG2(MOD,INI,1, "syscall=%d maxcpu=%d", pmc_syscall_num, 5863 pmc_cpu_max()); 5864 break; 5865 case MOD_UNLOAD: 5866 case MOD_SHUTDOWN: 5867 pmc_cleanup(); 5868 PMCDBG0(MOD,INI,1, "unloaded"); 5869 break; 5870 default: 5871 error = EINVAL; 5872 break; 5873 } 5874 5875 return (error); 5876 } 5877