Lines Matching +full:re +full:- +full:sampling
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2003-2008 Joseph Koshy
101 struct pmc_cpu **pmc_pcpu; /* per-cpu state */
104 #define PMC_PCPU_SAVED(C, R) pmc_pcpu_saved[(R) + md->pmd_npmc * (C)]
120 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
121 KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()), \
138 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
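The atomic_add_int() fragments above belong to the row-disposition macros (PMC_MARK_ROW_STANDALONE and friends in the full source). The apparent convention: one signed counter per row, decremented toward -pmc_cpu_max_active() for system-wide ("standalone") claims and incremented for process-virtual ("thread") claims, so the sign alone tells the row's disposition. A minimal userland sketch of that convention, illustrative only and not driver code:

#include <assert.h>
#include <stdio.h>

#define NPMC 4
static int pmcdisp[NPMC];                    /* stand-in for pmc_pmcdisp[] */

static void mark_row_standalone(int ri)   { assert(pmcdisp[ri] <= 0); pmcdisp[ri]--; }
static void unmark_row_standalone(int ri) { pmcdisp[ri]++; assert(pmcdisp[ri] <= 0); }
static void mark_row_thread(int ri)       { assert(pmcdisp[ri] >= 0); pmcdisp[ri]++; }
static void unmark_row_thread(int ri)     { pmcdisp[ri]--; assert(pmcdisp[ri] >= 0); }

int main(void) {
    mark_row_standalone(0);    /* row 0 claimed system-wide: disp = -1 */
    mark_row_thread(1);        /* row 1 claimed process-virtual: disp = +1 */
    /* mark_row_thread(0) would trip its assertion: the two dispositions
     * exclude each other, which is what pmc_can_allocate_row() enforces. */
    printf("disp[0]=%d disp[1]=%d\n", pmcdisp[0], pmcdisp[1]);
    unmark_row_standalone(0);
    unmark_row_thread(1);
    return 0;
}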
168 * List of PMC owners with system-wide sampling PMCs.
180 (md->pmd_npmc * sizeof(struct pmc_threadpmcstate)))
333 * kern.hwpmc.hashsize -- determines the number of rows in the
342 	 * kern.hwpmc.nsamples -- number of PC samples/callchain stacks per CPU
349 static uint64_t pmc_sample_mask = PMC_NSAMPLES - 1;
352 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
360 * kern.hwpmc.threadfreelist_entries -- number of free entries
367 * kern.hwpmc.threadfreelist_max -- maximum number of free entries
375 * kern.hwpmc.mincount -- minimum sample count
380 "minimum count for sampling counters");
383 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
384 * allocate system-wide PMCs.
387 * if system-wide measurements need to be taken concurrently with other
388 * per-process measurements. This feature is turned off by default.
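These knobs are loader tunables and/or run-time sysctls under kern.hwpmc. A small FreeBSD userland sketch for inspecting one of them, assuming hwpmc(4) is loaded so the tree exists (writable knobs can be set with sysctl(8), the boot-time ones from loader.conf(5)):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int main(void) {
    int nsamples;
    size_t len = sizeof(nsamples);

    /* knob name taken from the comment above */
    if (sysctlbyname("kern.hwpmc.nsamples", &nsamples, &len, NULL, 0) == -1) {
        perror("sysctlbyname(kern.hwpmc.nsamples)");
        return 1;
    }
    printf("kern.hwpmc.nsamples = %d\n", nsamples);
    return 0;
}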
398 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
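That multiplier is the classic golden-ratio (Fibonacci) hashing constant. A compilable sketch of the technique for LONG_BIT == 64; the driver's actual macro may differ in detail, and the constant below matches the formula above up to rounding in the last bit:

#include <stdint.h>
#include <stdio.h>

#define HASH_MULTIPLIER UINT64_C(0x9e3779b97f4a7c15)

static unsigned hash_ptr(const void *p, unsigned mask) {
    /* >> 2 drops low bits that are zero for aligned pointers; the
     * multiply spreads the remaining bits; the mask picks a row in a
     * power-of-two table (cf. kern.hwpmc.hashsize above). */
    return (unsigned)((((uint64_t)(uintptr_t)p >> 2) * HASH_MULTIPLIER) & mask);
}

int main(void) {
    int dummy[4];
    unsigned mask = 16 - 1;
    for (int i = 0; i < 4; i++)
        printf("%p -> row %u\n", (void *)&dummy[i], hash_ptr(&dummy[i], mask));
    return 0;
}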
474 kwlen = p - q; in pmc_debugflags_parse()
479 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \ in pmc_debugflags_parse()
480 newbits = &tmpflags->pdb_ ## F; in pmc_debugflags_parse()
490 DBG_SET_FLAG_MAJ("sampling", SAM); in pmc_debugflags_parse()
509 if ((kwlen = p - q) == 0) { in pmc_debugflags_parse()
516 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \ in pmc_debugflags_parse()
592 if (error == 0 && req->newptr != NULL) { in pmc_debugflags_sysctl_handler()
593 fence = newstr + (n < req->newlen ? n : req->newlen + 1); in pmc_debugflags_sysctl_handler()
613 KASSERT(ri >= 0 && ri < md->pmd_npmc, in pmc_ri_to_classdep()
614 ("[pmc,%d] illegal row-index %d", __LINE__, ri)); in pmc_ri_to_classdep()
620 *adjri = ri - pcd->pcd_ri; in pmc_ri_to_classdep()
621 KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num, in pmc_ri_to_classdep()
622 ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri)); in pmc_ri_to_classdep()
632 * - target process descriptors, one per target process
633 * - owner process descriptors (and attached lists), one per owner process
634 * - lookup hash tables for owner and target processes
635 * - PMC descriptors (and attached lists)
636 * - per-cpu hardware state
637 * - the 'hook' variable through which the kernel calls into
639 * - the machine hardware state (managed by the MD layer)
643 * - thread context-switch code
644 * - interrupt handlers (possibly on multiple cpus)
645 * - kernel threads on multiple cpus running on behalf of user
647 * - this driver's private kernel threads
653 * - The global SX lock "pmc_sx" is used to protect internal
669 * pmc_sx lock we first check that 'pmc_hook' is non-null before
673 * - Lookups of target process structures and owner process structures
677 * with their own private spin-mutexes, "pmc_processhash_mtx" and
680 	 * - Interrupt handlers work in a lock-free manner. At interrupt
681 * time, handlers look at the PMC pointer (phw->phw_pmc) configured
693 * - Context-switch handling with process-private PMCs needs more
702 * - each target process structure 'pmc_process' has an array
705 * - At context switch IN time, each "target" PMC in RUNNING state
707 * the per-cpu phw array. The 'runcount' for the PMC is
710 * - At context switch OUT time, all process-virtual PMCs are stopped
712 * only if the PMC is in a non-deleted state (the PMCs state could
715 * Note that since in-between a switch IN on a processor and a switch
721 * - OP PMCRELEASE could be called on a PMC at any time (the PMC could
726 * state 'DELETED'. If the runcount of the PMC is non-zero then
732 * a spin-mutex. In order to save space, we use a mutex pool.
735 * - Type "pmc-sx", used by the global SX lock.
736 * - Type "pmc-sleep", for sleep mutexes used by logger threads.
737 * - Type "pmc-per-proc", for protecting PMC owner descriptors.
738 * - Type "pmc-leaf", used for all other spin mutexes.
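A userland analogue of the locking split described above, purely illustrative: one long-lived reader/writer lock for configuration work (standing in for the pmc_sx SX lock) plus a pool of small locks chosen by object address (standing in for mtx_pool_find(9)), so per-process records get cheap hot-path locking without each embedding a full mutex:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_rwlock_t cfg_lock = PTHREAD_RWLOCK_INITIALIZER; /* ~pmc_sx */

#define POOLSIZE 8                       /* ~kern.hwpmc.mtxpoolsize */
static pthread_mutex_t pool[POOLSIZE];

static pthread_mutex_t *pool_find(const void *obj) {
    /* pick a pool lock by object address, like mtx_pool_find(9) */
    return &pool[((uintptr_t)obj >> 4) % POOLSIZE];
}

struct record { int refcnt; };

static void configure(struct record *r) {
    pthread_rwlock_wrlock(&cfg_lock);    /* slow path: exclusive */
    r->refcnt = 1;
    pthread_rwlock_unlock(&cfg_lock);
}

static void fast_path(struct record *r) {
    pthread_mutex_t *m = pool_find(r);   /* hot path: per-record lock only */
    pthread_mutex_lock(m);
    r->refcnt++;
    pthread_mutex_unlock(m);
}

int main(void) {
    for (int i = 0; i < POOLSIZE; i++)
        pthread_mutex_init(&pool[i], NULL);
    struct record r;
    configure(&r);
    fast_path(&r);
    printf("refcnt=%d\n", r.refcnt);
    return 0;
}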
747 PMCDBG0(CPU,BND,2, "save-cpu"); in pmc_save_cpu_binding()
749 pb->pb_bound = sched_is_bound(curthread); in pmc_save_cpu_binding()
750 pb->pb_cpu = curthread->td_oncpu; in pmc_save_cpu_binding()
751 pb->pb_priority = curthread->td_priority; in pmc_save_cpu_binding()
753 PMCDBG1(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu); in pmc_save_cpu_binding()
762 PMCDBG2(CPU,BND,2, "restore-cpu curcpu=%d restore=%d", in pmc_restore_cpu_binding()
763 curthread->td_oncpu, pb->pb_cpu); in pmc_restore_cpu_binding()
765 sched_bind(curthread, pb->pb_cpu); in pmc_restore_cpu_binding()
766 if (!pb->pb_bound) in pmc_restore_cpu_binding()
768 sched_prio(curthread, pb->pb_priority); in pmc_restore_cpu_binding()
770 PMCDBG0(CPU,BND,2, "restore-cpu done"); in pmc_restore_cpu_binding()
786 PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d", cpu); in pmc_select_cpu()
792 KASSERT(curthread->td_oncpu == cpu, in pmc_select_cpu()
794 cpu, curthread->td_oncpu)); in pmc_select_cpu()
796 PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d ok", cpu); in pmc_select_cpu()
802 * We do this by pause'ing for 1 tick -- invoking mi_switch() is not
848 PMCDBG1(OWN,ORM,1, "remove-owner po=%p", po); in pmc_remove_owner()
854 LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) { in pmc_remove_owner()
856 KASSERT(pm->pm_owner == po, in pmc_remove_owner()
857 ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po)); in pmc_remove_owner()
863 KASSERT(po->po_sscount == 0, in pmc_remove_owner()
865 KASSERT(LIST_EMPTY(&po->po_pmcs), in pmc_remove_owner()
868 /* de-configure the log file if present */ in pmc_remove_owner()
869 if (po->po_flags & PMC_PO_OWNS_LOGFILE) in pmc_remove_owner()
880 PMCDBG1(OWN,OMR,1, "maybe-remove-owner po=%p", po); in pmc_maybe_remove_owner()
884 * - this process does not own any PMCs in pmc_maybe_remove_owner()
885 * - this process has not allocated a system-wide sampling buffer in pmc_maybe_remove_owner()
887 if (LIST_EMPTY(&po->po_pmcs) && in pmc_maybe_remove_owner()
888 ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) { in pmc_maybe_remove_owner()
908 ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d", in pmc_link_target_process()
909 __LINE__, pm, pp->pp_proc->p_pid)); in pmc_link_target_process()
910 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= ((int) md->pmd_npmc - 1), in pmc_link_target_process()
912 __LINE__, pp->pp_refcnt, (void *) pp)); in pmc_link_target_process()
916 PMCDBG3(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p", in pmc_link_target_process()
920 LIST_FOREACH(pt, &pm->pm_targets, pt_next) { in pmc_link_target_process()
921 if (pt->pt_process == pp) in pmc_link_target_process()
927 pt->pt_process = pp; in pmc_link_target_process()
929 LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next); in pmc_link_target_process()
931 atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc, in pmc_link_target_process()
934 if (pm->pm_owner->po_owner == pp->pp_proc) in pmc_link_target_process()
935 pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER; in pmc_link_target_process()
938 * Initialize the per-process values at this row index. in pmc_link_target_process()
940 pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ? in pmc_link_target_process()
941 pm->pm_sc.pm_reloadcount : 0; in pmc_link_target_process()
942 pp->pp_refcnt++; in pmc_link_target_process()
945 /* Confirm that the per-thread values at this row index are cleared. */ in pmc_link_target_process()
947 mtx_lock_spin(pp->pp_tdslock); in pmc_link_target_process()
948 LIST_FOREACH(pt_td, &pp->pp_tds, pt_next) { in pmc_link_target_process()
949 KASSERT(pt_td->pt_pmcs[ri].pt_pmcval == (pmc_value_t) 0, in pmc_link_target_process()
951 "ri=%d", __LINE__, pp->pp_proc->p_pid, ri)); in pmc_link_target_process()
953 mtx_unlock_spin(pp->pp_tdslock); in pmc_link_target_process()
974 KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc, in pmc_unlink_target_process()
976 __LINE__, pp->pp_refcnt, (void *) pp)); in pmc_unlink_target_process()
980 PMCDBG3(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p", in pmc_unlink_target_process()
983 KASSERT(pp->pp_pmcs[ri].pp_pmc == pm, in pmc_unlink_target_process()
984 ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__, in pmc_unlink_target_process()
985 ri, pm, pp->pp_pmcs[ri].pp_pmc)); in pmc_unlink_target_process()
987 pp->pp_pmcs[ri].pp_pmc = NULL; in pmc_unlink_target_process()
988 pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t)0; in pmc_unlink_target_process()
990 /* Clear the per-thread values at this row index. */ in pmc_unlink_target_process()
992 mtx_lock_spin(pp->pp_tdslock); in pmc_unlink_target_process()
993 LIST_FOREACH(pt, &pp->pp_tds, pt_next) in pmc_unlink_target_process()
994 pt->pt_pmcs[ri].pt_pmcval = (pmc_value_t)0; in pmc_unlink_target_process()
995 mtx_unlock_spin(pp->pp_tdslock); in pmc_unlink_target_process()
998 /* Remove owner-specific flags */ in pmc_unlink_target_process()
999 if (pm->pm_owner->po_owner == pp->pp_proc) { in pmc_unlink_target_process()
1000 pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS; in pmc_unlink_target_process()
1001 pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER; in pmc_unlink_target_process()
1004 pp->pp_refcnt--; in pmc_unlink_target_process()
1007 LIST_FOREACH(ptgt, &pm->pm_targets, pt_next) in pmc_unlink_target_process()
1008 if (ptgt->pt_process == pp) in pmc_unlink_target_process()
1012 "in pmc %p", __LINE__, pp->pp_proc, pp, pm)); in pmc_unlink_target_process()
1018 if (LIST_EMPTY(&pm->pm_targets)) { in pmc_unlink_target_process()
1019 p = pm->pm_owner->po_owner; in pmc_unlink_target_process()
1043 if ((o = pm->pm_owner->po_owner) == t) in pmc_can_attach()
1047 oc = o->p_ucred; in pmc_can_attach()
1052 tc = t->p_ucred; in pmc_can_attach()
1061 decline_attach = oc->cr_uid != tc->cr_uid && in pmc_can_attach()
1062 oc->cr_uid != tc->cr_svuid && in pmc_can_attach()
1063 oc->cr_uid != tc->cr_ruid; in pmc_can_attach()
1069 for (int i = 0; !decline_attach && i < tc->cr_ngroups; i++) in pmc_can_attach()
1070 decline_attach = !groupmember(tc->cr_groups[i], oc); in pmc_can_attach()
1072 decline_attach = !groupmember(tc->cr_gid, oc) || in pmc_can_attach()
1073 !groupmember(tc->cr_rgid, oc) || in pmc_can_attach()
1074 !groupmember(tc->cr_svgid, oc); in pmc_can_attach()
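Reading the decline_attach chain above: the owner's effective uid must equal at least one of the target's effective, saved, or real uids, while the group tests are stricter and require the owner to be a member of all of the target's gids. A simplified sketch of the uid half, using an ad-hoc struct rather than the kernel's ucred:

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

struct cred { uid_t uid, ruid, svuid; };

static bool uid_permits_attach(const struct cred *o, const struct cred *t) {
    /* permit if the owner's euid matches any of the target's uids */
    return o->uid == t->uid || o->uid == t->svuid || o->uid == t->ruid;
}

int main(void) {
    struct cred owner = { 1001, 1001, 1001 };
    struct cred same  = { 1001, 1001, 1001 };
    struct cred rootd = { 0, 0, 0 };           /* root daemon: declined */
    printf("%d %d\n", uid_permits_attach(&owner, &same),
        uid_permits_attach(&owner, &rootd));
    return 0;
}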
1094 PMCDBG5(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm, in pmc_attach_one_process()
1095 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); in pmc_attach_one_process()
1111 p->p_flag |= P_HWPMC; in pmc_attach_one_process()
1119 if (pp->pp_pmcs[ri].pp_pmc == pm) {/* already present at slot [ri] */ in pmc_attach_one_process()
1124 if (pp->pp_pmcs[ri].pp_pmc != NULL) { in pmc_attach_one_process()
1132 (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0) in pmc_attach_one_process()
1133 pm->pm_flags |= PMC_F_NEEDS_LOGFILE; in pmc_attach_one_process()
1135 pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */ in pmc_attach_one_process()
1138 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) { in pmc_attach_one_process()
1139 if (p->p_flag & P_KPROC) { in pmc_attach_one_process()
1143 pmc_getfilename(p->p_textvp, &fullpath, &freepath); in pmc_attach_one_process()
1144 pmclog_process_pmcattach(pm, p->p_pid, fullpath); in pmc_attach_one_process()
1148 pmc_log_process_mappings(pm->pm_owner, p); in pmc_attach_one_process()
1154 p->p_flag &= ~P_HWPMC; in pmc_attach_one_process()
1171 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); in pmc_attach_process()
1177 if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0) in pmc_attach_process()
1180 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0) in pmc_attach_process()
1193 if (!LIST_EMPTY(&p->p_children)) in pmc_attach_process()
1194 p = LIST_FIRST(&p->p_children); in pmc_attach_process()
1202 p = p->p_pptr; in pmc_attach_process()
1232 PMCDBG6(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x", in pmc_detach_one_process()
1233 pm, ri, p, p->p_pid, p->p_comm, flags); in pmc_detach_one_process()
1238 if (pp->pp_pmcs[ri].pp_pmc != pm) in pmc_detach_one_process()
1244 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) in pmc_detach_one_process()
1245 pmclog_process_pmcdetach(pm, p->p_pid); in pmc_detach_one_process()
1252 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc, in pmc_detach_one_process()
1254 __LINE__, pp->pp_refcnt, pp)); in pmc_detach_one_process()
1256 if (pp->pp_refcnt != 0) /* still a target of some PMC */ in pmc_detach_one_process()
1265 p->p_flag &= ~P_HWPMC; in pmc_detach_one_process()
1282 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); in pmc_detach_process()
1284 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0) in pmc_detach_process()
1298 if (!LIST_EMPTY(&p->p_children)) { in pmc_detach_process()
1299 p = LIST_FIRST(&p->p_children); in pmc_detach_process()
1308 p = p->p_pptr; in pmc_detach_process()
1314 if (LIST_EMPTY(&pm->pm_targets)) in pmc_detach_process()
1315 pm->pm_flags &= ~PMC_F_ATTACH_DONE; in pmc_detach_process()
1322 * - Inform log owners of the new exec() event
1323 * - Release any PMCs owned by the process before the exec()
1324 * - Detach PMCs from the target if required
1339 p = td->td_proc; in pmc_process_exec()
1340 pmc_getfilename(p->p_textvp, &fullpath, &freepath); in pmc_process_exec()
1345 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) { in pmc_process_exec()
1346 pmclog_process_procexec(po, PMC_ID_INVALID, p->p_pid, in pmc_process_exec()
1347 pk->pm_baseaddr, pk->pm_dynaddr, fullpath); in pmc_process_exec()
1353 is_using_hwpmcs = (p->p_flag & P_HWPMC) != 0; in pmc_process_exec()
1383 * already received the event because they had system sampling PMCs in pmc_process_exec()
1386 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_exec()
1387 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL) in pmc_process_exec()
1390 po = pm->pm_owner; in pmc_process_exec()
1391 if (po->po_sscount == 0 && in pmc_process_exec()
1392 (po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) { in pmc_process_exec()
1393 pmclog_process_procexec(po, pm->pm_id, p->p_pid, in pmc_process_exec()
1394 pk->pm_baseaddr, pk->pm_dynaddr, fullpath); in pmc_process_exec()
1401 PMCDBG4(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d", in pmc_process_exec()
1402 p, p->p_pid, p->p_comm, pk->pm_credentialschanged); in pmc_process_exec()
1404 if (pk->pm_credentialschanged == 0) /* no change */ in pmc_process_exec()
1412 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_exec()
1413 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) { in pmc_process_exec()
1414 if (pmc_can_attach(pm, td->td_proc)) { in pmc_process_exec()
1415 pmc_detach_one_process(td->td_proc, pm, in pmc_process_exec()
1421 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= md->pmd_npmc, in pmc_process_exec()
1423 pp->pp_refcnt, pp)); in pmc_process_exec()
1430 if (pp->pp_refcnt == 0) { in pmc_process_exec()
1453 p = td->td_proc; in pmc_process_csw_in()
1458 KASSERT(pp->pp_proc == td->td_proc, in pmc_process_csw_in()
1463 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ in pmc_process_csw_in()
1466 p->p_pid, p->p_comm, pp); in pmc_process_csw_in()
1472 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_csw_in()
1473 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL) in pmc_process_csw_in()
1477 ("[pmc,%d] Target PMC in non-virtual mode (%d)", in pmc_process_csw_in()
1487 if (pm->pm_state != PMC_STATE_RUNNING) in pmc_process_csw_in()
1490 KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0, in pmc_process_csw_in()
1492 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); in pmc_process_csw_in()
1495 counter_u64_add(pm->pm_runcount, 1); in pmc_process_csw_in()
1499 (void)pcd->pcd_config_pmc(cpu, adjri, pm); in pmc_process_csw_in()
1501 phw = pc->pc_hwpmcs[ri]; in pmc_process_csw_in()
1506 KASSERT(phw->phw_pmc == pm, in pmc_process_csw_in()
1507 ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__, in pmc_process_csw_in()
1508 phw->phw_pmc, pm)); in pmc_process_csw_in()
1513 * Sampling PMCs use a per-thread value, while in pmc_process_csw_in()
1514 * counting mode PMCs use a per-pmc value that is in pmc_process_csw_in()
1529 * If we have a thread descriptor, use the per-thread in pmc_process_csw_in()
1531 * a per-process counter. in pmc_process_csw_in()
1533 * TODO: Remove the per-process "safety net" once in pmc_process_csw_in()
1538 if (pt->pt_pmcs[ri].pt_pmcval > 0) in pmc_process_csw_in()
1539 newvalue = pt->pt_pmcs[ri].pt_pmcval; in pmc_process_csw_in()
1541 newvalue = pm->pm_sc.pm_reloadcount; in pmc_process_csw_in()
1550 newvalue = pp->pp_pmcs[ri].pp_pmcval; in pmc_process_csw_in()
1551 pp->pp_pmcs[ri].pp_pmcval = in pmc_process_csw_in()
1552 pm->pm_sc.pm_reloadcount; in pmc_process_csw_in()
1556 pm->pm_sc.pm_reloadcount, in pmc_process_csw_in()
1559 cpu, ri, newvalue, pm->pm_sc.pm_reloadcount)); in pmc_process_csw_in()
1566 pm->pm_gv.pm_savedvalue; in pmc_process_csw_in()
1572 (void)pcd->pcd_write_pmc(cpu, adjri, pm, newvalue); in pmc_process_csw_in()
1574 /* If a sampling mode PMC, reset stalled state. */ in pmc_process_csw_in()
1576 pm->pm_pcpu_state[cpu].pps_stalled = 0; in pmc_process_csw_in()
1579 pm->pm_pcpu_state[cpu].pps_cpustate = 1; in pmc_process_csw_in()
1582 (void)pcd->pcd_start_pmc(cpu, adjri, pm); in pmc_process_csw_in()
1587 * switch-in actions. in pmc_process_csw_in()
1589 (void)(*md->pmd_switch_in)(pc, pp); in pmc_process_csw_in()
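The value written at switch-in follows the rule spelled out in the comments above: sampling-mode (TS) PMCs reload the thread's remaining count, falling back to pm_reloadcount for a fresh thread, while counting-mode PMCs resume from the saved total. A tiny self-contained sketch with ad-hoc names:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pmc_value_t;

static pmc_value_t
switch_in_value(int sampling, pmc_value_t thread_remaining,
    pmc_value_t reloadcount, pmc_value_t saved)
{
    if (sampling)   /* TS mode: count down whatever this thread has left */
        return (thread_remaining > 0 ? thread_remaining : reloadcount);
    return (saved); /* TC mode: resume the accumulated total */
}

int main(void) {
    printf("%ju\n", (uintmax_t)switch_in_value(1, 0, 100000, 0));    /* fresh thread: 100000 */
    printf("%ju\n", (uintmax_t)switch_in_value(1, 1234, 100000, 0)); /* partly consumed: 1234 */
    printf("%ju\n", (uintmax_t)switch_in_value(0, 0, 0, 42));        /* counting: 42 */
    return 0;
}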
1625 p = td->td_proc; in pmc_process_csw_out()
1630 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ in pmc_process_csw_out()
1633 p->p_pid, p->p_comm, pp); in pmc_process_csw_out()
1650 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_csw_out()
1653 (void)(*pcd->pcd_get_config)(cpu, adjri, &pm); in pmc_process_csw_out()
1668 * This two-step dance should avoid race conditions where in pmc_process_csw_out()
1669 * an interrupt re-enables the PMC after this code has in pmc_process_csw_out()
1672 pm->pm_pcpu_state[cpu].pps_cpustate = 0; in pmc_process_csw_out()
1673 if (pm->pm_pcpu_state[cpu].pps_stalled == 0) in pmc_process_csw_out()
1674 (void)pcd->pcd_stop_pmc(cpu, adjri, pm); in pmc_process_csw_out()
1676 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, in pmc_process_csw_out()
1678 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); in pmc_process_csw_out()
1681 counter_u64_add(pm->pm_runcount, -1); in pmc_process_csw_out()
1687 if (pm->pm_state != PMC_STATE_DELETED && pp != NULL && in pmc_process_csw_out()
1688 pp->pp_pmcs[ri].pp_pmc != NULL) { in pmc_process_csw_out()
1689 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc, in pmc_process_csw_out()
1691 pm, ri, pp->pp_pmcs[ri].pp_pmc)); in pmc_process_csw_out()
1692 KASSERT(pp->pp_refcnt > 0, in pmc_process_csw_out()
1694 pp->pp_refcnt)); in pmc_process_csw_out()
1696 (void)pcd->pcd_read_pmc(cpu, adjri, pm, &newvalue); in pmc_process_csw_out()
1714 * per-thread counter in the descriptor. If not, in pmc_process_csw_out()
1715 * we will update the per-process counter. in pmc_process_csw_out()
1717 * TODO: Remove the per-process "safety net" in pmc_process_csw_out()
1722 pt->pt_pmcs[ri].pt_pmcval = newvalue; in pmc_process_csw_out()
1725 * For sampling process-virtual PMCs, in pmc_process_csw_out()
1727 * be seen until the next sampling in pmc_process_csw_out()
1736 pp->pp_pmcs[ri].pp_pmcval += newvalue; in pmc_process_csw_out()
1737 if (pp->pp_pmcs[ri].pp_pmcval > in pmc_process_csw_out()
1738 pm->pm_sc.pm_reloadcount) { in pmc_process_csw_out()
1739 pp->pp_pmcs[ri].pp_pmcval -= in pmc_process_csw_out()
1740 pm->pm_sc.pm_reloadcount; in pmc_process_csw_out()
1745 tmp = newvalue - PMC_PCPU_SAVED(cpu, ri); in pmc_process_csw_out()
1751 * For counting process-virtual PMCs, in pmc_process_csw_out()
1763 pm->pm_gv.pm_savedvalue += tmp; in pmc_process_csw_out()
1764 pp->pp_pmcs[ri].pp_pmcval += tmp; in pmc_process_csw_out()
1767 if (pm->pm_flags & PMC_F_LOG_PROCCSW) in pmc_process_csw_out()
1773 (void)pcd->pcd_config_pmc(cpu, adjri, NULL); in pmc_process_csw_out()
1780 (void)(*md->pmd_switch_out)(pc, pp); in pmc_process_csw_out()
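The "two-step dance" mentioned above reduces to two per-CPU flags. A compilable sketch of the protocol; the field names mirror pm_pcpu_state, but this is not driver code:

#include <stdio.h>

struct pcpu_state { volatile int pps_cpustate, pps_stalled; };

static int hw_running = 1;                 /* stand-in for the counter */

static void switch_out_stop(struct pcpu_state *s) {
    s->pps_cpustate = 0;                   /* step 1: withdraw permission */
    if (s->pps_stalled == 0)
        hw_running = 0;                    /* step 2: e.g. pcd_stop_pmc() */
}

static void intr_maybe_restart(struct pcpu_state *s) {
    /* the interrupt path restarts a stalled PMC only while permission
     * still stands, so it cannot race past step 1 above */
    if (s->pps_cpustate && s->pps_stalled) {
        s->pps_stalled = 0;
        hw_running = 1;                    /* e.g. pcd_start_pmc() */
    }
}

int main(void) {
    struct pcpu_state s = { 1, 0 };        /* running, not stalled */
    switch_out_stop(&s);                   /* stops the hardware */
    s.pps_stalled = 1;                     /* pretend a late stall report */
    intr_maybe_restart(&s);                /* refuses: cpustate is 0 */
    printf("hw_running=%d\n", hw_running); /* prints 0 */
    return 0;
}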
1793 pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE); in pmc_process_thread_add()
1806 pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE); in pmc_process_thread_delete()
1819 pmc_capture_user_callchain(curcpu, PMC_UR, td->td_frame); in pmc_process_thread_userret()
1839 pmc_getfilename((struct vnode *)pkm->pm_file, &fullpath, &freepath); in pmc_process_mmap()
1841 pid = td->td_proc->p_pid; in pmc_process_mmap()
1844 /* Inform owners of all system-wide sampling PMCs. */ in pmc_process_mmap()
1846 if (po->po_flags & PMC_PO_OWNS_LOGFILE) in pmc_process_mmap()
1847 pmclog_process_map_in(po, pid, pkm->pm_address, in pmc_process_mmap()
1851 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL) in pmc_process_mmap()
1855 * Inform sampling PMC owners tracking this process. in pmc_process_mmap()
1857 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_mmap()
1858 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL && in pmc_process_mmap()
1860 pmclog_process_map_in(pm->pm_owner, in pmc_process_mmap()
1861 pid, pkm->pm_address, fullpath); in pmc_process_mmap()
1883 pid = td->td_proc->p_pid; in pmc_process_munmap()
1887 if (po->po_flags & PMC_PO_OWNS_LOGFILE) in pmc_process_munmap()
1888 pmclog_process_map_out(po, pid, pkm->pm_address, in pmc_process_munmap()
1889 pkm->pm_address + pkm->pm_size); in pmc_process_munmap()
1893 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL) in pmc_process_munmap()
1896 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_munmap()
1897 pm = pp->pp_pmcs[ri].pp_pmc; in pmc_process_munmap()
1899 pmclog_process_map_out(pm->pm_owner, pid, in pmc_process_munmap()
1900 pkm->pm_address, pkm->pm_address + pkm->pm_size); in pmc_process_munmap()
1916 ("[pmc,%d] non-sampling PMC (%p) desires mapping information", in pmc_log_kernel_mappings()
1919 po = pm->pm_owner; in pmc_log_kernel_mappings()
1920 if ((po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE) != 0) in pmc_log_kernel_mappings()
1930 for (km = kmbase; km->pm_file != NULL; km++) { in pmc_log_kernel_mappings()
1931 PMCDBG2(LOG,REG,1,"%s %p", (char *)km->pm_file, in pmc_log_kernel_mappings()
1932 (void *)km->pm_address); in pmc_log_kernel_mappings()
1933 pmclog_process_map_in(po, (pid_t)-1, km->pm_address, in pmc_log_kernel_mappings()
1934 km->pm_file); in pmc_log_kernel_mappings()
1938 po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE; in pmc_log_kernel_mappings()
1964 map = &vm->vm_map; in pmc_log_process_mappings()
1969 "NULL! pid=%d vm_map=%p\n", p->p_pid, map); in pmc_log_process_mappings()
1976 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || in pmc_log_process_mappings()
1977 (entry->protection & VM_PROT_EXECUTE) == 0 || in pmc_log_process_mappings()
1978 entry->object.vm_object == NULL) { in pmc_log_process_mappings()
1982 obj = entry->object.vm_object; in pmc_log_process_mappings()
1986 * Walk the backing_object list to find the base (non-shadowed) in pmc_log_process_mappings()
1990 tobj = tobj->backing_object) { in pmc_log_process_mappings()
2004 "vm_map=%p vm_obj=%p\n", p->p_pid, map, obj); in pmc_log_process_mappings()
2019 * don't emit redundant MAP-IN directives. in pmc_log_process_mappings()
2021 if (entry->start == last_end && vp == last_vp) { in pmc_log_process_mappings()
2022 last_end = entry->end; in pmc_log_process_mappings()
2037 start_addr = entry->start; in pmc_log_process_mappings()
2038 last_end = entry->end; in pmc_log_process_mappings()
2039 last_timestamp = map->timestamp; in pmc_log_process_mappings()
2054 pmclog_process_map_in(po, p->p_pid, start_addr, fullpath); in pmc_log_process_mappings()
2072 * &map->header, which would cause our loop to abort in pmc_log_process_mappings()
2078 * process exits, so there will always be a non-header in pmc_log_process_mappings()
2082 if (map->timestamp != last_timestamp) in pmc_log_process_mappings()
2083 vm_map_lookup_entry(map, last_end - 1, &entry); in pmc_log_process_mappings()
2111 if (!LIST_EMPTY(&p->p_children)) in pmc_log_all_process_mappings()
2112 p = LIST_FIRST(&p->p_children); in pmc_log_all_process_mappings()
2120 p = p->p_pptr; in pmc_log_all_process_mappings()
2132 "CSW-IN",
2133 "CSW-OUT",
2139 "CALLCHAIN-NMI",
2140 "CALLCHAIN-SOFT",
2142 "THR-CREATE",
2143 "THR-EXIT",
2144 "THR-USERRET",
2145 "THR-CREATE-LOG",
2146 "THR-EXIT-LOG",
2147 "PROC-CREATE-LOG"
2226 KASSERT(td->td_pinned == 1, in pmc_hook_handler()
2230 td->td_pflags &= ~TDP_CALLCHAIN; in pmc_hook_handler()
2244 KASSERT(td->td_pinned == 1, in pmc_hook_handler()
2249 td->td_pflags &= ~TDP_CALLCHAIN; in pmc_hook_handler()
2254 * Call soft PMC sampling intr. in pmc_hook_handler()
2307 po->po_owner = p; in pmc_allocate_owner_descriptor()
2310 TAILQ_INIT(&po->po_logbuffers); in pmc_allocate_owner_descriptor()
2311 mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN); in pmc_allocate_owner_descriptor()
2313 PMCDBG4(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p", in pmc_allocate_owner_descriptor()
2314 p, p->p_pid, p->p_comm, po); in pmc_allocate_owner_descriptor()
2323 PMCDBG4(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)", in pmc_destroy_owner_descriptor()
2324 po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm); in pmc_destroy_owner_descriptor()
2326 mtx_destroy(&po->po_mtx); in pmc_destroy_owner_descriptor()
2343 pmc_threadfreelist_entries--; in pmc_thread_descriptor_pool_alloc()
2385 delta = pmc_threadfreelist_entries - pmc_threadfreelist_max; in pmc_thread_descriptor_pool_free_task()
2387 delta--; in pmc_thread_descriptor_pool_free_task()
2388 pmc_threadfreelist_entries--; in pmc_thread_descriptor_pool_free_task()
2435 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case prior to in pmc_find_thread_descriptor()
2450 mtx_lock_spin(pp->pp_tdslock); in pmc_find_thread_descriptor()
2451 LIST_FOREACH(pt, &pp->pp_tds, pt_next) { in pmc_find_thread_descriptor()
2452 if (pt->pt_td == td) in pmc_find_thread_descriptor()
2462 pt->pt_td = td; in pmc_find_thread_descriptor()
2463 LIST_INSERT_HEAD(&pp->pp_tds, pt, pt_next); in pmc_find_thread_descriptor()
2466 mtx_unlock_spin(pp->pp_tdslock); in pmc_find_thread_descriptor()
2542 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we in pmc_find_process_descriptor()
2546 ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc * in pmc_find_process_descriptor()
2551 if (pp->pp_proc == p) in pmc_find_process_descriptor()
2559 ppnew->pp_proc = p; in pmc_find_process_descriptor()
2560 LIST_INIT(&ppnew->pp_tds); in pmc_find_process_descriptor()
2561 ppnew->pp_tdslock = mtx_pool_find(pmc_mtxpool, ppnew); in pmc_find_process_descriptor()
2583 KASSERT(pp->pp_refcnt == 0, in pmc_remove_process_descriptor()
2585 __LINE__, pp, pp->pp_refcnt)); in pmc_remove_process_descriptor()
2600 while ((pmc_td = LIST_FIRST(&pp->pp_tds)) != NULL) { in pmc_destroy_process_descriptor()
2622 if (po->po_owner == p) in pmc_find_owner_descriptor()
2626 PMCDBG5(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> " in pmc_find_owner_descriptor()
2627 "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po); in pmc_find_owner_descriptor()
2641 pmc->pm_runcount = counter_u64_alloc(M_WAITOK); in pmc_allocate_pmc_descriptor()
2642 pmc->pm_pcpu_state = malloc(sizeof(struct pmc_pcpu_state) * mp_ncpus, in pmc_allocate_pmc_descriptor()
2644 PMCDBG1(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc); in pmc_allocate_pmc_descriptor()
2656 KASSERT(pm->pm_state == PMC_STATE_DELETED || in pmc_destroy_pmc_descriptor()
2657 pm->pm_state == PMC_STATE_FREE, in pmc_destroy_pmc_descriptor()
2658 ("[pmc,%d] destroying non-deleted PMC", __LINE__)); in pmc_destroy_pmc_descriptor()
2659 KASSERT(LIST_EMPTY(&pm->pm_targets), in pmc_destroy_pmc_descriptor()
2661 KASSERT(pm->pm_owner == NULL, in pmc_destroy_pmc_descriptor()
2663 KASSERT(counter_u64_fetch(pm->pm_runcount) == 0, in pmc_destroy_pmc_descriptor()
2664 ("[pmc,%d] pmc has non-zero run count %ju", __LINE__, in pmc_destroy_pmc_descriptor()
2665 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); in pmc_destroy_pmc_descriptor()
2667 counter_u64_free(pm->pm_runcount); in pmc_destroy_pmc_descriptor()
2668 free(pm->pm_pcpu_state, M_PMC); in pmc_destroy_pmc_descriptor()
2684 pmclog_flush(pm->pm_owner, 1); in pmc_wait_for_pmc_idle()
2685 while (counter_u64_fetch(pm->pm_runcount) > 0) { in pmc_wait_for_pmc_idle()
2686 pmclog_flush(pm->pm_owner, 1); in pmc_wait_for_pmc_idle()
2688 maxloop--; in pmc_wait_for_pmc_idle()
2692 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); in pmc_wait_for_pmc_idle()
2701 * - detaches the PMC from hardware
2702 * - unlinks all target threads that were attached to it
2703 * - removes the PMC from its owner's list
2704 * - destroys the PMC private mutex
2728 PMCDBG3(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri, in pmc_release_pmc_descriptor()
2744 /* switch off non-stalled CPUs */ in pmc_release_pmc_descriptor()
2745 pm->pm_pcpu_state[cpu].pps_cpustate = 0; in pmc_release_pmc_descriptor()
2746 if (pm->pm_state == PMC_STATE_RUNNING && in pmc_release_pmc_descriptor()
2747 pm->pm_pcpu_state[cpu].pps_stalled == 0) { in pmc_release_pmc_descriptor()
2749 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri]; in pmc_release_pmc_descriptor()
2751 KASSERT(phw->phw_pmc == pm, in pmc_release_pmc_descriptor()
2753 __LINE__, ri, phw->phw_pmc, pm)); in pmc_release_pmc_descriptor()
2757 (void)pcd->pcd_stop_pmc(cpu, adjri, pm); in pmc_release_pmc_descriptor()
2764 (void)pcd->pcd_config_pmc(cpu, adjri, NULL); in pmc_release_pmc_descriptor()
2768 if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) { in pmc_release_pmc_descriptor()
2769 po = pm->pm_owner; in pmc_release_pmc_descriptor()
2770 po->po_sscount--; in pmc_release_pmc_descriptor()
2771 if (po->po_sscount == 0) { in pmc_release_pmc_descriptor()
2777 pm->pm_state = PMC_STATE_DELETED; in pmc_release_pmc_descriptor()
2783 * per-cpu sample queues. Wait for the queue to drain. in pmc_release_pmc_descriptor()
2797 pm->pm_state = PMC_STATE_DELETED; in pmc_release_pmc_descriptor()
2805 * from this PMC. If a process-record's refcount falls to zero, in pmc_release_pmc_descriptor()
2806 * we remove it from the hash table. The module-wide SX lock in pmc_release_pmc_descriptor()
2809 LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) { in pmc_release_pmc_descriptor()
2810 pp = ptgt->pt_process; in pmc_release_pmc_descriptor()
2813 PMCDBG1(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt); in pmc_release_pmc_descriptor()
2819 if (pp->pp_refcnt == 0) { in pmc_release_pmc_descriptor()
2825 cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */ in pmc_release_pmc_descriptor()
2831 (void)pcd->pcd_release_pmc(cpu, adjri, pm); in pmc_release_pmc_descriptor()
2842 if (pm->pm_owner != NULL) { in pmc_release_pmc_descriptor()
2844 pm->pm_owner = NULL; in pmc_release_pmc_descriptor()
2863 KASSERT(pmc->pm_owner == NULL, in pmc_register_owner()
2865 pmc->pm_owner = po; in pmc_register_owner()
2867 LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next); in pmc_register_owner()
2870 p->p_flag |= P_HWPMC; in pmc_register_owner()
2873 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) in pmc_register_owner()
2876 PMCDBG2(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p", in pmc_register_owner()
2899 * - the current process is already being profiled by a PMC at index 'ri',
2901 * - the current process has already allocated a PMC at index 'ri'
2912 PMCDBG5(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d " in pmc_can_allocate_rowindex()
2913 "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu); in pmc_can_allocate_rowindex()
2916 * We shouldn't have already allocated a process-mode PMC at in pmc_can_allocate_rowindex()
2919 * We shouldn't have allocated a system-wide PMC on the same in pmc_can_allocate_rowindex()
2923 LIST_FOREACH(pm, &po->po_pmcs, pm_next) { in pmc_can_allocate_rowindex()
2940 if (pp->pp_pmcs[ri].pp_pmc != NULL) in pmc_can_allocate_rowindex()
2943 PMCDBG4(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok", in pmc_can_allocate_rowindex()
2944 p, p->p_pid, p->p_comm, ri); in pmc_can_allocate_rowindex()
2959 PMCDBG2(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode); in pmc_can_allocate_row()
2969 * Expected disposition Row-disposition Result in pmc_can_allocate_row()
2984 PMCDBG2(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode); in pmc_can_allocate_row()
2996 KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc, in pmc_find_pmc_descriptor_in_process()
2998 PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc)); in pmc_find_pmc_descriptor_in_process()
3000 LIST_FOREACH(pm, &po->po_pmcs, pm_next) { in pmc_find_pmc_descriptor_in_process()
3001 if (pm->pm_id == pmcid) in pmc_find_pmc_descriptor_in_process()
3015 PMCDBG1(PMC,FND,1, "find-pmc id=%d", pmcid); in pmc_find_pmc()
3016 if (PMC_ID_TO_ROWINDEX(pmcid) >= md->pmd_npmc) in pmc_find_pmc()
3019 if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL) { in pmc_find_pmc()
3025 pp = pmc_find_process_descriptor(curthread->td_proc, in pmc_find_pmc()
3029 opm = pp->pp_pmcs[PMC_ID_TO_ROWINDEX(pmcid)].pp_pmc; in pmc_find_pmc()
3032 if ((opm->pm_flags & in pmc_find_pmc()
3037 po = opm->pm_owner; in pmc_find_pmc()
3043 PMCDBG2(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm); in pmc_find_pmc()
3070 po = pm->pm_owner; in pmc_start()
3074 po = pm->pm_owner; in pmc_start()
3080 if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) != 0 && in pmc_start()
3081 (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) in pmc_start()
3085 * If this is a sampling mode PMC, log mapping information for in pmc_start()
3096 if (LIST_EMPTY(&pm->pm_targets)) { in pmc_start()
3097 error = (pm->pm_flags & PMC_F_ATTACH_DONE) != 0 ? in pmc_start()
3098 ESRCH : pmc_attach_process(po->po_owner, pm); in pmc_start()
3106 pm->pm_state = PMC_STATE_RUNNING; in pmc_start()
3107 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) != 0) in pmc_start()
3115 * A system-wide PMC. in pmc_start()
3117 * Add the owner to the global list if this is a system-wide in pmc_start()
3118 * sampling PMC. in pmc_start()
3126 if (po->po_logprocmaps == 0) { in pmc_start()
3128 po->po_logprocmaps = 1; in pmc_start()
3130 po->po_sscount++; in pmc_start()
3131 if (po->po_sscount == 1) { in pmc_start()
3152 pm->pm_state = PMC_STATE_RUNNING; in pmc_start()
3155 v = PMC_IS_SAMPLING_MODE(mode) ? pm->pm_sc.pm_reloadcount : in pmc_start()
3156 pm->pm_sc.pm_initial; in pmc_start()
3157 if ((error = pcd->pcd_write_pmc(cpu, adjri, pm, v)) == 0) { in pmc_start()
3158 /* If a sampling mode PMC, reset stalled state. */ in pmc_start()
3160 pm->pm_pcpu_state[cpu].pps_stalled = 0; in pmc_start()
3163 pm->pm_pcpu_state[cpu].pps_cpustate = 1; in pmc_start()
3164 error = pcd->pcd_start_pmc(cpu, adjri, pm); in pmc_start()
3188 pm->pm_state = PMC_STATE_STOPPED; in pmc_stop()
3191 * If the PMC is a virtual mode one, changing the state to non-RUNNING in pmc_stop()
3201 * A system-mode PMC. Move to the CPU associated with this PMC, and in pmc_stop()
3219 pm->pm_pcpu_state[cpu].pps_cpustate = 0; in pmc_stop()
3221 if ((error = pcd->pcd_stop_pmc(cpu, adjri, pm)) == 0) { in pmc_stop()
3222 error = pcd->pcd_read_pmc(cpu, adjri, pm, in pmc_stop()
3223 &pm->pm_sc.pm_initial); in pmc_stop()
3230 po = pm->pm_owner; in pmc_stop()
3232 po->po_sscount--; in pmc_stop()
3233 if (po->po_sscount == 0) { in pmc_stop()
3249 for (n = 0; n < md->pmd_nclass; n++) { in pmc_class_to_classdep()
3250 if (md->pmd_classdep[n].pcd_class == class) in pmc_class_to_classdep()
3251 return (&md->pmd_classdep[n]); in pmc_class_to_classdep()
3300 class = pa->pm_class; in pmc_do_op_pmcallocate()
3301 caps = pa->pm_caps; in pmc_do_op_pmcallocate()
3302 flags = pa->pm_flags; in pmc_do_op_pmcallocate()
3303 mode = pa->pm_mode; in pmc_do_op_pmcallocate()
3304 cpu = pa->pm_cpu; in pmc_do_op_pmcallocate()
3306 p = td->td_proc; in pmc_do_op_pmcallocate()
3319 * System mode PMCs need to specify a non-default CPU. in pmc_do_op_pmcallocate()
3332 * Refuse an allocation for a system-wide PMC if this process has been in pmc_do_op_pmcallocate()
3333 * jailed, or if this process lacks super-user credentials and the in pmc_do_op_pmcallocate()
3337 if (jailed(td->td_ucred)) in pmc_do_op_pmcallocate()
3359 /* PMC_F_USERCALLCHAIN is only valid for sampling mode. */ in pmc_do_op_pmcallocate()
3370 * All sampling mode PMCs need to be able to interrupt the CPU. in pmc_do_op_pmcallocate()
3381 if ((pcd->pcd_caps & caps) != caps) in pmc_do_op_pmcallocate()
3384 PMCDBG4(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d", pa->pm_ev, in pmc_do_op_pmcallocate()
3388 pmc->pm_id = PMC_ID_MAKE_ID(cpu, pa->pm_mode, class, PMC_ID_INVALID); in pmc_do_op_pmcallocate()
3389 pmc->pm_event = pa->pm_ev; in pmc_do_op_pmcallocate()
3390 pmc->pm_state = PMC_STATE_FREE; in pmc_do_op_pmcallocate()
3391 pmc->pm_caps = caps; in pmc_do_op_pmcallocate()
3392 pmc->pm_flags = flags; in pmc_do_op_pmcallocate()
3394 /* XXX set lower bound on sampling for process counters */ in pmc_do_op_pmcallocate()
3400 if (pa->pm_count < MAX(1, pmc_mincount)) in pmc_do_op_pmcallocate()
3402 "rate %ju - setting to %u\n", in pmc_do_op_pmcallocate()
3403 (uintmax_t)pa->pm_count, in pmc_do_op_pmcallocate()
3405 pmc->pm_sc.pm_reloadcount = MAX(MAX(1, pmc_mincount), in pmc_do_op_pmcallocate()
3406 pa->pm_count); in pmc_do_op_pmcallocate()
3408 pmc->pm_sc.pm_initial = pa->pm_count; in pmc_do_op_pmcallocate()
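The clamp applied a few lines up is worth spelling out: a requested sampling count below kern.hwpmc.mincount is raised to that floor, and the reload count can never drop below 1 even if the knob itself is set to 0. A sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

static uint64_t clamp_reloadcount(uint64_t requested, uint32_t mincount) {
    /* same shape as the assignment above: floor at mincount, never 0 */
    return MAX(MAX((uint64_t)1, (uint64_t)mincount), requested);
}

int main(void) {
    printf("%ju\n", (uintmax_t)clamp_reloadcount(5, 1000));     /* -> 1000 */
    printf("%ju\n", (uintmax_t)clamp_reloadcount(50000, 1000)); /* -> 50000 */
    printf("%ju\n", (uintmax_t)clamp_reloadcount(0, 0));        /* -> 1 */
    return 0;
}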
3414 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \ in pmc_do_op_pmcallocate()
3417 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL) in pmc_do_op_pmcallocate()
3421 for (n = pcd->pcd_ri; n < md->pmd_npmc; n++) { in pmc_do_op_pmcallocate()
3431 if (pcd->pcd_allocate_pmc(cpu, adjri, pmc, pa) == 0) { in pmc_do_op_pmcallocate()
3438 for (n = pcd->pcd_ri; n < md->pmd_npmc; n++) { in pmc_do_op_pmcallocate()
3445 if (pcd->pcd_allocate_pmc(td->td_oncpu, adjri, pmc, in pmc_do_op_pmcallocate()
3458 if (n == md->pmd_npmc) { in pmc_do_op_pmcallocate()
3464 pmc->pm_id = PMC_ID_MAKE_ID(cpu, mode, class, n); in pmc_do_op_pmcallocate()
3466 PMCDBG5(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x", in pmc_do_op_pmcallocate()
3467 pmc->pm_event, class, mode, n, pmc->pm_id); in pmc_do_op_pmcallocate()
3470 if ((pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW)) != 0) in pmc_do_op_pmcallocate()
3471 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE; in pmc_do_op_pmcallocate()
3473 /* All system mode sampling PMCs require a log file. */ in pmc_do_op_pmcallocate()
3475 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE; in pmc_do_op_pmcallocate()
3484 phw = pmc_pcpu[cpu]->pc_hwpmcs[n]; in pmc_do_op_pmcallocate()
3487 if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 || in pmc_do_op_pmcallocate()
3488 (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) { in pmc_do_op_pmcallocate()
3489 (void)pcd->pcd_release_pmc(cpu, adjri, pmc); in pmc_do_op_pmcallocate()
3498 pmc->pm_state = PMC_STATE_ALLOCATED; in pmc_do_op_pmcallocate()
3499 pmc->pm_class = class; in pmc_do_op_pmcallocate()
3522 pa->pm_pmcid = pmc->pm_id; in pmc_do_op_pmcallocate()
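PMC_ID_MAKE_ID() packs cpu, mode, class and row index into the single pm_id handle returned to userland, which is why PMC_ID_TO_ROWINDEX() can later recover the row. A sketch of that style of encoding; the field widths here are assumptions for illustration, not the driver's ABI:

#include <stdint.h>
#include <stdio.h>

#define ID_MAKE(cpu, mode, class, ri) \
    ((uint32_t)((((uint32_t)(cpu) & 0xff) << 24) | (((uint32_t)(mode) & 0xff) << 16) | \
                (((uint32_t)(class) & 0xff) << 8) | ((uint32_t)(ri) & 0xff)))
#define ID_TO_ROWINDEX(id) ((id) & 0xff)
#define ID_TO_CPU(id)      (((id) >> 24) & 0xff)

int main(void) {
    uint32_t id = ID_MAKE(2, 1, 3, 17);
    printf("id=%#x cpu=%u ri=%u\n", id, (unsigned)ID_TO_CPU(id),
        (unsigned)ID_TO_ROWINDEX(id));
    return 0;
}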
3541 a.pm_pid = td->td_proc->p_pid; in pmc_do_op_pmcattach()
3551 /* PMCs may be (re)attached only when allocated or stopped */ in pmc_do_op_pmcattach()
3552 if (pm->pm_state == PMC_STATE_RUNNING) { in pmc_do_op_pmcattach()
3554 } else if (pm->pm_state != PMC_STATE_ALLOCATED && in pmc_do_op_pmcattach()
3555 pm->pm_state != PMC_STATE_STOPPED) { in pmc_do_op_pmcattach()
3566 if ((p->p_flag & P_WEXIT) != 0) { in pmc_do_op_pmcattach()
3597 a.pm_pid = td->td_proc->p_pid; in pmc_do_op_pmcdetach()
3610 if ((p->p_flag & P_WEXIT) != 0) { in pmc_do_op_pmcdetach()
3649 po = pm->pm_owner; in pmc_do_op_pmcrelease()
3669 PMCDBG2(PMC,OPS,1, "rw id=%d flags=0x%x", prw->pm_pmcid, prw->pm_flags); in pmc_do_op_pmcrw()
3672 if ((prw->pm_flags & (PMC_F_OLDVALUE | PMC_F_NEWVALUE)) == 0) in pmc_do_op_pmcrw()
3676 error = pmc_find_pmc(prw->pm_pmcid, &pm); in pmc_do_op_pmcrw()
3681 if (pm->pm_state != PMC_STATE_ALLOCATED && in pmc_do_op_pmcrw()
3682 pm->pm_state != PMC_STATE_STOPPED && in pmc_do_op_pmcrw()
3683 pm->pm_state != PMC_STATE_RUNNING) in pmc_do_op_pmcrw()
3687 if (pm->pm_state == PMC_STATE_RUNNING && in pmc_do_op_pmcrw()
3688 (prw->pm_flags & PMC_F_NEWVALUE) != 0) in pmc_do_op_pmcrw()
3695 	 * get an up-to-date reading from hardware for a READ. Writes in pmc_do_op_pmcrw()
3707 cpu = curthread->td_oncpu; in pmc_do_op_pmcrw()
3709 if ((prw->pm_flags & PMC_F_OLDVALUE) != 0) { in pmc_do_op_pmcrw()
3710 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) && in pmc_do_op_pmcrw()
3711 (pm->pm_state == PMC_STATE_RUNNING)) { in pmc_do_op_pmcrw()
3712 error = (*pcd->pcd_read_pmc)(cpu, adjri, pm, in pmc_do_op_pmcrw()
3715 *valp = pm->pm_gv.pm_savedvalue; in pmc_do_op_pmcrw()
3719 if ((prw->pm_flags & PMC_F_NEWVALUE) != 0) in pmc_do_op_pmcrw()
3720 pm->pm_gv.pm_savedvalue = prw->pm_value; in pmc_do_op_pmcrw()
3737 if ((prw->pm_flags & PMC_F_OLDVALUE) != 0) in pmc_do_op_pmcrw()
3738 error = (*pcd->pcd_read_pmc)(cpu, adjri, pm, valp); in pmc_do_op_pmcrw()
3741 if (error == 0 && (prw->pm_flags & PMC_F_NEWVALUE) != 0) in pmc_do_op_pmcrw()
3742 error = (*pcd->pcd_write_pmc)(cpu, adjri, pm, in pmc_do_op_pmcrw()
3743 prw->pm_value); in pmc_do_op_pmcrw()
3752 if ((prw->pm_flags & PMC_F_NEWVALUE) != 0) in pmc_do_op_pmcrw()
3753 PMCDBG3(PMC,OPS,2, "rw id=%d new %jx -> old %jx", in pmc_do_op_pmcrw()
3754 ri, prw->pm_value, *valp); in pmc_do_op_pmcrw()
3756 PMCDBG2(PMC,OPS,2, "rw id=%d -> old %jx", ri, *valp); in pmc_do_op_pmcrw()
3771 op = c->pmop_code; in pmc_syscall_handler()
3772 arg = c->pmop_data; in pmc_syscall_handler()
3783 * Instead, pre-create the process and ignite the loop in pmc_syscall_handler()
3829 p = td->td_proc; in pmc_syscall_handler()
3841 * de-configure it. in pmc_syscall_handler()
3847 } else if (po->po_flags & PMC_PO_OWNS_LOGFILE) { in pmc_syscall_handler()
3851 LIST_FOREACH(pm, &po->po_pmcs, pm_next) in pmc_syscall_handler()
3852 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE && in pmc_syscall_handler()
3853 pm->pm_state == PMC_STATE_RUNNING) in pmc_syscall_handler()
3874 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { in pmc_syscall_handler()
3893 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { in pmc_syscall_handler()
3914 gci.pm_cputype = md->pmd_cputype; in pmc_syscall_handler()
3916 gci.pm_npmc = md->pmd_npmc; in pmc_syscall_handler()
3917 gci.pm_nclass = md->pmd_nclass; in pmc_syscall_handler()
3919 pcd = md->pmd_classdep; in pmc_syscall_handler()
3920 for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) { in pmc_syscall_handler()
3921 pci->pm_caps = pcd->pcd_caps; in pmc_syscall_handler()
3922 pci->pm_class = pcd->pcd_class; in pmc_syscall_handler()
3923 pci->pm_width = pcd->pcd_width; in pmc_syscall_handler()
3924 pci->pm_num = pcd->pcd_num; in pmc_syscall_handler()
3946 if ((error = copyin(&gei->pm_class, &cl, sizeof(cl))) != 0) in pmc_syscall_handler()
3960 bcopy(&ps->ps_ev, &dev, sizeof(dev)); in pmc_syscall_handler()
3964 &gei->pm_events[nevent], in pmc_syscall_handler()
3973 error = copyout(&nevent, &gei->pm_nevent, in pmc_syscall_handler()
4043 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0) in pmc_syscall_handler()
4060 npmc = md->pmd_npmc; in pmc_syscall_handler()
4067 for (n = 0; n < md->pmd_npmc; n++, p++) { in pmc_syscall_handler()
4074 if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0) in pmc_syscall_handler()
4078 p->pm_rowdisp = PMC_DISP_STANDALONE; in pmc_syscall_handler()
4080 p->pm_rowdisp = PMC_DISP_THREAD; in pmc_syscall_handler()
4082 p->pm_rowdisp = PMC_DISP_FREE; in pmc_syscall_handler()
4084 p->pm_ownerpid = -1; in pmc_syscall_handler()
4089 po = pm->pm_owner; in pmc_syscall_handler()
4091 KASSERT(po->po_owner != NULL, in pmc_syscall_handler()
4095 p->pm_ownerpid = po->po_owner->p_pid; in pmc_syscall_handler()
4096 p->pm_mode = PMC_TO_MODE(pm); in pmc_syscall_handler()
4097 p->pm_event = pm->pm_event; in pmc_syscall_handler()
4098 p->pm_flags = pm->pm_flags; in pmc_syscall_handler()
4101 p->pm_reloadcount = in pmc_syscall_handler()
4102 pm->pm_sc.pm_reloadcount; in pmc_syscall_handler()
4109 error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size); in pmc_syscall_handler()
4163 if (ri < 0 || ri >= (int) md->pmd_npmc) { in pmc_syscall_handler()
4169 * We can't disable a PMC with a row-index allocated in pmc_syscall_handler()
4181 * in system-wide mode. in pmc_syscall_handler()
4188 phw = pc->pc_hwpmcs[ri]; in pmc_syscall_handler()
4194 if (phw->phw_pmc == NULL) { in pmc_syscall_handler()
4196 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) { in pmc_syscall_handler()
4197 phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED; in pmc_syscall_handler()
4200 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) { in pmc_syscall_handler()
4201 phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED; in pmc_syscall_handler()
4204 /* other cases are a no-op */ in pmc_syscall_handler()
4298 (pm->pm_flags & PMC_F_DESCENDANTS)) { in pmc_syscall_handler()
4305 * equivalent instruction on non-x86 architectures) on in pmc_syscall_handler()
4308 * one process attached to it -- its owner. in pmc_syscall_handler()
4311 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL || in pmc_syscall_handler()
4313 pt->pt_process->pp_proc != pm->pm_owner->po_owner) { in pmc_syscall_handler()
4322 if (pcd->pcd_get_msr == NULL) { in pmc_syscall_handler()
4327 if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) < 0) in pmc_syscall_handler()
4338 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS; in pmc_syscall_handler()
4381 error = copyout(&oldvalue, &pprw->pm_value, in pmc_syscall_handler()
4389 * Set the sampling rate for a sampling mode PMC and the in pmc_syscall_handler()
4406 if (pm->pm_state == PMC_STATE_RUNNING) { in pmc_syscall_handler()
4418 "rate %ju - setting to %u\n", in pmc_syscall_handler()
4421 pm->pm_sc.pm_reloadcount = MAX(MAX(1, pmc_mincount), in pmc_syscall_handler()
4424 pm->pm_sc.pm_initial = sc.pm_count; in pmc_syscall_handler()
4449 KASSERT(pmcid == pm->pm_id, in pmc_syscall_handler()
4451 pm->pm_id, pmcid)); in pmc_syscall_handler()
4453 if (pm->pm_state == PMC_STATE_RUNNING) /* already running */ in pmc_syscall_handler()
4455 else if (pm->pm_state != PMC_STATE_STOPPED && in pmc_syscall_handler()
4456 pm->pm_state != PMC_STATE_ALLOCATED) { in pmc_syscall_handler()
4491 KASSERT(pmcid == pm->pm_id, in pmc_syscall_handler()
4493 pm->pm_id, pmcid)); in pmc_syscall_handler()
4495 if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */ in pmc_syscall_handler()
4497 else if (pm->pm_state != PMC_STATE_RUNNING) { in pmc_syscall_handler()
4521 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { in pmc_syscall_handler()
4526 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) { in pmc_syscall_handler()
4571 if ((td->td_pflags & TDP_CALLCHAIN) != 0) in pmc_post_callchain_callback()
4576 * `td->td_pflags' will be safe to touch because this thread in pmc_post_callchain_callback()
4579 td->td_pflags |= TDP_CALLCHAIN; in pmc_post_callchain_callback()
4591 * Find a free slot in the per-cpu array of samples and capture the
4614 psb = pmc_pcpu[cpu]->pc_sb[ring]; in pmc_add_sample()
4617 if (psb->ps_considx != psb->ps_prodidx && in pmc_add_sample()
4618 ps->ps_nsamples) { /* in use, reader hasn't caught up */ in pmc_add_sample()
4619 pm->pm_pcpu_state[cpu].pps_stalled = 1; in pmc_add_sample()
4623 (int)(psb->ps_prodidx & pmc_sample_mask), in pmc_add_sample()
4624 (int)(psb->ps_considx & pmc_sample_mask)); in pmc_add_sample()
4632 inuserspace, (int)(psb->ps_prodidx & pmc_sample_mask), in pmc_add_sample()
4633 (int)(psb->ps_considx & pmc_sample_mask)); in pmc_add_sample()
4636 ps->ps_pmc = pm; in pmc_add_sample()
4637 ps->ps_td = td; in pmc_add_sample()
4638 ps->ps_pid = td->td_proc->p_pid; in pmc_add_sample()
4639 ps->ps_tid = td->td_tid; in pmc_add_sample()
4640 ps->ps_tsc = pmc_rdtsc(); in pmc_add_sample()
4641 ps->ps_ticks = ticks; in pmc_add_sample()
4642 ps->ps_cpu = cpu; in pmc_add_sample()
4643 ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0; in pmc_add_sample()
4645 callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ? in pmc_add_sample()
4648 MPASS(ps->ps_pc != NULL); in pmc_add_sample()
4650 ps->ps_pc[0] = PMC_TRAPFRAME_TO_PC(tf); in pmc_add_sample()
4657 callchaindepth = pmc_save_kernel_callchain(ps->ps_pc, in pmc_add_sample()
4665 ps->ps_nsamples = callchaindepth; /* mark entry as in-use */ in pmc_add_sample()
4667 ps->ps_nsamples_actual = callchaindepth; in pmc_add_sample()
4668 ps->ps_nsamples = PMC_USER_CALLCHAIN_PENDING; in pmc_add_sample()
4671 KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0, in pmc_add_sample()
4673 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); in pmc_add_sample()
4675 counter_u64_add(pm->pm_runcount, 1); /* hold onto PMC */ in pmc_add_sample()
4677 psb->ps_prodidx++; in pmc_add_sample()
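ps_prodidx and ps_considx are free-running counters; masking with pmc_sample_mask (pmc_nsamples - 1, hence the power-of-two requirement on kern.hwpmc.nsamples) maps them onto the per-CPU ring. The driver detects overrun through the slot's in-use marker and sets pps_stalled; the sketch below uses the index difference instead, but the masking scheme is the same:

#include <stdint.h>
#include <stdio.h>

#define NSAMPLES 4u                       /* must be a power of two */
#define SAMPLE_MASK (NSAMPLES - 1)

struct ring { uint64_t prodidx, considx; int slot[NSAMPLES]; };

static int ring_produce(struct ring *r, int v) {
    if (r->prodidx - r->considx == NSAMPLES)
        return -1;                        /* reader hasn't caught up: drop */
    r->slot[r->prodidx & SAMPLE_MASK] = v;
    r->prodidx++;
    return 0;
}

static int ring_consume(struct ring *r, int *v) {
    if (r->considx == r->prodidx)
        return -1;                        /* empty */
    *v = r->slot[r->considx & SAMPLE_MASK];
    r->considx++;
    return 0;
}

int main(void) {
    struct ring r = { 0 };
    for (int i = 0; i < 6; i++)
        if (ring_produce(&r, i) != 0)
            printf("dropped %d\n", i);    /* analogous to marking a stall */
    int v;
    while (ring_consume(&r, &v) == 0)
        printf("got %d\n", v);
    return 0;
}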
4698 if ((pm->pm_flags & PMC_F_USERCALLCHAIN) && in pmc_process_interrupt()
4699 (td->td_proc->p_flag & P_KPROC) == 0 && !TRAPF_USERMODE(tf)) { in pmc_process_interrupt()
4700 atomic_add_int(&td->td_pmcpend, 1); in pmc_process_interrupt()
4722 psb = pmc_pcpu[cpu]->pc_sb[ring]; in pmc_capture_user_callchain()
4728 KASSERT(td->td_pflags & TDP_CALLCHAIN, in pmc_capture_user_callchain()
4733 nrecords = atomic_readandclear_32(&td->td_pmcpend); in pmc_capture_user_callchain()
4735 for (iter = 0, considx = psb->ps_considx, prodidx = psb->ps_prodidx; in pmc_capture_user_callchain()
4744 if (ps->ps_nsamples == PMC_SAMPLE_FREE) { in pmc_capture_user_callchain()
4748 if (ps->ps_td != td || in pmc_capture_user_callchain()
4749 ps->ps_nsamples != PMC_USER_CALLCHAIN_PENDING || in pmc_capture_user_callchain()
4750 ps->ps_pmc->pm_state != PMC_STATE_RUNNING) in pmc_capture_user_callchain()
4753 KASSERT(ps->ps_cpu == cpu, in pmc_capture_user_callchain()
4755 ps->ps_cpu, PCPU_GET(cpuid))); in pmc_capture_user_callchain()
4757 pm = ps->ps_pmc; in pmc_capture_user_callchain()
4758 KASSERT(pm->pm_flags & PMC_F_CALLCHAIN, in pmc_capture_user_callchain()
4761 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, in pmc_capture_user_callchain()
4763 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); in pmc_capture_user_callchain()
4766 nsamples = ps->ps_nsamples_actual; in pmc_capture_user_callchain()
4775 if (__predict_true(nsamples < pmc_callchaindepth - 1)) in pmc_capture_user_callchain()
4776 nsamples += pmc_save_user_callchain(ps->ps_pc + nsamples, in pmc_capture_user_callchain()
4777 pmc_callchaindepth - nsamples - 1, tf); in pmc_capture_user_callchain()
4789 if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) { in pmc_capture_user_callchain()
4790 ps->ps_nsamples = nsamples; in pmc_capture_user_callchain()
4796 counter_u64_add(pm->pm_runcount, -1); in pmc_capture_user_callchain()
4799 if (nrecords-- == 1) in pmc_capture_user_callchain()
4802 if (__predict_false(ring == PMC_UR && td->td_pmcpend)) { in pmc_capture_user_callchain()
4808 td->td_pmcpend = 0; in pmc_capture_user_callchain()
4812 if ((ticks - start_ticks) > hz) in pmc_capture_user_callchain()
4813 log(LOG_ERR, "%s took %d ticks\n", __func__, (ticks - start_ticks)); in pmc_capture_user_callchain()
4838 psb = pmc_pcpu[cpu]->pc_sb[ring]; in pmc_process_samples()
4839 delta = psb->ps_prodidx - psb->ps_considx; in pmc_process_samples()
4841 MPASS(psb->ps_considx <= psb->ps_prodidx); in pmc_process_samples()
4842 for (n = 0; psb->ps_considx < psb->ps_prodidx; psb->ps_considx++, n++) { in pmc_process_samples()
4845 if (__predict_false(ps->ps_nsamples == PMC_SAMPLE_FREE)) in pmc_process_samples()
4848 /* skip non-running samples */ in pmc_process_samples()
4849 pm = ps->ps_pmc; in pmc_process_samples()
4850 if (pm->pm_state != PMC_STATE_RUNNING) in pmc_process_samples()
4853 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, in pmc_process_samples()
4855 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); in pmc_process_samples()
4857 ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__, in pmc_process_samples()
4860 po = pm->pm_owner; in pmc_process_samples()
4863 if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) { in pmc_process_samples()
4869 if (ticks - ps->ps_ticks > 1) { in pmc_process_samples()
4884 pm, ps->ps_nsamples, ps->ps_flags, in pmc_process_samples()
4885 (int)(psb->ps_prodidx & pmc_sample_mask), in pmc_process_samples()
4886 (int)(psb->ps_considx & pmc_sample_mask)); in pmc_process_samples()
4889 * If this is a process-mode PMC that is attached to in pmc_process_samples()
4891 * profiling statistics like timer-based profiling in pmc_process_samples()
4894 * Otherwise, this is either a sampling-mode PMC that in pmc_process_samples()
4896 * or a system-wide sampling PMC. Dispatch a log in pmc_process_samples()
4899 if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) { in pmc_process_samples()
4900 if (ps->ps_flags & PMC_CC_F_USERSPACE) { in pmc_process_samples()
4901 td = FIRST_THREAD_IN_PROC(po->po_owner); in pmc_process_samples()
4902 addupc_intr(td, ps->ps_pc[0], 1); in pmc_process_samples()
4908 ps->ps_nsamples = 0; /* mark entry as free */ in pmc_process_samples()
4909 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, in pmc_process_samples()
4911 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); in pmc_process_samples()
4913 counter_u64_add(pm->pm_runcount, -1); in pmc_process_samples()
4918 /* Do not re-enable stalled PMCs if we failed to process any samples */ in pmc_process_samples()
4923 * Restart any stalled sampling PMCs on this CPU. in pmc_process_samples()
4929 for (n = 0; n < md->pmd_npmc; n++) { in pmc_process_samples()
4933 (void)(*pcd->pcd_get_config)(cpu, adjri, &pm); in pmc_process_samples()
4936 pm->pm_state != PMC_STATE_RUNNING || /* !active */ in pmc_process_samples()
4937 !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */ in pmc_process_samples()
4938 !pm->pm_pcpu_state[cpu].pps_cpustate || /* !desired */ in pmc_process_samples()
4939 !pm->pm_pcpu_state[cpu].pps_stalled) /* !stalled */ in pmc_process_samples()
4942 pm->pm_pcpu_state[cpu].pps_stalled = 0; in pmc_process_samples()
4943 (void)(*pcd->pcd_start_pmc)(cpu, adjri, pm); in pmc_process_samples()
4978 is_using_hwpmcs = (p->p_flag & P_HWPMC) != 0; in pmc_process_exit()
4986 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) in pmc_process_exit()
4987 pmclog_process_sysexit(po, p->p_pid); in pmc_process_exit()
4992 PMCDBG3(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid, in pmc_process_exit()
4993 p->p_comm); in pmc_process_exit()
5008 PMCDBG3(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid, in pmc_process_exit()
5009 p->p_comm); in pmc_process_exit()
5013 cpu = curthread->td_oncpu; in pmc_process_exit()
5021 PMCDBG2(PRC,EXT,2, "process-exit proc=%p pmc-process=%p", p, pp); in pmc_process_exit()
5030 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_exit()
5038 (void)(*pcd->pcd_get_config)(cpu, adjri, &pm); in pmc_process_exit()
5046 pp->pp_pmcs[ri].pp_pmc, pm, pm->pm_state); in pmc_process_exit()
5051 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc, in pmc_process_exit()
5053 pp->pp_pmcs[ri].pp_pmc)); in pmc_process_exit()
5054 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, in pmc_process_exit()
5056 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); in pmc_process_exit()
5060 * two-step dance should avoid race conditions where an in pmc_process_exit()
5061 * interrupt re-enables the PMC after this code has already in pmc_process_exit()
5064 if (pm->pm_pcpu_state[cpu].pps_cpustate) { in pmc_process_exit()
5065 pm->pm_pcpu_state[cpu].pps_cpustate = 0; in pmc_process_exit()
5066 if (!pm->pm_pcpu_state[cpu].pps_stalled) { in pmc_process_exit()
5067 (void)pcd->pcd_stop_pmc(cpu, adjri, pm); in pmc_process_exit()
5070 pcd->pcd_read_pmc(cpu, adjri, pm, in pmc_process_exit()
5072 tmp = newvalue - PMC_PCPU_SAVED(cpu, ri); in pmc_process_exit()
5075 pm->pm_gv.pm_savedvalue += tmp; in pmc_process_exit()
5076 pp->pp_pmcs[ri].pp_pmcval += tmp; in pmc_process_exit()
5082 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, in pmc_process_exit()
5085 counter_u64_add(pm->pm_runcount, -1); in pmc_process_exit()
5086 (void)pcd->pcd_config_pmc(cpu, adjri, NULL); in pmc_process_exit()
5092 (void)md->pmd_switch_out(pmc_pcpu[cpu], pp); in pmc_process_exit()
5094 critical_exit(); /* ok to be pre-empted now */ in pmc_process_exit()
5102 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_exit()
5103 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) { in pmc_process_exit()
5104 if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) != 0 && in pmc_process_exit()
5118 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) in pmc_process_exit()
5144 is_using_hwpmcs = (p1->p_flag & P_HWPMC) != 0; in pmc_process_fork()
5148 * If there are system-wide sampling PMCs active, we need to in pmc_process_fork()
5153 if (po->po_flags & PMC_PO_OWNS_LOGFILE) { in pmc_process_fork()
5154 pmclog_process_procfork(po, p1->p_pid, newproc->p_pid); in pmc_process_fork()
5164 PMCDBG4(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1, in pmc_process_fork()
5165 p1->p_pid, p1->p_comm, newproc); in pmc_process_fork()
5168 * If the parent process (curthread->td_proc) is a in pmc_process_fork()
5173 ppold = pmc_find_process_descriptor(curthread->td_proc, PMC_FLAG_NONE); in pmc_process_fork()
5178 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_fork()
5179 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL && in pmc_process_fork()
5180 (pm->pm_flags & PMC_F_DESCENDANTS) != 0) { in pmc_process_fork()
5192 newproc->p_flag |= P_HWPMC; in pmc_process_fork()
5208 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_fork()
5209 if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL && in pmc_process_fork()
5210 (pm->pm_flags & PMC_F_DESCENDANTS) != 0) { in pmc_process_fork()
5212 po = pm->pm_owner; in pmc_process_fork()
5213 if (po->po_sscount == 0 && in pmc_process_fork()
5214 (po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) { in pmc_process_fork()
5215 pmclog_process_procfork(po, p1->p_pid, in pmc_process_fork()
5216 newproc->p_pid); in pmc_process_fork()
5232 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) in pmc_process_threadcreate()
5245 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) in pmc_process_threadexit()
5258 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) in pmc_process_proccreate()
5271 po = pm->pm_owner; in pmc_process_allproc()
5272 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) in pmc_process_allproc()
5293 * Notify owners of system sampling PMCs about KLD operations. in pmc_kld_load()
5297 if (po->po_flags & PMC_PO_OWNS_LOGFILE) in pmc_kld_load()
5298 pmclog_process_map_in(po, (pid_t) -1, in pmc_kld_load()
5299 (uintfptr_t) lf->address, lf->pathname); in pmc_kld_load()
5304 * TODO: Notify owners of (all) process-sampling PMCs too. in pmc_kld_load()
5316 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) { in pmc_kld_unload()
5317 pmclog_process_map_out(po, (pid_t)-1, in pmc_kld_unload()
5324 * TODO: Notify owners of process-sampling PMCs. in pmc_kld_unload()
5359 md->pmd_nclass = n; in pmc_mdep_alloc()
5362 md->pmd_switch_in = generic_switch_in; in pmc_mdep_alloc()
5363 md->pmd_switch_out = generic_switch_out; in pmc_mdep_alloc()
5398 md->pmd_cputype = PMC_CPU_GENERIC; in pmc_generic_cpu_initialize()
5471 pmc_sample_mask = pmc_nsamples - 1; in pmc_initialize()
5476 "range - using %d.\n", pmc_callchaindepth, in pmc_initialize()
5493 for (ri = c = 0; c < md->pmd_nclass; c++) { in pmc_initialize()
5494 pcd = &md->pmd_classdep[c]; in pmc_initialize()
5495 pcd->pcd_ri = ri; in pmc_initialize()
5496 ri += pcd->pcd_num; in pmc_initialize()
5499 KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1, in pmc_initialize()
5502 /* Compute the map from row-indices to classdep pointers. */ in pmc_initialize()
5504 md->pmd_npmc, M_PMC, M_WAITOK | M_ZERO); in pmc_initialize()
5506 for (n = 0; n < md->pmd_npmc; n++) in pmc_initialize()
5509 for (ri = c = 0; c < md->pmd_nclass; c++) { in pmc_initialize()
5510 pcd = &md->pmd_classdep[c]; in pmc_initialize()
5511 for (n = 0; n < pcd->pcd_num; n++, ri++) in pmc_initialize()
5515 KASSERT(ri == md->pmd_npmc, in pmc_initialize()
5516 ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__, in pmc_initialize()
5517 ri, md->pmd_npmc)); in pmc_initialize()
5521 /* allocate space for the per-cpu array */ in pmc_initialize()
5525 /* per-cpu 'saved values' for managing process-mode PMCs */ in pmc_initialize()
5526 pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc, in pmc_initialize()
5529 /* Perform CPU-dependent initialization. */ in pmc_initialize()
5537 md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC, in pmc_initialize()
5539 for (n = 0; error == 0 && n < md->pmd_nclass; n++) in pmc_initialize()
5540 if (md->pmd_classdep[n].pcd_num > 0) in pmc_initialize()
5541 error = md->pmd_classdep[n].pcd_pcpu_init(md, in pmc_initialize()
5554 domain = pc->pc_domain; in pmc_initialize()
5560 ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu)); in pmc_initialize()
5562 sb->ps_callchains = malloc_domainset(pmc_callchaindepth * in pmc_initialize()
5566 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) in pmc_initialize()
5567 ps->ps_pc = sb->ps_callchains + in pmc_initialize()
5570 pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb; in pmc_initialize()
5576 sb->ps_callchains = malloc_domainset(pmc_callchaindepth * in pmc_initialize()
5579 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) in pmc_initialize()
5580 ps->ps_pc = sb->ps_callchains + in pmc_initialize()
5583 pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb; in pmc_initialize()
5588 sb->ps_callchains = malloc_domainset(pmc_callchaindepth * in pmc_initialize()
5591 for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) in pmc_initialize()
5592 ps->ps_pc = sb->ps_callchains + n * pmc_callchaindepth; in pmc_initialize()
5594 pmc_pcpu[cpu]->pc_sb[PMC_UR] = sb; in pmc_initialize()
5598 pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc, in pmc_initialize()
5602 for (n = 0; n < md->pmd_npmc; n++) in pmc_initialize()
5611 mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf", in pmc_initialize()
5618 pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size, in pmc_initialize()
5626 mtx_init(&pmc_threadfreelist_mtx, "pmc-threadfreelist", "pmc-leaf", in pmc_initialize()
5648 pmc_intr = md->pmd_intr; in pmc_initialize()
5654 for (n = 0; n < md->pmd_nclass; n++) { in pmc_initialize()
5655 if (md->pmd_classdep[n].pcd_num == 0) in pmc_initialize()
5657 pcd = &md->pmd_classdep[n]; in pmc_initialize()
5659 pmc_name_of_pmcclass(pcd->pcd_class), in pmc_initialize()
5660 pcd->pcd_num, in pmc_initialize()
5661 pcd->pcd_width, in pmc_initialize()
5662 pcd->pcd_caps, in pmc_initialize()
5687 /* switch off sampling */ in pmc_cleanup()
5716 po->po_owner, po->po_owner->p_pid, in pmc_cleanup()
5717 po->po_owner->p_comm); in pmc_cleanup()
5719 PROC_LOCK(po->po_owner); in pmc_cleanup()
5720 kern_psignal(po->po_owner, SIGBUS); in pmc_cleanup()
5721 PROC_UNLOCK(po->po_owner); in pmc_cleanup()
5746 PMCDBG1(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid); in pmc_cleanup()
5764 /* do processor and pmc-class dependent cleanup */ in pmc_cleanup()
5771 PMCDBG2(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p", in pmc_cleanup()
5777 for (c = 0; c < md->pmd_nclass; c++) { in pmc_cleanup()
5778 if (md->pmd_classdep[c].pcd_num > 0) { in pmc_cleanup()
5779 md->pmd_classdep[c].pcd_pcpu_fini(md, in pmc_cleanup()
5785 if (md->pmd_cputype == PMC_CPU_GENERIC) in pmc_cleanup()
5795 /* Free per-cpu descriptors. */ in pmc_cleanup()
5799 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_HR] != NULL, in pmc_cleanup()
5802 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_SR] != NULL, in pmc_cleanup()
5805 KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_UR] != NULL, in pmc_cleanup()
5808 free(pmc_pcpu[cpu]->pc_sb[PMC_HR]->ps_callchains, M_PMC); in pmc_cleanup()
5809 free(pmc_pcpu[cpu]->pc_sb[PMC_HR], M_PMC); in pmc_cleanup()
5810 free(pmc_pcpu[cpu]->pc_sb[PMC_SR]->ps_callchains, M_PMC); in pmc_cleanup()
5811 free(pmc_pcpu[cpu]->pc_sb[PMC_SR], M_PMC); in pmc_cleanup()
5812 free(pmc_pcpu[cpu]->pc_sb[PMC_UR]->ps_callchains, M_PMC); in pmc_cleanup()
5813 free(pmc_pcpu[cpu]->pc_sb[PMC_UR], M_PMC); in pmc_cleanup()