Lines Matching +full:use +full:- +full:sw +full:- +full:pm
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2003-2008 Joseph Koshy
12 * Redistribution and use in source and binary forms, with or without
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
101 struct pmc_cpu **pmc_pcpu; /* per-cpu state */
104 #define PMC_PCPU_SAVED(C, R) pmc_pcpu_saved[(R) + md->pmd_npmc * (C)]
120 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
121 KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()), \
138 atomic_add_int(&pmc_pmcdisp[(R)], -1); \
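The two decrements above are fragments of the row-disposition marking macros. Judging from the assertion bound of -pmc_cpu_max_active(), pmc_pmcdisp[] appears to use a signed per-row convention: zero while the row index is FREE, a negative count while the row is claimed by system-wide ("standalone") PMCs, one per active CPU, and a positive count while it is claimed by process-mode ("thread") PMCs. A minimal sketch of predicates built on that convention (the helper names are hypothetical):

	static inline int
	pmc_row_is_free(int ri)
	{
		/* No PMC allocated at this row index anywhere. */
		return (pmc_pmcdisp[ri] == 0);
	}

	static inline int
	pmc_row_is_standalone(int ri)
	{
		/* Negative: claimed by system-wide PMCs, one per CPU. */
		return (pmc_pmcdisp[ri] < 0);
	}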
168 * List of PMC owners with system-wide sampling PMCs.
180 (md->pmd_npmc * sizeof(struct pmc_threadpmcstate)))
202 static int pmc_add_sample(ring_type_t ring, struct pmc *pm,
206 static int pmc_attach_process(struct proc *p, struct pmc *pm);
209 static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
213 static int pmc_can_attach(struct pmc *pm, struct proc *p);
217 static int pmc_detach_process(struct proc *p, struct pmc *pm);
218 static int pmc_detach_one_process(struct proc *p, struct pmc *pm,
221 static void pmc_destroy_pmc_descriptor(struct pmc *pm);
224 static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
232 static void pmc_link_target_process(struct pmc *pm,
235 static void pmc_log_kernel_mappings(struct pmc *pm);
239 static void pmc_process_allproc(struct pmc *pm);
257 static int pmc_start(struct pmc *pm);
258 static int pmc_stop(struct pmc *pm);
333 * kern.hwpmc.hashsize -- determines the number of rows in the
342 * kern.hwpmc.nsamples -- number of PC samples/callchain stacks per CPU
349 static uint64_t pmc_sample_mask = PMC_NSAMPLES - 1;
352 * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
360 * kern.hwpmc.threadfreelist_entries -- number of free entries
367 * kern.hwpmc.threadfreelist_max -- maximum number of free entries
375 * kern.hwpmc.mincount -- minimum sample count
383 * security.bsd.unprivileged_syspmcs -- allow non-root processes to
384 * allocate system-wide PMCs.
387 * if system-wide measurements need to be taken concurrently with other
388 * per-process measurements. This feature is turned off by default.
398 * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
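This is the classic Fibonacci-hashing multiplier: for LONG_BIT == 32 it rounds to 0x9E3779B9, and for LONG_BIT == 64 to 0x9E3779B97F4A7C15. A sketch of the multiplicative hash such a constant supports (the helper name is hypothetical, not from this file):

	#include <stdint.h>

	#define	PMC_HASH_MULT64	0x9E3779B97F4A7C15ULL /* round(2^64 * (sqrt(5)-1)/2) */

	static inline uint32_t
	pmc_hash_ptr(uintptr_t key, int hashbits)
	{
		/* Multiply, then keep the top 'hashbits' bits of the product. */
		return ((uint32_t)((key * PMC_HASH_MULT64) >> (64 - hashbits)));
	}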
474 kwlen = p - q; in pmc_debugflags_parse()
479 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \ in pmc_debugflags_parse()
480 newbits = &tmpflags->pdb_ ## F; in pmc_debugflags_parse()
509 if ((kwlen = p - q) == 0) { in pmc_debugflags_parse()
516 if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \ in pmc_debugflags_parse()
592 if (error == 0 && req->newptr != NULL) { in pmc_debugflags_sysctl_handler()
593 fence = newstr + (n < req->newlen ? n : req->newlen + 1); in pmc_debugflags_sysctl_handler()
613 KASSERT(ri >= 0 && ri < md->pmd_npmc, in pmc_ri_to_classdep()
614 ("[pmc,%d] illegal row-index %d", __LINE__, ri)); in pmc_ri_to_classdep()
620 *adjri = ri - pcd->pcd_ri; in pmc_ri_to_classdep()
621 KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num, in pmc_ri_to_classdep()
622 ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri)); in pmc_ri_to_classdep()
632 * - target process descriptors, one per target process
633 * - owner process descriptors (and attached lists), one per owner process
634 * - lookup hash tables for owner and target processes
635 * - PMC descriptors (and attached lists)
636 * - per-cpu hardware state
637 * - the 'hook' variable through which the kernel calls into
639 * - the machine hardware state (managed by the MD layer)
643 * - thread context-switch code
644 * - interrupt handlers (possibly on multiple cpus)
645 * - kernel threads on multiple cpus running on behalf of user
647 * - this driver's private kernel threads
653 * - The global SX lock "pmc_sx" is used to protect internal
669 * pmc_sx lock we first check that 'pmc_hook' is non-null before
671 * and other threads seeking to use the module.
673 * - Lookups of target process structures and owner process structures
674 * cannot use the global "pmc_sx" SX lock because these lookups need
677 * with their own private spin-mutexes, "pmc_processhash_mtx" and
680 * - Interrupt handlers work in a lock free manner. At interrupt
681 * time, handlers look at the PMC pointer (phw->phw_pmc) configured
693 * - Context-switch handling with process-private PMCs needs more
702 * - each target process structure 'pmc_process' has an array
705 * - At context switch IN time, each "target" PMC in RUNNING state
707 * the per-cpu phw array. The 'runcount' for the PMC is
710 * - At context switch OUT time, all process-virtual PMCs are stopped
712 * only if the PMC is in a non-deleted state (the PMCs state could
715 * Note that since in-between a switch IN on a processor and a switch
721 * - OP PMCRELEASE could be called on a PMC at any time (the PMC could
726 * state 'DELETED'. If the runcount of the PMC is non-zero then
732 * a spin-mutex. In order to save space, we use a mutex pool.
734 * In terms of lock types used by witness(4), we use:
735 * - Type "pmc-sx", used by the global SX lock.
736 * - Type "pmc-sleep", for sleep mutexes used by logger threads.
737 * - Type "pmc-per-proc", for protecting PMC owner descriptors.
738 * - Type "pmc-leaf", used for all other spin mutexes.
747 PMCDBG0(CPU,BND,2, "save-cpu"); in pmc_save_cpu_binding()
749 pb->pb_bound = sched_is_bound(curthread); in pmc_save_cpu_binding()
750 pb->pb_cpu = curthread->td_oncpu; in pmc_save_cpu_binding()
751 pb->pb_priority = curthread->td_priority; in pmc_save_cpu_binding()
753 PMCDBG1(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu); in pmc_save_cpu_binding()
762 PMCDBG2(CPU,BND,2, "restore-cpu curcpu=%d restore=%d", in pmc_restore_cpu_binding()
763 curthread->td_oncpu, pb->pb_cpu); in pmc_restore_cpu_binding()
765 sched_bind(curthread, pb->pb_cpu); in pmc_restore_cpu_binding()
766 if (!pb->pb_bound) in pmc_restore_cpu_binding()
768 sched_prio(curthread, pb->pb_priority); in pmc_restore_cpu_binding()
770 PMCDBG0(CPU,BND,2, "restore-cpu done"); in pmc_restore_cpu_binding()
786 PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d", cpu); in pmc_select_cpu()
792 KASSERT(curthread->td_oncpu == cpu, in pmc_select_cpu()
794 cpu, curthread->td_oncpu)); in pmc_select_cpu()
796 PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d ok", cpu); in pmc_select_cpu()
802 * We do this by pause'ing for 1 tick -- invoking mi_switch() is not
844 struct pmc *pm, *tmp; in pmc_remove_owner() local
848 PMCDBG1(OWN,ORM,1, "remove-owner po=%p", po); in pmc_remove_owner()
854 LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) { in pmc_remove_owner()
855 PMCDBG1(OWN,ORM,2, "pmc=%p", pm); in pmc_remove_owner()
856 KASSERT(pm->pm_owner == po, in pmc_remove_owner()
857 ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po)); in pmc_remove_owner()
859 pmc_release_pmc_descriptor(pm); /* will unlink from the list */ in pmc_remove_owner()
860 pmc_destroy_pmc_descriptor(pm); in pmc_remove_owner()
863 KASSERT(po->po_sscount == 0, in pmc_remove_owner()
865 KASSERT(LIST_EMPTY(&po->po_pmcs), in pmc_remove_owner()
868 /* de-configure the log file if present */ in pmc_remove_owner()
869 if (po->po_flags & PMC_PO_OWNS_LOGFILE) in pmc_remove_owner()
880 PMCDBG1(OWN,OMR,1, "maybe-remove-owner po=%p", po); in pmc_maybe_remove_owner()
884 * - this process does not own any PMCs in pmc_maybe_remove_owner()
885 * - this process has not allocated a system-wide sampling buffer in pmc_maybe_remove_owner()
887 if (LIST_EMPTY(&po->po_pmcs) && in pmc_maybe_remove_owner()
888 ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) { in pmc_maybe_remove_owner()
898 pmc_link_target_process(struct pmc *pm, struct pmc_process *pp) in pmc_link_target_process() argument
905 KASSERT(pm != NULL && pp != NULL, in pmc_link_target_process()
906 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp)); in pmc_link_target_process()
907 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)), in pmc_link_target_process()
908 ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d", in pmc_link_target_process()
909 __LINE__, pm, pp->pp_proc->p_pid)); in pmc_link_target_process()
910 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= ((int) md->pmd_npmc - 1), in pmc_link_target_process()
912 __LINE__, pp->pp_refcnt, (void *) pp)); in pmc_link_target_process()
914 ri = PMC_TO_ROWINDEX(pm); in pmc_link_target_process()
916 PMCDBG3(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p", in pmc_link_target_process()
917 pm, ri, pp); in pmc_link_target_process()
920 LIST_FOREACH(pt, &pm->pm_targets, pt_next) { in pmc_link_target_process()
921 if (pt->pt_process == pp) in pmc_link_target_process()
923 __LINE__, pp, pm)); in pmc_link_target_process()
927 pt->pt_process = pp; in pmc_link_target_process()
929 LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next); in pmc_link_target_process()
931 atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc, in pmc_link_target_process()
932 (uintptr_t)pm); in pmc_link_target_process()
934 if (pm->pm_owner->po_owner == pp->pp_proc) in pmc_link_target_process()
935 pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER; in pmc_link_target_process()
938 * Initialize the per-process values at this row index. in pmc_link_target_process()
940 pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ? in pmc_link_target_process()
941 pm->pm_sc.pm_reloadcount : 0; in pmc_link_target_process()
942 pp->pp_refcnt++; in pmc_link_target_process()
945 /* Confirm that the per-thread values at this row index are cleared. */ in pmc_link_target_process()
946 if (PMC_TO_MODE(pm) == PMC_MODE_TS) { in pmc_link_target_process()
947 mtx_lock_spin(pp->pp_tdslock); in pmc_link_target_process()
948 LIST_FOREACH(pt_td, &pp->pp_tds, pt_next) { in pmc_link_target_process()
949 KASSERT(pt_td->pt_pmcs[ri].pt_pmcval == (pmc_value_t) 0, in pmc_link_target_process()
951 "ri=%d", __LINE__, pp->pp_proc->p_pid, ri)); in pmc_link_target_process()
953 mtx_unlock_spin(pp->pp_tdslock); in pmc_link_target_process()
962 pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp) in pmc_unlink_target_process() argument
971 KASSERT(pm != NULL && pp != NULL, in pmc_unlink_target_process()
972 ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp)); in pmc_unlink_target_process()
974 KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc, in pmc_unlink_target_process()
976 __LINE__, pp->pp_refcnt, (void *) pp)); in pmc_unlink_target_process()
978 ri = PMC_TO_ROWINDEX(pm); in pmc_unlink_target_process()
980 PMCDBG3(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p", in pmc_unlink_target_process()
981 pm, ri, pp); in pmc_unlink_target_process()
983 KASSERT(pp->pp_pmcs[ri].pp_pmc == pm, in pmc_unlink_target_process()
984 ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__, in pmc_unlink_target_process()
985 ri, pm, pp->pp_pmcs[ri].pp_pmc)); in pmc_unlink_target_process()
987 pp->pp_pmcs[ri].pp_pmc = NULL; in pmc_unlink_target_process()
988 pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t)0; in pmc_unlink_target_process()
990 /* Clear the per-thread values at this row index. */ in pmc_unlink_target_process()
991 if (PMC_TO_MODE(pm) == PMC_MODE_TS) { in pmc_unlink_target_process()
992 mtx_lock_spin(pp->pp_tdslock); in pmc_unlink_target_process()
993 LIST_FOREACH(pt, &pp->pp_tds, pt_next) in pmc_unlink_target_process()
994 pt->pt_pmcs[ri].pt_pmcval = (pmc_value_t)0; in pmc_unlink_target_process()
995 mtx_unlock_spin(pp->pp_tdslock); in pmc_unlink_target_process()
998 /* Remove owner-specific flags */ in pmc_unlink_target_process()
999 if (pm->pm_owner->po_owner == pp->pp_proc) { in pmc_unlink_target_process()
1000 pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS; in pmc_unlink_target_process()
1001 pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER; in pmc_unlink_target_process()
1004 pp->pp_refcnt--; in pmc_unlink_target_process()
1007 LIST_FOREACH(ptgt, &pm->pm_targets, pt_next) in pmc_unlink_target_process()
1008 if (ptgt->pt_process == pp) in pmc_unlink_target_process()
1012 "in pmc %p", __LINE__, pp->pp_proc, pp, pm)); in pmc_unlink_target_process()
1018 if (LIST_EMPTY(&pm->pm_targets)) { in pmc_unlink_target_process()
1019 p = pm->pm_owner->po_owner; in pmc_unlink_target_process()
1029 * Check if PMC 'pm' may be attached to target process 't'.
1033 pmc_can_attach(struct pmc *pm, struct proc *t) in pmc_can_attach() argument
1043 if ((o = pm->pm_owner->po_owner) == t) in pmc_can_attach()
1047 oc = o->p_ucred; in pmc_can_attach()
1052 tc = t->p_ucred; in pmc_can_attach()
1061 decline_attach = oc->cr_uid != tc->cr_uid && in pmc_can_attach()
1062 oc->cr_uid != tc->cr_svuid && in pmc_can_attach()
1063 oc->cr_uid != tc->cr_ruid; in pmc_can_attach()
1069 for (i = 0; !decline_attach && i < tc->cr_ngroups; i++) in pmc_can_attach()
1070 decline_attach = !groupmember(tc->cr_groups[i], oc); in pmc_can_attach()
1074 decline_attach = !groupmember(tc->cr_rgid, oc) || in pmc_can_attach()
1075 !groupmember(tc->cr_svgid, oc); in pmc_can_attach()
1087 pmc_attach_one_process(struct proc *p, struct pmc *pm) in pmc_attach_one_process() argument
1095 PMCDBG5(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm, in pmc_attach_one_process()
1096 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); in pmc_attach_one_process()
1108 ri = PMC_TO_ROWINDEX(pm); in pmc_attach_one_process()
1112 p->p_flag |= P_HWPMC; in pmc_attach_one_process()
1120 if (pp->pp_pmcs[ri].pp_pmc == pm) {/* already present at slot [ri] */ in pmc_attach_one_process()
1125 if (pp->pp_pmcs[ri].pp_pmc != NULL) { in pmc_attach_one_process()
1130 pmc_link_target_process(pm, pp); in pmc_attach_one_process()
1132 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) && in pmc_attach_one_process()
1133 (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0) in pmc_attach_one_process()
1134 pm->pm_flags |= PMC_F_NEEDS_LOGFILE; in pmc_attach_one_process()
1136 pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */ in pmc_attach_one_process()
1139 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) { in pmc_attach_one_process()
1140 if (p->p_flag & P_KPROC) { in pmc_attach_one_process()
1144 pmc_getfilename(p->p_textvp, &fullpath, &freepath); in pmc_attach_one_process()
1145 pmclog_process_pmcattach(pm, p->p_pid, fullpath); in pmc_attach_one_process()
1148 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) in pmc_attach_one_process()
1149 pmc_log_process_mappings(pm->pm_owner, p); in pmc_attach_one_process()
1155 p->p_flag &= ~P_HWPMC; in pmc_attach_one_process()
1164 pmc_attach_process(struct proc *p, struct pmc *pm) in pmc_attach_process() argument
1171 PMCDBG5(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm, in pmc_attach_process()
1172 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); in pmc_attach_process()
1178 if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0) in pmc_attach_process()
1181 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0) in pmc_attach_process()
1182 return (pmc_attach_one_process(p, pm)); in pmc_attach_process()
1192 if ((error = pmc_attach_one_process(p, pm)) != 0) in pmc_attach_process()
1194 if (!LIST_EMPTY(&p->p_children)) in pmc_attach_process()
1195 p = LIST_FIRST(&p->p_children); in pmc_attach_process()
1203 p = p->p_pptr; in pmc_attach_process()
1208 (void)pmc_detach_process(top, pm); in pmc_attach_process()
1221 pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags) in pmc_detach_one_process() argument
1228 KASSERT(pm != NULL, in pmc_detach_one_process()
1229 ("[pmc,%d] null pm pointer", __LINE__)); in pmc_detach_one_process()
1231 ri = PMC_TO_ROWINDEX(pm); in pmc_detach_one_process()
1233 PMCDBG6(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x", in pmc_detach_one_process()
1234 pm, ri, p, p->p_pid, p->p_comm, flags); in pmc_detach_one_process()
1239 if (pp->pp_pmcs[ri].pp_pmc != pm) in pmc_detach_one_process()
1242 pmc_unlink_target_process(pm, pp); in pmc_detach_one_process()
1245 if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) in pmc_detach_one_process()
1246 pmclog_process_pmcdetach(pm, p->p_pid); in pmc_detach_one_process()
1253 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc, in pmc_detach_one_process()
1255 __LINE__, pp->pp_refcnt, pp)); in pmc_detach_one_process()
1257 if (pp->pp_refcnt != 0) /* still a target of some PMC */ in pmc_detach_one_process()
1266 p->p_flag &= ~P_HWPMC; in pmc_detach_one_process()
1276 pmc_detach_process(struct proc *p, struct pmc *pm) in pmc_detach_process() argument
1282 PMCDBG5(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm, in pmc_detach_process()
1283 PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); in pmc_detach_process()
1285 if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0) in pmc_detach_process()
1286 return (pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE)); in pmc_detach_process()
1297 (void)pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE); in pmc_detach_process()
1299 if (!LIST_EMPTY(&p->p_children)) { in pmc_detach_process()
1300 p = LIST_FIRST(&p->p_children); in pmc_detach_process()
1309 p = p->p_pptr; in pmc_detach_process()
1315 if (LIST_EMPTY(&pm->pm_targets)) in pmc_detach_process()
1316 pm->pm_flags &= ~PMC_F_ATTACH_DONE; in pmc_detach_process()
1323 * - Inform log owners of the new exec() event
1324 * - Release any PMCs owned by the process before the exec()
1325 * - Detach PMCs from the target if required
1330 struct pmc *pm; in pmc_process_exec() local
1340 p = td->td_proc; in pmc_process_exec()
1341 pmc_getfilename(p->p_textvp, &fullpath, &freepath); in pmc_process_exec()
1346 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) { in pmc_process_exec()
1347 pmclog_process_procexec(po, PMC_ID_INVALID, p->p_pid, in pmc_process_exec()
1348 pk->pm_baseaddr, pk->pm_dynaddr, fullpath); in pmc_process_exec()
1354 is_using_hwpmcs = (p->p_flag & P_HWPMC) != 0; in pmc_process_exec()
1387 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_exec()
1388 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL) in pmc_process_exec()
1391 po = pm->pm_owner; in pmc_process_exec()
1392 if (po->po_sscount == 0 && in pmc_process_exec()
1393 (po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) { in pmc_process_exec()
1394 pmclog_process_procexec(po, pm->pm_id, p->p_pid, in pmc_process_exec()
1395 pk->pm_baseaddr, pk->pm_dynaddr, fullpath); in pmc_process_exec()
1402 PMCDBG4(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d", in pmc_process_exec()
1403 p, p->p_pid, p->p_comm, pk->pm_credentialschanged); in pmc_process_exec()
1405 if (pk->pm_credentialschanged == 0) /* no change */ in pmc_process_exec()
1413 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_exec()
1414 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) { in pmc_process_exec()
1415 if (pmc_can_attach(pm, td->td_proc) != 0) { in pmc_process_exec()
1416 pmc_detach_one_process(td->td_proc, pm, in pmc_process_exec()
1422 KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= md->pmd_npmc, in pmc_process_exec()
1424 pp->pp_refcnt, pp)); in pmc_process_exec()
1431 if (pp->pp_refcnt == 0) { in pmc_process_exec()
1443 struct pmc *pm; in pmc_process_csw_in() local
1454 p = td->td_proc; in pmc_process_csw_in()
1459 KASSERT(pp->pp_proc == td->td_proc, in pmc_process_csw_in()
1464 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ in pmc_process_csw_in()
1467 p->p_pid, p->p_comm, pp); in pmc_process_csw_in()
1473 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_csw_in()
1474 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL) in pmc_process_csw_in()
1477 KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)), in pmc_process_csw_in()
1478 ("[pmc,%d] Target PMC in non-virtual mode (%d)", in pmc_process_csw_in()
1479 __LINE__, PMC_TO_MODE(pm))); in pmc_process_csw_in()
1480 KASSERT(PMC_TO_ROWINDEX(pm) == ri, in pmc_process_csw_in()
1482 __LINE__, PMC_TO_ROWINDEX(pm), ri)); in pmc_process_csw_in()
1488 if (pm->pm_state != PMC_STATE_RUNNING) in pmc_process_csw_in()
1491 KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0, in pmc_process_csw_in()
1492 ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm, in pmc_process_csw_in()
1493 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); in pmc_process_csw_in()
1496 counter_u64_add(pm->pm_runcount, 1); in pmc_process_csw_in()
1498 /* configure the HWPMC we are going to use. */ in pmc_process_csw_in()
1500 (void)pcd->pcd_config_pmc(cpu, adjri, pm); in pmc_process_csw_in()
1502 phw = pc->pc_hwpmcs[ri]; in pmc_process_csw_in()
1507 KASSERT(phw->phw_pmc == pm, in pmc_process_csw_in()
1508 ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__, in pmc_process_csw_in()
1509 phw->phw_pmc, pm)); in pmc_process_csw_in()
1514 * Sampling PMCs use a per-thread value, while in pmc_process_csw_in()
1515 * counting mode PMCs use a per-pmc value that is in pmc_process_csw_in()
1518 if (PMC_TO_MODE(pm) == PMC_MODE_TS) { in pmc_process_csw_in()
1527 mtx_pool_lock_spin(pmc_mtxpool, pm); in pmc_process_csw_in()
1530 * If we have a thread descriptor, use the per-thread in pmc_process_csw_in()
1531 * counter in the descriptor. If not, we will use in pmc_process_csw_in()
1532 * a per-process counter. in pmc_process_csw_in()
1534 * TODO: Remove the per-process "safety net" once in pmc_process_csw_in()
1539 if (pt->pt_pmcs[ri].pt_pmcval > 0) in pmc_process_csw_in()
1540 newvalue = pt->pt_pmcs[ri].pt_pmcval; in pmc_process_csw_in()
1542 newvalue = pm->pm_sc.pm_reloadcount; in pmc_process_csw_in()
1545 * Use the saved value calculated after the most in pmc_process_csw_in()
1551 newvalue = pp->pp_pmcs[ri].pp_pmcval; in pmc_process_csw_in()
1552 pp->pp_pmcs[ri].pp_pmcval = in pmc_process_csw_in()
1553 pm->pm_sc.pm_reloadcount; in pmc_process_csw_in()
1555 mtx_pool_unlock_spin(pmc_mtxpool, pm); in pmc_process_csw_in()
1557 pm->pm_sc.pm_reloadcount, in pmc_process_csw_in()
1560 cpu, ri, newvalue, pm->pm_sc.pm_reloadcount)); in pmc_process_csw_in()
1562 KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC, in pmc_process_csw_in()
1564 PMC_TO_MODE(pm))); in pmc_process_csw_in()
1565 mtx_pool_lock_spin(pmc_mtxpool, pm); in pmc_process_csw_in()
1567 pm->pm_gv.pm_savedvalue; in pmc_process_csw_in()
1568 mtx_pool_unlock_spin(pmc_mtxpool, pm); in pmc_process_csw_in()
1573 (void)pcd->pcd_write_pmc(cpu, adjri, pm, newvalue); in pmc_process_csw_in()
1576 if (PMC_TO_MODE(pm) == PMC_MODE_TS) in pmc_process_csw_in()
1577 pm->pm_pcpu_state[cpu].pps_stalled = 0; in pmc_process_csw_in()
1580 pm->pm_pcpu_state[cpu].pps_cpustate = 1; in pmc_process_csw_in()
1583 (void)pcd->pcd_start_pmc(cpu, adjri, pm); in pmc_process_csw_in()
1588 * switch-in actions. in pmc_process_csw_in()
1590 (void)(*md->pmd_switch_in)(pc, pp); in pmc_process_csw_in()
1601 struct pmc *pm; in pmc_process_csw_out() local
1626 p = td->td_proc; in pmc_process_csw_out()
1631 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ in pmc_process_csw_out()
1634 p->p_pid, p->p_comm, pp); in pmc_process_csw_out()
1651 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_csw_out()
1653 pm = NULL; in pmc_process_csw_out()
1654 (void)(*pcd->pcd_get_config)(cpu, adjri, &pm); in pmc_process_csw_out()
1656 if (pm == NULL) /* nothing at this row index */ in pmc_process_csw_out()
1659 mode = PMC_TO_MODE(pm); in pmc_process_csw_out()
1663 KASSERT(PMC_TO_ROWINDEX(pm) == ri, in pmc_process_csw_out()
1665 __LINE__, PMC_TO_ROWINDEX(pm), ri)); in pmc_process_csw_out()
1669 * This two-step dance should avoid race conditions where in pmc_process_csw_out()
1670 * an interrupt re-enables the PMC after this code has in pmc_process_csw_out()
1673 pm->pm_pcpu_state[cpu].pps_cpustate = 0; in pmc_process_csw_out()
1674 if (pm->pm_pcpu_state[cpu].pps_stalled == 0) in pmc_process_csw_out()
1675 (void)pcd->pcd_stop_pmc(cpu, adjri, pm); in pmc_process_csw_out()
1677 KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, in pmc_process_csw_out()
1678 ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm, in pmc_process_csw_out()
1679 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); in pmc_process_csw_out()
1682 counter_u64_add(pm->pm_runcount, -1); in pmc_process_csw_out()
1688 if (pm->pm_state != PMC_STATE_DELETED && pp != NULL && in pmc_process_csw_out()
1689 pp->pp_pmcs[ri].pp_pmc != NULL) { in pmc_process_csw_out()
1690 KASSERT(pm == pp->pp_pmcs[ri].pp_pmc, in pmc_process_csw_out()
1691 ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__, in pmc_process_csw_out()
1692 pm, ri, pp->pp_pmcs[ri].pp_pmc)); in pmc_process_csw_out()
1693 KASSERT(pp->pp_refcnt > 0, in pmc_process_csw_out()
1695 pp->pp_refcnt)); in pmc_process_csw_out()
1697 (void)pcd->pcd_read_pmc(cpu, adjri, pm, &newvalue); in pmc_process_csw_out()
1711 mtx_pool_lock_spin(pmc_mtxpool, pm); in pmc_process_csw_out()
1715 * per-thread counter in the descriptor. If not, in pmc_process_csw_out()
1716 * we will update the per-process counter. in pmc_process_csw_out()
1718 * TODO: Remove the per-process "safety net" in pmc_process_csw_out()
1723 pt->pt_pmcs[ri].pt_pmcval = newvalue; in pmc_process_csw_out()
1726 * For sampling process-virtual PMCs, in pmc_process_csw_out()
1735 * every time we use it.) in pmc_process_csw_out()
1737 pp->pp_pmcs[ri].pp_pmcval += newvalue; in pmc_process_csw_out()
1738 if (pp->pp_pmcs[ri].pp_pmcval > in pmc_process_csw_out()
1739 pm->pm_sc.pm_reloadcount) { in pmc_process_csw_out()
1740 pp->pp_pmcs[ri].pp_pmcval -= in pmc_process_csw_out()
1741 pm->pm_sc.pm_reloadcount; in pmc_process_csw_out()
1744 mtx_pool_unlock_spin(pmc_mtxpool, pm); in pmc_process_csw_out()
1746 tmp = newvalue - PMC_PCPU_SAVED(cpu, ri); in pmc_process_csw_out()
1752 * For counting process-virtual PMCs, in pmc_process_csw_out()
1763 mtx_pool_lock_spin(pmc_mtxpool, pm); in pmc_process_csw_out()
1764 pm->pm_gv.pm_savedvalue += tmp; in pmc_process_csw_out()
1765 pp->pp_pmcs[ri].pp_pmcval += tmp; in pmc_process_csw_out()
1766 mtx_pool_unlock_spin(pmc_mtxpool, pm); in pmc_process_csw_out()
1768 if (pm->pm_flags & PMC_F_LOG_PROCCSW) in pmc_process_csw_out()
1769 pmclog_process_proccsw(pm, pp, tmp, td); in pmc_process_csw_out()
1774 (void)pcd->pcd_config_pmc(cpu, adjri, NULL); in pmc_process_csw_out()
1781 (void)(*md->pmd_switch_out)(pc, pp); in pmc_process_csw_out()
1794 pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE); in pmc_process_thread_add()
1807 pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE); in pmc_process_thread_delete()
1820 pmc_capture_user_callchain(curcpu, PMC_UR, td->td_frame); in pmc_process_thread_userret()
1830 const struct pmc *pm; in pmc_process_mmap() local
1840 pmc_getfilename((struct vnode *)pkm->pm_file, &fullpath, &freepath); in pmc_process_mmap()
1842 pid = td->td_proc->p_pid; in pmc_process_mmap()
1845 /* Inform owners of all system-wide sampling PMCs. */ in pmc_process_mmap()
1847 if (po->po_flags & PMC_PO_OWNS_LOGFILE) in pmc_process_mmap()
1848 pmclog_process_map_in(po, pid, pkm->pm_address, in pmc_process_mmap()
1852 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL) in pmc_process_mmap()
1858 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_mmap()
1859 if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL && in pmc_process_mmap()
1860 PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { in pmc_process_mmap()
1861 pmclog_process_map_in(pm->pm_owner, in pmc_process_mmap()
1862 pid, pkm->pm_address, fullpath); in pmc_process_mmap()
1878 const struct pmc *pm; in pmc_process_munmap() local
1884 pid = td->td_proc->p_pid; in pmc_process_munmap()
1888 if (po->po_flags & PMC_PO_OWNS_LOGFILE) in pmc_process_munmap()
1889 pmclog_process_map_out(po, pid, pkm->pm_address, in pmc_process_munmap()
1890 pkm->pm_address + pkm->pm_size); in pmc_process_munmap()
1894 if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL) in pmc_process_munmap()
1897 for (ri = 0; ri < md->pmd_npmc; ri++) { in pmc_process_munmap()
1898 pm = pp->pp_pmcs[ri].pp_pmc; in pmc_process_munmap()
1899 if (pm != NULL && PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { in pmc_process_munmap()
1900 pmclog_process_map_out(pm->pm_owner, pid, in pmc_process_munmap()
1901 pkm->pm_address, pkm->pm_address + pkm->pm_size); in pmc_process_munmap()
1910 pmc_log_kernel_mappings(struct pmc *pm) in pmc_log_kernel_mappings() argument
1916 KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)), in pmc_log_kernel_mappings()
1917 ("[pmc,%d] non-sampling PMC (%p) desires mapping information", in pmc_log_kernel_mappings()
1918 __LINE__, (void *) pm)); in pmc_log_kernel_mappings()
1920 po = pm->pm_owner; in pmc_log_kernel_mappings()
1921 if ((po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE) != 0) in pmc_log_kernel_mappings()
1924 if (PMC_TO_MODE(pm) == PMC_MODE_SS) in pmc_log_kernel_mappings()
1925 pmc_process_allproc(pm); in pmc_log_kernel_mappings()
1931 for (km = kmbase; km->pm_file != NULL; km++) { in pmc_log_kernel_mappings()
1932 PMCDBG2(LOG,REG,1,"%s %p", (char *)km->pm_file, in pmc_log_kernel_mappings()
1933 (void *)km->pm_address); in pmc_log_kernel_mappings()
1934 pmclog_process_map_in(po, (pid_t)-1, km->pm_address, in pmc_log_kernel_mappings()
1935 km->pm_file); in pmc_log_kernel_mappings()
1939 po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE; in pmc_log_kernel_mappings()
1965 map = &vm->vm_map; in pmc_log_process_mappings()
1970 "NULL! pid=%d vm_map=%p\n", p->p_pid, map); in pmc_log_process_mappings()
1977 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || in pmc_log_process_mappings()
1978 (entry->protection & VM_PROT_EXECUTE) == 0 || in pmc_log_process_mappings()
1979 entry->object.vm_object == NULL) { in pmc_log_process_mappings()
1983 obj = entry->object.vm_object; in pmc_log_process_mappings()
1987 * Walk the backing_object list to find the base (non-shadowed) in pmc_log_process_mappings()
1991 tobj = tobj->backing_object) { in pmc_log_process_mappings()
2005 "vm_map=%p vm_obj=%p\n", p->p_pid, map, obj); in pmc_log_process_mappings()
2020 * don't emit redundant MAP-IN directives. in pmc_log_process_mappings()
2022 if (entry->start == last_end && vp == last_vp) { in pmc_log_process_mappings()
2023 last_end = entry->end; in pmc_log_process_mappings()
2038 start_addr = entry->start; in pmc_log_process_mappings()
2039 last_end = entry->end; in pmc_log_process_mappings()
2040 last_timestamp = map->timestamp; in pmc_log_process_mappings()
2055 pmclog_process_map_in(po, p->p_pid, start_addr, fullpath); in pmc_log_process_mappings()
2073 * &map->header, which would cause our loop to abort in pmc_log_process_mappings()
2079 * process exits, so there will always be a non-header in pmc_log_process_mappings()
2083 if (map->timestamp != last_timestamp) in pmc_log_process_mappings()
2084 vm_map_lookup_entry(map, last_end - 1, &entry); in pmc_log_process_mappings()
2112 if (!LIST_EMPTY(&p->p_children)) in pmc_log_all_process_mappings()
2113 p = LIST_FIRST(&p->p_children); in pmc_log_all_process_mappings()
2121 p = p->p_pptr; in pmc_log_all_process_mappings()
2133 "CSW-IN",
2134 "CSW-OUT",
2140 "CALLCHAIN-NMI",
2141 "CALLCHAIN-SOFT",
2143 "THR-CREATE",
2144 "THR-EXIT",
2145 "THR-USERRET",
2146 "THR-CREATE-LOG",
2147 "THR-EXIT-LOG",
2148 "PROC-CREATE-LOG"
2227 KASSERT(td->td_pinned == 1, in pmc_hook_handler()
2231 td->td_pflags &= ~TDP_CALLCHAIN; in pmc_hook_handler()
2245 KASSERT(td->td_pinned == 1, in pmc_hook_handler()
2250 td->td_pflags &= ~TDP_CALLCHAIN; in pmc_hook_handler()
2308 po->po_owner = p; in pmc_allocate_owner_descriptor()
2311 TAILQ_INIT(&po->po_logbuffers); in pmc_allocate_owner_descriptor()
2312 mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN); in pmc_allocate_owner_descriptor()
2314 PMCDBG4(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p", in pmc_allocate_owner_descriptor()
2315 p, p->p_pid, p->p_comm, po); in pmc_allocate_owner_descriptor()
2324 PMCDBG4(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)", in pmc_destroy_owner_descriptor()
2325 po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm); in pmc_destroy_owner_descriptor()
2327 mtx_destroy(&po->po_mtx); in pmc_destroy_owner_descriptor()
2344 pmc_threadfreelist_entries--; in pmc_thread_descriptor_pool_alloc()
2352 * Add a thread descriptor to the free pool. We use this instead of free()
2386 delta = pmc_threadfreelist_entries - pmc_threadfreelist_max; in pmc_thread_descriptor_pool_free_task()
2388 delta--; in pmc_thread_descriptor_pool_free_task()
2389 pmc_threadfreelist_entries--; in pmc_thread_descriptor_pool_free_task()
2436 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case prior to in pmc_find_thread_descriptor()
2451 mtx_lock_spin(pp->pp_tdslock); in pmc_find_thread_descriptor()
2452 LIST_FOREACH(pt, &pp->pp_tds, pt_next) { in pmc_find_thread_descriptor()
2453 if (pt->pt_td == td) in pmc_find_thread_descriptor()
2463 pt->pt_td = td; in pmc_find_thread_descriptor()
2464 LIST_INSERT_HEAD(&pp->pp_tds, pt, pt_next); in pmc_find_thread_descriptor()
2467 mtx_unlock_spin(pp->pp_tdslock); in pmc_find_thread_descriptor()
2543 * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we in pmc_find_process_descriptor()
2547 ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc * in pmc_find_process_descriptor()
2552 if (pp->pp_proc == p) in pmc_find_process_descriptor()
2560 ppnew->pp_proc = p; in pmc_find_process_descriptor()
2561 LIST_INIT(&ppnew->pp_tds); in pmc_find_process_descriptor()
2562 ppnew->pp_tdslock = mtx_pool_find(pmc_mtxpool, ppnew); in pmc_find_process_descriptor()
2584 KASSERT(pp->pp_refcnt == 0, in pmc_remove_process_descriptor()
2586 __LINE__, pp, pp->pp_refcnt)); in pmc_remove_process_descriptor()
2601 while ((pmc_td = LIST_FIRST(&pp->pp_tds)) != NULL) { in pmc_destroy_process_descriptor()
2623 if (po->po_owner == p) in pmc_find_owner_descriptor()
2627 PMCDBG5(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> " in pmc_find_owner_descriptor()
2628 "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po); in pmc_find_owner_descriptor()
2642 pmc->pm_runcount = counter_u64_alloc(M_WAITOK); in pmc_allocate_pmc_descriptor()
2643 pmc->pm_pcpu_state = malloc(sizeof(struct pmc_pcpu_state) * mp_ncpus, in pmc_allocate_pmc_descriptor()
2645 PMCDBG1(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc); in pmc_allocate_pmc_descriptor()
2654 pmc_destroy_pmc_descriptor(struct pmc *pm) in pmc_destroy_pmc_descriptor() argument
2657 KASSERT(pm->pm_state == PMC_STATE_DELETED || in pmc_destroy_pmc_descriptor()
2658 pm->pm_state == PMC_STATE_FREE, in pmc_destroy_pmc_descriptor()
2659 ("[pmc,%d] destroying non-deleted PMC", __LINE__)); in pmc_destroy_pmc_descriptor()
2660 KASSERT(LIST_EMPTY(&pm->pm_targets), in pmc_destroy_pmc_descriptor()
2662 KASSERT(pm->pm_owner == NULL, in pmc_destroy_pmc_descriptor()
2664 KASSERT(counter_u64_fetch(pm->pm_runcount) == 0, in pmc_destroy_pmc_descriptor()
2665 ("[pmc,%d] pmc has non-zero run count %ju", __LINE__, in pmc_destroy_pmc_descriptor()
2666 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); in pmc_destroy_pmc_descriptor()
2668 counter_u64_free(pm->pm_runcount); in pmc_destroy_pmc_descriptor()
2669 free(pm->pm_pcpu_state, M_PMC); in pmc_destroy_pmc_descriptor()
2670 free(pm, M_PMC); in pmc_destroy_pmc_descriptor()
2674 pmc_wait_for_pmc_idle(struct pmc *pm) in pmc_wait_for_pmc_idle() argument
2685 pmclog_flush(pm->pm_owner, 1); in pmc_wait_for_pmc_idle()
2686 while (counter_u64_fetch(pm->pm_runcount) > 0) { in pmc_wait_for_pmc_idle()
2687 pmclog_flush(pm->pm_owner, 1); in pmc_wait_for_pmc_idle()
2689 maxloop--; in pmc_wait_for_pmc_idle()
2692 "pmc to be free", __LINE__, PMC_TO_ROWINDEX(pm), in pmc_wait_for_pmc_idle()
2693 (uintmax_t)counter_u64_fetch(pm->pm_runcount))); in pmc_wait_for_pmc_idle()
2702 * - detaches the PMC from hardware
2703 * - unlinks all target threads that were attached to it
2704 * - removes the PMC from its owner's list
2705 * - destroys the PMC private mutex
2711 pmc_release_pmc_descriptor(struct pmc *pm) in pmc_release_pmc_descriptor() argument
2723 KASSERT(pm, ("[pmc,%d] null pmc", __LINE__)); in pmc_release_pmc_descriptor()
2725 ri = PMC_TO_ROWINDEX(pm); in pmc_release_pmc_descriptor()
2727 mode = PMC_TO_MODE(pm); in pmc_release_pmc_descriptor()
2729 PMCDBG3(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri, in pmc_release_pmc_descriptor()
2742 cpu = PMC_TO_CPU(pm); in pmc_release_pmc_descriptor()
2745 /* switch off non-stalled CPUs */ in pmc_release_pmc_descriptor()
2746 pm->pm_pcpu_state[cpu].pps_cpustate = 0; in pmc_release_pmc_descriptor()
2747 if (pm->pm_state == PMC_STATE_RUNNING && in pmc_release_pmc_descriptor()
2748 pm->pm_pcpu_state[cpu].pps_stalled == 0) { in pmc_release_pmc_descriptor()
2750 phw = pmc_pcpu[cpu]->pc_hwpmcs[ri]; in pmc_release_pmc_descriptor()
2752 KASSERT(phw->phw_pmc == pm, in pmc_release_pmc_descriptor()
2753 ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)", in pmc_release_pmc_descriptor()
2754 __LINE__, ri, phw->phw_pmc, pm)); in pmc_release_pmc_descriptor()
2758 (void)pcd->pcd_stop_pmc(cpu, adjri, pm); in pmc_release_pmc_descriptor()
2765 (void)pcd->pcd_config_pmc(cpu, adjri, NULL); in pmc_release_pmc_descriptor()
2769 if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) { in pmc_release_pmc_descriptor()
2770 po = pm->pm_owner; in pmc_release_pmc_descriptor()
2771 po->po_sscount--; in pmc_release_pmc_descriptor()
2772 if (po->po_sscount == 0) { in pmc_release_pmc_descriptor()
2778 pm->pm_state = PMC_STATE_DELETED; in pmc_release_pmc_descriptor()
2784 * per-cpu sample queues. Wait for the queue to drain. in pmc_release_pmc_descriptor()
2786 pmc_wait_for_pmc_idle(pm); in pmc_release_pmc_descriptor()
2798 pm->pm_state = PMC_STATE_DELETED; in pmc_release_pmc_descriptor()
2801 pmc_wait_for_pmc_idle(pm); in pmc_release_pmc_descriptor()
2806 * from this PMC. If a process-record's refcount falls to zero, in pmc_release_pmc_descriptor()
2807 * we remove it from the hash table. The module-wide SX lock in pmc_release_pmc_descriptor()
2810 LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) { in pmc_release_pmc_descriptor()
2811 pp = ptgt->pt_process; in pmc_release_pmc_descriptor()
2812 pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */ in pmc_release_pmc_descriptor()
2814 PMCDBG1(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt); in pmc_release_pmc_descriptor()
2820 if (pp->pp_refcnt == 0) { in pmc_release_pmc_descriptor()
2826 cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */ in pmc_release_pmc_descriptor()
2832 (void)pcd->pcd_release_pmc(cpu, adjri, pm); in pmc_release_pmc_descriptor()
2837 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) in pmc_release_pmc_descriptor()
2843 if (pm->pm_owner != NULL) { in pmc_release_pmc_descriptor()
2844 LIST_REMOVE(pm, pm_next); in pmc_release_pmc_descriptor()
2845 pm->pm_owner = NULL; in pmc_release_pmc_descriptor()
2864 KASSERT(pmc->pm_owner == NULL, in pmc_register_owner()
2866 pmc->pm_owner = po; in pmc_register_owner()
2868 LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next); in pmc_register_owner()
2871 p->p_flag |= P_HWPMC; in pmc_register_owner()
2874 if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) in pmc_register_owner()
2877 PMCDBG2(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p", in pmc_register_owner()
2900 * - the current process is already being profiled by a PMC at index 'ri',
2902 * - the current process has already allocated a PMC at index 'ri'
2908 struct pmc *pm; in pmc_can_allocate_rowindex() local
2913 PMCDBG5(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d " in pmc_can_allocate_rowindex()
2914 "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu); in pmc_can_allocate_rowindex()
2917 * We shouldn't have already allocated a process-mode PMC at in pmc_can_allocate_rowindex()
2920 * We shouldn't have allocated a system-wide PMC on the same in pmc_can_allocate_rowindex()
2924 LIST_FOREACH(pm, &po->po_pmcs, pm_next) { in pmc_can_allocate_rowindex()
2925 if (PMC_TO_ROWINDEX(pm) == ri) { in pmc_can_allocate_rowindex()
2926 mode = PMC_TO_MODE(pm); in pmc_can_allocate_rowindex()
2930 PMC_TO_CPU(pm) == cpu) in pmc_can_allocate_rowindex()
2941 if (pp->pp_pmcs[ri].pp_pmc != NULL) in pmc_can_allocate_rowindex()
2944 PMCDBG4(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok", in pmc_can_allocate_rowindex()
2945 p, p->p_pid, p->p_comm, ri); in pmc_can_allocate_rowindex()
2960 PMCDBG2(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode); in pmc_can_allocate_row()
2970 * Expected disposition Row-disposition Result in pmc_can_allocate_row()
2985 PMCDBG2(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode); in pmc_can_allocate_row()
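Given the signed pmc_pmcdisp[] convention noted earlier, the disposition table referenced in the comment above reduces to a sign test; a hypothetical condensation of the check:

	static int
	pmc_row_disposition_ok_sketch(int ri, enum pmc_mode mode)
	{
		/*
		 * System-wide PMCs need the row FREE or already
		 * STANDALONE (disposition <= 0); process-mode PMCs
		 * need it FREE or already THREAD (disposition >= 0).
		 */
		if (PMC_IS_SYSTEM_MODE(mode))
			return (pmc_pmcdisp[ri] <= 0);
		return (pmc_pmcdisp[ri] >= 0);
	}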
2995 struct pmc *pm; in pmc_find_pmc_descriptor_in_process() local
2997 KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc, in pmc_find_pmc_descriptor_in_process()
2999 PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc)); in pmc_find_pmc_descriptor_in_process()
3001 LIST_FOREACH(pm, &po->po_pmcs, pm_next) { in pmc_find_pmc_descriptor_in_process()
3002 if (pm->pm_id == pmcid) in pmc_find_pmc_descriptor_in_process()
3003 return (pm); in pmc_find_pmc_descriptor_in_process()
3012 struct pmc *pm, *opm; in pmc_find_pmc() local
3016 PMCDBG1(PMC,FND,1, "find-pmc id=%d", pmcid); in pmc_find_pmc()
3017 if (PMC_ID_TO_ROWINDEX(pmcid) >= md->pmd_npmc) in pmc_find_pmc()
3020 if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL) { in pmc_find_pmc()
3026 pp = pmc_find_process_descriptor(curthread->td_proc, in pmc_find_pmc()
3030 opm = pp->pp_pmcs[PMC_ID_TO_ROWINDEX(pmcid)].pp_pmc; in pmc_find_pmc()
3033 if ((opm->pm_flags & in pmc_find_pmc()
3038 po = opm->pm_owner; in pmc_find_pmc()
3041 if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL) in pmc_find_pmc()
3044 PMCDBG2(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm); in pmc_find_pmc()
3046 *pmc = pm; in pmc_find_pmc()
3054 pmc_start(struct pmc *pm) in pmc_start() argument
3063 KASSERT(pm != NULL, in pmc_start()
3064 ("[pmc,%d] null pm", __LINE__)); in pmc_start()
3066 mode = PMC_TO_MODE(pm); in pmc_start()
3067 ri = PMC_TO_ROWINDEX(pm); in pmc_start()
3071 po = pm->pm_owner; in pmc_start()
3073 PMCDBG3(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri); in pmc_start()
3075 po = pm->pm_owner; in pmc_start()
3081 if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) != 0 && in pmc_start()
3082 (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) in pmc_start()
3089 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) in pmc_start()
3090 pmc_log_kernel_mappings(pm); in pmc_start()
3097 if (LIST_EMPTY(&pm->pm_targets)) { in pmc_start()
3098 error = (pm->pm_flags & PMC_F_ATTACH_DONE) != 0 ? in pmc_start()
3099 ESRCH : pmc_attach_process(po->po_owner, pm); in pmc_start()
3107 pm->pm_state = PMC_STATE_RUNNING; in pmc_start()
3108 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) != 0) in pmc_start()
3116 * A system-wide PMC. in pmc_start()
3118 * Add the owner to the global list if this is a system-wide in pmc_start()
3127 if (po->po_logprocmaps == 0) { in pmc_start()
3129 po->po_logprocmaps = 1; in pmc_start()
3131 po->po_sscount++; in pmc_start()
3132 if (po->po_sscount == 1) { in pmc_start()
3144 cpu = PMC_TO_CPU(pm); in pmc_start()
3153 pm->pm_state = PMC_STATE_RUNNING; in pmc_start()
3156 v = PMC_IS_SAMPLING_MODE(mode) ? pm->pm_sc.pm_reloadcount : in pmc_start()
3157 pm->pm_sc.pm_initial; in pmc_start()
3158 if ((error = pcd->pcd_write_pmc(cpu, adjri, pm, v)) == 0) { in pmc_start()
3161 pm->pm_pcpu_state[cpu].pps_stalled = 0; in pmc_start()
3164 pm->pm_pcpu_state[cpu].pps_cpustate = 1; in pmc_start()
3165 error = pcd->pcd_start_pmc(cpu, adjri, pm); in pmc_start()
3177 pmc_stop(struct pmc *pm) in pmc_stop() argument
3184 KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__)); in pmc_stop()
3186 PMCDBG3(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm, PMC_TO_MODE(pm), in pmc_stop()
3187 PMC_TO_ROWINDEX(pm)); in pmc_stop()
3189 pm->pm_state = PMC_STATE_STOPPED; in pmc_stop()
3192 * If the PMC is a virtual mode one, changing the state to non-RUNNING in pmc_stop()
3198 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) in pmc_stop()
3202 * A system-mode PMC. Move to the CPU associated with this PMC, and in pmc_stop()
3209 cpu = PMC_TO_CPU(pm); in pmc_stop()
3217 ri = PMC_TO_ROWINDEX(pm); in pmc_stop()
3220 pm->pm_pcpu_state[cpu].pps_cpustate = 0; in pmc_stop()
3222 if ((error = pcd->pcd_stop_pmc(cpu, adjri, pm)) == 0) { in pmc_stop()
3223 error = pcd->pcd_read_pmc(cpu, adjri, pm, in pmc_stop()
3224 &pm->pm_sc.pm_initial); in pmc_stop()
3231 po = pm->pm_owner; in pmc_stop()
3232 if (PMC_TO_MODE(pm) == PMC_MODE_SS) { in pmc_stop()
3233 po->po_sscount--; in pmc_stop()
3234 if (po->po_sscount == 0) { in pmc_stop()
3250 for (n = 0; n < md->pmd_nclass; n++) { in pmc_class_to_classdep()
3251 if (md->pmd_classdep[n].pcd_class == class) in pmc_class_to_classdep()
3252 return (&md->pmd_classdep[n]); in pmc_class_to_classdep()
3301 class = pa->pm_class; in pmc_do_op_pmcallocate()
3302 caps = pa->pm_caps; in pmc_do_op_pmcallocate()
3303 flags = pa->pm_flags; in pmc_do_op_pmcallocate()
3304 mode = pa->pm_mode; in pmc_do_op_pmcallocate()
3305 cpu = pa->pm_cpu; in pmc_do_op_pmcallocate()
3307 p = td->td_proc; in pmc_do_op_pmcallocate()
3320 * System mode PMCs need to specify a non-default CPU. in pmc_do_op_pmcallocate()
3333 * Refuse an allocation for a system-wide PMC if this process has been in pmc_do_op_pmcallocate()
3334 * jailed, or if this process lacks super-user credentials and the in pmc_do_op_pmcallocate()
3338 if (jailed(td->td_ucred)) in pmc_do_op_pmcallocate()
3382 if ((pcd->pcd_caps & caps) != caps) in pmc_do_op_pmcallocate()
3385 PMCDBG4(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d", pa->pm_ev, in pmc_do_op_pmcallocate()
3389 pmc->pm_id = PMC_ID_MAKE_ID(cpu, pa->pm_mode, class, PMC_ID_INVALID); in pmc_do_op_pmcallocate()
3390 pmc->pm_event = pa->pm_ev; in pmc_do_op_pmcallocate()
3391 pmc->pm_state = PMC_STATE_FREE; in pmc_do_op_pmcallocate()
3392 pmc->pm_caps = caps; in pmc_do_op_pmcallocate()
3393 pmc->pm_flags = flags; in pmc_do_op_pmcallocate()
3401 if (pa->pm_count < MAX(1, pmc_mincount)) in pmc_do_op_pmcallocate()
3403 "rate %ju - setting to %u\n", in pmc_do_op_pmcallocate()
3404 (uintmax_t)pa->pm_count, in pmc_do_op_pmcallocate()
3406 pmc->pm_sc.pm_reloadcount = MAX(MAX(1, pmc_mincount), in pmc_do_op_pmcallocate()
3407 pa->pm_count); in pmc_do_op_pmcallocate()
3409 pmc->pm_sc.pm_initial = pa->pm_count; in pmc_do_op_pmcallocate()
3415 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \ in pmc_do_op_pmcallocate()
3418 (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL) in pmc_do_op_pmcallocate()
3422 for (n = pcd->pcd_ri; n < md->pmd_npmc; n++) { in pmc_do_op_pmcallocate()
3432 if (pcd->pcd_allocate_pmc(cpu, adjri, pmc, pa) == 0) { in pmc_do_op_pmcallocate()
3439 for (n = pcd->pcd_ri; n < md->pmd_npmc; n++) { in pmc_do_op_pmcallocate()
3446 if (pcd->pcd_allocate_pmc(td->td_oncpu, adjri, pmc, in pmc_do_op_pmcallocate()
3459 if (n == md->pmd_npmc) { in pmc_do_op_pmcallocate()
3465 pmc->pm_id = PMC_ID_MAKE_ID(cpu, mode, class, n); in pmc_do_op_pmcallocate()
3467 PMCDBG5(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x", in pmc_do_op_pmcallocate()
3468 pmc->pm_event, class, mode, n, pmc->pm_id); in pmc_do_op_pmcallocate()
3471 if ((pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW)) != 0) in pmc_do_op_pmcallocate()
3472 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE; in pmc_do_op_pmcallocate()
3476 pmc->pm_flags |= PMC_F_NEEDS_LOGFILE; in pmc_do_op_pmcallocate()
3485 phw = pmc_pcpu[cpu]->pc_hwpmcs[n]; in pmc_do_op_pmcallocate()
3488 if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 || in pmc_do_op_pmcallocate()
3489 (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) { in pmc_do_op_pmcallocate()
3490 (void)pcd->pcd_release_pmc(cpu, adjri, pmc); in pmc_do_op_pmcallocate()
3499 pmc->pm_state = PMC_STATE_ALLOCATED; in pmc_do_op_pmcallocate()
3500 pmc->pm_class = class; in pmc_do_op_pmcallocate()
3523 pa->pm_pmcid = pmc->pm_id; in pmc_do_op_pmcallocate()
3533 struct pmc *pm; in pmc_do_op_pmcattach() local
3542 a.pm_pid = td->td_proc->p_pid; in pmc_do_op_pmcattach()
3545 error = pmc_find_pmc(a.pm_pmc, &pm); in pmc_do_op_pmcattach()
3549 if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) in pmc_do_op_pmcattach()
3553 if (pm->pm_state == PMC_STATE_RUNNING) { in pmc_do_op_pmcattach()
3555 } else if (pm->pm_state != PMC_STATE_ALLOCATED && in pmc_do_op_pmcattach()
3556 pm->pm_state != PMC_STATE_STOPPED) { in pmc_do_op_pmcattach()
3567 if ((p->p_flag & P_WEXIT) != 0) { in pmc_do_op_pmcattach()
3580 error = pmc_attach_process(p, pm); in pmc_do_op_pmcattach()
3591 struct pmc *pm; in pmc_do_op_pmcdetach() local
3598 a.pm_pid = td->td_proc->p_pid; in pmc_do_op_pmcdetach()
3600 error = pmc_find_pmc(a.pm_pmc, &pm); in pmc_do_op_pmcdetach()
3611 if ((p->p_flag & P_WEXIT) != 0) { in pmc_do_op_pmcdetach()
3619 error = pmc_detach_process(p, pm); in pmc_do_op_pmcdetach()
3631 struct pmc *pm; in pmc_do_op_pmcrelease() local
3637 * Use pmc_release_pmc_descriptor() to switch off the in pmc_do_op_pmcrelease()
3646 error = pmc_find_pmc(pmcid, &pm); in pmc_do_op_pmcrelease()
3650 po = pm->pm_owner; in pmc_do_op_pmcrelease()
3651 pmc_release_pmc_descriptor(pm); in pmc_do_op_pmcrelease()
3653 pmc_destroy_pmc_descriptor(pm); in pmc_do_op_pmcrelease()
3666 struct pmc *pm; in pmc_do_op_pmcrw() local
3670 PMCDBG2(PMC,OPS,1, "rw id=%d flags=0x%x", prw->pm_pmcid, prw->pm_flags); in pmc_do_op_pmcrw()
3673 if ((prw->pm_flags & (PMC_F_OLDVALUE | PMC_F_NEWVALUE)) == 0) in pmc_do_op_pmcrw()
3677 error = pmc_find_pmc(prw->pm_pmcid, &pm); in pmc_do_op_pmcrw()
3682 if (pm->pm_state != PMC_STATE_ALLOCATED && in pmc_do_op_pmcrw()
3683 pm->pm_state != PMC_STATE_STOPPED && in pmc_do_op_pmcrw()
3684 pm->pm_state != PMC_STATE_RUNNING) in pmc_do_op_pmcrw()
3688 if (pm->pm_state == PMC_STATE_RUNNING && in pmc_do_op_pmcrw()
3689 (prw->pm_flags & PMC_F_NEWVALUE) != 0) in pmc_do_op_pmcrw()
3692 if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) { in pmc_do_op_pmcrw()
3696 * get an up-to-date reading from hardware for a READ. Writes in pmc_do_op_pmcrw()
3704 ri = PMC_TO_ROWINDEX(pm); in pmc_do_op_pmcrw()
3707 mtx_pool_lock_spin(pmc_mtxpool, pm); in pmc_do_op_pmcrw()
3708 cpu = curthread->td_oncpu; in pmc_do_op_pmcrw()
3710 if ((prw->pm_flags & PMC_F_OLDVALUE) != 0) { in pmc_do_op_pmcrw()
3711 if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) && in pmc_do_op_pmcrw()
3712 (pm->pm_state == PMC_STATE_RUNNING)) { in pmc_do_op_pmcrw()
3713 error = (*pcd->pcd_read_pmc)(cpu, adjri, pm, in pmc_do_op_pmcrw()
3716 *valp = pm->pm_gv.pm_savedvalue; in pmc_do_op_pmcrw()
3720 if ((prw->pm_flags & PMC_F_NEWVALUE) != 0) in pmc_do_op_pmcrw()
3721 pm->pm_gv.pm_savedvalue = prw->pm_value; in pmc_do_op_pmcrw()
3723 mtx_pool_unlock_spin(pmc_mtxpool, pm); in pmc_do_op_pmcrw()
3725 cpu = PMC_TO_CPU(pm); in pmc_do_op_pmcrw()
3726 ri = PMC_TO_ROWINDEX(pm); in pmc_do_op_pmcrw()
3738 if ((prw->pm_flags & PMC_F_OLDVALUE) != 0) in pmc_do_op_pmcrw()
3739 error = (*pcd->pcd_read_pmc)(cpu, adjri, pm, valp); in pmc_do_op_pmcrw()
3742 if (error == 0 && (prw->pm_flags & PMC_F_NEWVALUE) != 0) in pmc_do_op_pmcrw()
3743 error = (*pcd->pcd_write_pmc)(cpu, adjri, pm, in pmc_do_op_pmcrw()
3744 prw->pm_value); in pmc_do_op_pmcrw()
3753 if ((prw->pm_flags & PMC_F_NEWVALUE) != 0) in pmc_do_op_pmcrw()
3754 PMCDBG3(PMC,OPS,2, "rw id=%d new %jx -> old %jx", in pmc_do_op_pmcrw()
3755 ri, prw->pm_value, *valp); in pmc_do_op_pmcrw()
3757 PMCDBG2(PMC,OPS,2, "rw id=%d -> old %jx", ri, *valp); in pmc_do_op_pmcrw()
3772 op = c->pmop_code; in pmc_syscall_handler()
3773 arg = c->pmop_data; in pmc_syscall_handler()
3784 * Instead, pre-create the process and ignite the loop in pmc_syscall_handler()
3813 struct pmc *pm; in pmc_syscall_handler() local
3830 p = td->td_proc; in pmc_syscall_handler()
3842 * de-configure it. in pmc_syscall_handler()
3848 } else if (po->po_flags & PMC_PO_OWNS_LOGFILE) { in pmc_syscall_handler()
3852 LIST_FOREACH(pm, &po->po_pmcs, pm_next) in pmc_syscall_handler()
3853 if (pm->pm_flags & PMC_F_NEEDS_LOGFILE && in pmc_syscall_handler()
3854 pm->pm_state == PMC_STATE_RUNNING) in pmc_syscall_handler()
3855 pmc_stop(pm); in pmc_syscall_handler()
3875 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { in pmc_syscall_handler()
3894 if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { in pmc_syscall_handler()
3915 gci.pm_cputype = md->pmd_cputype; in pmc_syscall_handler()
3917 gci.pm_npmc = md->pmd_npmc; in pmc_syscall_handler()
3918 gci.pm_nclass = md->pmd_nclass; in pmc_syscall_handler()
3920 pcd = md->pmd_classdep; in pmc_syscall_handler()
3921 for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) { in pmc_syscall_handler()
3922 pci->pm_caps = pcd->pcd_caps; in pmc_syscall_handler()
3923 pci->pm_class = pcd->pcd_class; in pmc_syscall_handler()
3924 pci->pm_width = pcd->pcd_width; in pmc_syscall_handler()
3925 pci->pm_num = pcd->pcd_num; in pmc_syscall_handler()
3947 if ((error = copyin(&gei->pm_class, &cl, sizeof(cl))) != 0) in pmc_syscall_handler()
3961 bcopy(&ps->ps_ev, &dev, sizeof(dev)); in pmc_syscall_handler()
3965 &gei->pm_events[nevent], in pmc_syscall_handler()
3974 error = copyout(&nevent, &gei->pm_nevent, in pmc_syscall_handler()
4031 struct pmc *pm; in pmc_syscall_handler() local
4044 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0) in pmc_syscall_handler()
4061 npmc = md->pmd_npmc; in pmc_syscall_handler()
4068 for (n = 0; n < md->pmd_npmc; n++, p++) { in pmc_syscall_handler()
4075 if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0) in pmc_syscall_handler()
4079 p->pm_rowdisp = PMC_DISP_STANDALONE; in pmc_syscall_handler()
4081 p->pm_rowdisp = PMC_DISP_THREAD; in pmc_syscall_handler()
4083 p->pm_rowdisp = PMC_DISP_FREE; in pmc_syscall_handler()
4085 p->pm_ownerpid = -1; in pmc_syscall_handler()
4087 if (pm == NULL) /* no PMC associated */ in pmc_syscall_handler()
4090 po = pm->pm_owner; in pmc_syscall_handler()
4092 KASSERT(po->po_owner != NULL, in pmc_syscall_handler()
4096 p->pm_ownerpid = po->po_owner->p_pid; in pmc_syscall_handler()
4097 p->pm_mode = PMC_TO_MODE(pm); in pmc_syscall_handler()
4098 p->pm_event = pm->pm_event; in pmc_syscall_handler()
4099 p->pm_flags = pm->pm_flags; in pmc_syscall_handler()
4101 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) in pmc_syscall_handler()
4102 p->pm_reloadcount = in pmc_syscall_handler()
4103 pm->pm_sc.pm_reloadcount; in pmc_syscall_handler()
4110 error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size); in pmc_syscall_handler()
4164 if (ri < 0 || ri >= (int) md->pmd_npmc) { in pmc_syscall_handler()
4170 * We can't disable a PMC with a row-index allocated in pmc_syscall_handler()
4182 * in system-wide mode. in pmc_syscall_handler()
4189 phw = pc->pc_hwpmcs[ri]; in pmc_syscall_handler()
4195 if (phw->phw_pmc == NULL) { in pmc_syscall_handler()
4197 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) { in pmc_syscall_handler()
4198 phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED; in pmc_syscall_handler()
4201 (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) { in pmc_syscall_handler()
4202 phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED; in pmc_syscall_handler()
4205 /* other cases are a no-op */ in pmc_syscall_handler()
4266 * 'pmc_id'. This allows processes to directly use RDPMC in pmc_syscall_handler()
4274 struct pmc *pm; in pmc_syscall_handler() local
4284 if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0) in pmc_syscall_handler()
4298 if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) || in pmc_syscall_handler()
4299 (pm->pm_flags & PMC_F_DESCENDANTS)) { in pmc_syscall_handler()
4305 * It only makes sense to use a RDPMC (or its in pmc_syscall_handler()
4306 * equivalent instruction on non-x86 architectures) on in pmc_syscall_handler()
4309 * one process attached to it -- its owner. in pmc_syscall_handler()
4312 if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL || in pmc_syscall_handler()
4314 pt->pt_process->pp_proc != pm->pm_owner->po_owner) { in pmc_syscall_handler()
4319 ri = PMC_TO_ROWINDEX(pm); in pmc_syscall_handler()
4323 if (pcd->pcd_get_msr == NULL) { in pmc_syscall_handler()
4328 if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) < 0) in pmc_syscall_handler()
4339 pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS; in pmc_syscall_handler()
4382 error = copyout(&oldvalue, &pprw->pm_value, in pmc_syscall_handler()
4396 struct pmc *pm; in pmc_syscall_handler() local
4404 if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0) in pmc_syscall_handler()
4407 if (pm->pm_state == PMC_STATE_RUNNING) { in pmc_syscall_handler()
4412 if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { in pmc_syscall_handler()
4419 "rate %ju - setting to %u\n", in pmc_syscall_handler()
4422 pm->pm_sc.pm_reloadcount = MAX(MAX(1, pmc_mincount), in pmc_syscall_handler()
4425 pm->pm_sc.pm_initial = sc.pm_count; in pmc_syscall_handler()
        struct pmc *pm;

        if ((error = pmc_find_pmc(pmcid, &pm)) != 0)

        KASSERT(pmcid == pm->pm_id,
            pm->pm_id, pmcid));

        if (pm->pm_state == PMC_STATE_RUNNING)    /* already running */
        else if (pm->pm_state != PMC_STATE_STOPPED &&
            pm->pm_state != PMC_STATE_ALLOCATED) {

        error = pmc_start(pm);

        struct pmc *pm;

        if ((error = pmc_find_pmc(pmcid, &pm)) != 0)

        KASSERT(pmcid == pm->pm_id,
            pm->pm_id, pmcid));

        if (pm->pm_state == PMC_STATE_STOPPED)    /* already stopped */
        else if (pm->pm_state != PMC_STATE_RUNNING) {

        error = pmc_stop(pm);

        if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {

        if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
    if ((td->td_pflags & TDP_CALLCHAIN) != 0)
        return;

    /*
     * [...]
     * `td->td_pflags' will be safe to touch because this thread
     * was in user space when it was interrupted.
     */
    td->td_pflags |= TDP_CALLCHAIN;

/*
 * [...]
 * Find a free slot in the per-cpu array of samples and capture the
 * current callchain there.  If a sample was successfully added, a bit
 * is set in mask 'pmc_cpumask' denoting that the DO_SAMPLES hook needs
 * to be invoked from the clock handler.
 *
 * This function is meant to be called from an NMI handler.  It cannot
 * use any of the locking primitives supplied by the OS.
 */
static int
pmc_add_sample(ring_type_t ring, struct pmc *pm, struct trapframe *tf)
{

    psb = pmc_pcpu[cpu]->pc_sb[ring];

    if (psb->ps_considx != psb->ps_prodidx &&
        ps->ps_nsamples) {    /* in use, reader hasn't caught up */
        pm->pm_pcpu_state[cpu].pps_stalled = 1;

        PMCDBG6(SAM,INT,1,"(spc) cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d",
            cpu, pm, tf, inuserspace,
            (int)(psb->ps_prodidx & pmc_sample_mask),
            (int)(psb->ps_considx & pmc_sample_mask));

    PMCDBG6(SAM,INT,1,"cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm, tf,
        inuserspace, (int)(psb->ps_prodidx & pmc_sample_mask),
        (int)(psb->ps_considx & pmc_sample_mask));

    ps->ps_pmc = pm;
    ps->ps_td = td;
    ps->ps_pid = td->td_proc->p_pid;
    ps->ps_tid = td->td_tid;
    ps->ps_tsc = pmc_rdtsc();
    ps->ps_ticks = ticks;
    ps->ps_cpu = cpu;
    ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0;

    callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ?

    MPASS(ps->ps_pc != NULL);
        ps->ps_pc[0] = PMC_TRAPFRAME_TO_PC(tf);

            callchaindepth = pmc_save_kernel_callchain(ps->ps_pc,

        ps->ps_nsamples = callchaindepth;    /* mark entry as in-use */

        ps->ps_nsamples_actual = callchaindepth;
        ps->ps_nsamples = PMC_USER_CALLCHAIN_PENDING;

    KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0,
        ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm,
        (uintmax_t)counter_u64_fetch(pm->pm_runcount)));

    counter_u64_add(pm->pm_runcount, 1);    /* hold onto PMC */

    psb->ps_prodidx++;
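/*
 * Illustrative sketch (not part of this driver): pmc_add_sample() writes
 * into a per-CPU ring addressed by free-running producer/consumer indices
 * and a power-of-two size, so a slot is found with (index & mask) and the
 * ring is full when the would-be slot is still marked in use.  No locks
 * are taken, which is what keeps the path NMI-safe.  A stand-alone,
 * single-threaded model of the same index arithmetic:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define NSLOTS 8               /* must be a power of two */
#define MASK   (NSLOTS - 1)    /* cf. pmc_sample_mask = pmc_nsamples - 1 */

struct slot {
    int in_use;
    int payload;
};

struct ring {
    uint64_t    prodidx;    /* free-running, never wrapped back */
    uint64_t    considx;
    int         stalled;    /* producer found the ring full */
    struct slot slots[NSLOTS];
};

/* Producer side: analogous to pmc_add_sample(). */
static int
ring_produce(struct ring *r, int payload)
{
    struct slot *s = &r->slots[r->prodidx & MASK];

    if (r->considx != r->prodidx && s->in_use) {
        r->stalled = 1;    /* in use, reader hasn't caught up */
        return (-1);
    }
    s->payload = payload;
    s->in_use = 1;         /* mark entry as in-use */
    r->prodidx++;
    return (0);
}

/* Consumer side: analogous to pmc_process_samples(). */
static void
ring_consume(struct ring *r)
{
    while (r->considx < r->prodidx) {
        struct slot *s = &r->slots[r->considx & MASK];

        printf("sample %d\n", s->payload);
        s->in_use = 0;     /* mark entry as free */
        r->considx++;
    }
}

int
main(void)
{
    struct ring r = { 0 };

    for (int i = 0; i < 10; i++)
        (void)ring_produce(&r, i);    /* the last two attempts stall */
    ring_consume(&r);
    printf("stalled=%d\n", r.stalled);
    return (0);
}
#endif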
/*
 * [...]
 * This function may be called from an NMI handler. It cannot use any of the
 * locking primitives supplied by the OS.
 */
static int
pmc_process_interrupt(int ring, struct pmc *pm, struct trapframe *tf)
{

    if ((pm->pm_flags & PMC_F_USERCALLCHAIN) &&
        (td->td_proc->p_flag & P_KPROC) == 0 && !TRAPF_USERMODE(tf)) {
        atomic_add_int(&td->td_pmcpend, 1);
        return (pmc_add_sample(PMC_UR, pm, tf));
    }
    return (pmc_add_sample(ring, pm, tf));
}
    struct pmc *pm;

    psb = pmc_pcpu[cpu]->pc_sb[ring];

    KASSERT(td->td_pflags & TDP_CALLCHAIN,

    nrecords = atomic_readandclear_32(&td->td_pmcpend);

    for (iter = 0, considx = psb->ps_considx, prodidx = psb->ps_prodidx;

        if (ps->ps_nsamples == PMC_SAMPLE_FREE) {

        if (ps->ps_td != td ||
            ps->ps_nsamples != PMC_USER_CALLCHAIN_PENDING ||
            ps->ps_pmc->pm_state != PMC_STATE_RUNNING)

        KASSERT(ps->ps_cpu == cpu,
            ps->ps_cpu, PCPU_GET(cpuid)));

        pm = ps->ps_pmc;
        KASSERT(pm->pm_flags & PMC_F_CALLCHAIN,

        KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
            (uintmax_t)counter_u64_fetch(pm->pm_runcount)));

        nsamples = ps->ps_nsamples_actual;

        if (__predict_true(nsamples < pmc_callchaindepth - 1))
            nsamples += pmc_save_user_callchain(ps->ps_pc + nsamples,
                pmc_callchaindepth - nsamples - 1, tf);

        if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) {
            ps->ps_nsamples = nsamples;

        counter_u64_add(pm->pm_runcount, -1);

        if (nrecords-- == 1)

    if (__predict_false(ring == PMC_UR && td->td_pmcpend)) {

        td->td_pmcpend = 0;

    if ((ticks - start_ticks) > hz)
        log(LOG_ERR, "%s took %d ticks\n", __func__, (ticks - start_ticks));
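/*
 * Illustrative sketch (not part of this driver): when an interrupt lands
 * in kernel mode but a user-space callchain was requested, the handler
 * above only counts the pending request (td_pmcpend) and tags the thread
 * (TDP_CALLCHAIN); the expensive user-stack walk is deferred to a safe
 * point.  A stand-alone model of that defer-and-drain pattern, with
 * hypothetical names:
 */
#if 0
#include <stdio.h>

struct fake_thread {
    int pmcpend;        /* td_pmcpend stand-in */
    int callchain_flag; /* TDP_CALLCHAIN stand-in */
};

/* Interrupt context: cheap bookkeeping only. */
static void
on_interrupt(struct fake_thread *td)
{
    td->pmcpend++;
    td->callchain_flag = 1;
}

/* Safe point (e.g. return to user mode): do the real work. */
static void
on_safe_point(struct fake_thread *td)
{
    int n = td->pmcpend;

    td->pmcpend = 0;
    td->callchain_flag = 0;
    for (int i = 0; i < n; i++)
        printf("capturing user callchain %d\n", i);
}

int
main(void)
{
    struct fake_thread td = { 0, 0 };

    on_interrupt(&td);
    on_interrupt(&td);
    on_safe_point(&td);    /* drains both pending requests */
    return (0);
}
#endif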
    struct pmc *pm;

    psb = pmc_pcpu[cpu]->pc_sb[ring];
    delta = psb->ps_prodidx - psb->ps_considx;

    MPASS(psb->ps_considx <= psb->ps_prodidx);
    for (n = 0; psb->ps_considx < psb->ps_prodidx; psb->ps_considx++, n++) {

        if (__predict_false(ps->ps_nsamples == PMC_SAMPLE_FREE))

        /* skip non-running samples */
        pm = ps->ps_pmc;
        if (pm->pm_state != PMC_STATE_RUNNING)

        KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
            ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm,
            (uintmax_t)counter_u64_fetch(pm->pm_runcount)));
        KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
            ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__,
            pm, PMC_TO_MODE(pm)));

        po = pm->pm_owner;

        if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) {

            if (ticks - ps->ps_ticks > 1) {

        PMCDBG6(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu,
            pm, ps->ps_nsamples, ps->ps_flags,
            (int)(psb->ps_prodidx & pmc_sample_mask),
            (int)(psb->ps_considx & pmc_sample_mask));

        /*
         * If this is a process-mode PMC that is attached to
         * its owner, and if the PC is in user mode, update
         * profiling statistics like timer-based profiling
         * would have done.
         *
         * Otherwise, this is either a sampling-mode PMC that
         * is attached to a different process than its owner,
         * or a system-wide sampling PMC. Dispatch a log
         * entry to the PMC's owner process.
         */
        if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
            if (ps->ps_flags & PMC_CC_F_USERSPACE) {
                td = FIRST_THREAD_IN_PROC(po->po_owner);
                addupc_intr(td, ps->ps_pc[0], 1);

            pmclog_process_callchain(pm, ps);

        ps->ps_nsamples = 0;    /* mark entry as free */
        KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
            ("[pmc,%d] pm=%p runcount %ju", __LINE__, pm,
            (uintmax_t)counter_u64_fetch(pm->pm_runcount)));

        counter_u64_add(pm->pm_runcount, -1);

    /* Do not re-enable stalled PMCs if we failed to process any samples */

    for (n = 0; n < md->pmd_npmc; n++) {

        (void)(*pcd->pcd_get_config)(cpu, adjri, &pm);

        if (pm == NULL ||                             /* !cfg'ed */
            pm->pm_state != PMC_STATE_RUNNING ||      /* !active */
            !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */
            !pm->pm_pcpu_state[cpu].pps_cpustate ||   /* !desired */
            !pm->pm_pcpu_state[cpu].pps_stalled)      /* !stalled */

        pm->pm_pcpu_state[cpu].pps_stalled = 0;
        (void)(*pcd->pcd_start_pmc)(cpu, adjri, pm);
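/*
 * Illustrative sketch (not part of this driver): after draining samples,
 * the loop above restarts only those PMCs that are configured, running,
 * in sampling mode, still wanted on this CPU (pps_cpustate), and that
 * actually stalled (pps_stalled).  The conjunction can be modelled as a
 * single predicate over hypothetical flags:
 */
#if 0
#include <stdbool.h>

struct fake_pcpu_pmc {
    bool configured;   /* pcd_get_config() returned a PMC */
    bool running;      /* PMC_STATE_RUNNING stand-in */
    bool sampling;     /* PMC_IS_SAMPLING_MODE() stand-in */
    bool cpustate;     /* pps_cpustate stand-in: wanted on this CPU */
    bool stalled;      /* pps_stalled stand-in: its ring filled up */
};

static bool
should_restart(const struct fake_pcpu_pmc *p)
{
    return (p->configured && p->running && p->sampling &&
        p->cpustate && p->stalled);
}

int
main(void)
{
    struct fake_pcpu_pmc p = { true, true, true, true, true };

    return (should_restart(&p) ? 0 : 1);
}
#endif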
    struct pmc *pm;

    is_using_hwpmcs = (p->p_flag & P_HWPMC) != 0;

        if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0)
            pmclog_process_sysexit(po, p->p_pid);

    PMCDBG3(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
        p->p_comm);

    PMCDBG3(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid,
        p->p_comm);

    cpu = curthread->td_oncpu;

    PMCDBG2(PRC,EXT,2, "process-exit proc=%p pmc-process=%p", p, pp);

    for (ri = 0; ri < md->pmd_npmc; ri++) {

        pm = NULL;
        (void)(*pcd->pcd_get_config)(cpu, adjri, &pm);

        PMCDBG2(PRC,EXT,2, "ri=%d pm=%p", ri, pm);

        if (pm == NULL || !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))

        PMCDBG4(PRC,EXT,2, "ppmcs[%d]=%p pm=%p state=%d", ri,
            pp->pp_pmcs[ri].pp_pmc, pm, pm->pm_state);

        KASSERT(PMC_TO_ROWINDEX(pm) == ri,
            PMC_TO_ROWINDEX(pm), ri));
        KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
            ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__, pm, ri,
            pp->pp_pmcs[ri].pp_pmc));
        KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,
            (uintmax_t)counter_u64_fetch(pm->pm_runcount)));

        /*
         * [...] two-step dance should avoid race conditions where an
         * interrupt re-enables the PMC after this code has already
         * [...]
         */
        if (pm->pm_pcpu_state[cpu].pps_cpustate) {
            pm->pm_pcpu_state[cpu].pps_cpustate = 0;
            if (!pm->pm_pcpu_state[cpu].pps_stalled) {
                (void)pcd->pcd_stop_pmc(cpu, adjri, pm);

                if (PMC_TO_MODE(pm) == PMC_MODE_TC) {
                    pcd->pcd_read_pmc(cpu, adjri, pm,
                    tmp = newvalue - PMC_PCPU_SAVED(cpu, ri);

                    mtx_pool_lock_spin(pmc_mtxpool, pm);
                    pm->pm_gv.pm_savedvalue += tmp;
                    pp->pp_pmcs[ri].pp_pmcval += tmp;
                    mtx_pool_unlock_spin(pmc_mtxpool, pm);

        KASSERT(counter_u64_fetch(pm->pm_runcount) > 0,

        counter_u64_add(pm->pm_runcount, -1);
        (void)pcd->pcd_config_pmc(cpu, adjri, NULL);

    (void)md->pmd_switch_out(pmc_pcpu[cpu], pp);

    critical_exit();        /* ok to be pre-empted now */
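/*
 * Illustrative sketch (not part of this driver): clearing pps_cpustate
 * *before* testing pps_stalled closes the race in which an interrupt
 * re-enables a stalled PMC after exit processing has already decided to
 * stop it; the interrupt side re-checks pps_cpustate and backs off.  A
 * stand-alone model of the two flags, with hypothetical names:
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct fake_state {
    bool cpustate;    /* "this PMC should run on this CPU" */
    bool stalled;     /* "the sampling ring filled up" */
};

/* Exit path: step 1, withdraw permission; step 2, stop if not stalled. */
static void
exit_path(struct fake_state *s)
{
    s->cpustate = false;              /* step 1: no new restarts allowed */
    if (!s->stalled)
        printf("stopping PMC\n");     /* step 2: it was still running */
}

/* Interrupt path: only restart a stalled PMC if it is still wanted. */
static void
interrupt_path(struct fake_state *s)
{
    if (s->cpustate && s->stalled) {
        s->stalled = false;
        printf("restarting PMC\n");
    }
}

int
main(void)
{
    struct fake_state s = { true, true };

    exit_path(&s);        /* withdraws permission; PMC was stalled */
    interrupt_path(&s);   /* sees cpustate == false and backs off */
    return (0);
}
#endif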
    for (ri = 0; ri < md->pmd_npmc; ri++) {
        if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
            if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) != 0 &&
                PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm))) {
                pmclog_process_procexit(pm, pp);
            }
            pmc_unlink_target_process(pm, pp);

    if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0)
    struct pmc *pm;

    is_using_hwpmcs = (p1->p_flag & P_HWPMC) != 0;

    /*
     * If there are system-wide sampling PMCs active, we need to
     * [...]
     */
        if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
            pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);

    PMCDBG4(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1,
        p1->p_pid, p1->p_comm, newproc);

    /*
     * If the parent process (curthread->td_proc) is a
     * [...]
     */
    ppold = pmc_find_process_descriptor(curthread->td_proc, PMC_FLAG_NONE);

    for (ri = 0; ri < md->pmd_npmc; ri++) {
        if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
            (pm->pm_flags & PMC_F_DESCENDANTS) != 0) {

    newproc->p_flag |= P_HWPMC;

    for (ri = 0; ri < md->pmd_npmc; ri++) {
        if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
            (pm->pm_flags & PMC_F_DESCENDANTS) != 0) {
            pmc_link_target_process(pm, ppnew);
            po = pm->pm_owner;
            if (po->po_sscount == 0 &&
                (po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) {
                pmclog_process_procfork(po, p1->p_pid,
                    newproc->p_pid);
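/*
 * Illustrative sketch (not part of this driver): at fork, only PMCs whose
 * owner requested PMC_F_DESCENDANTS are propagated; the parent's rows are
 * scanned once to decide whether a child descriptor is needed at all, and
 * a second pass links each inheritable PMC to the child.  A stand-alone
 * model with hypothetical names:
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NPMC          4
#define F_DESCENDANTS 0x1    /* PMC_F_DESCENDANTS stand-in */

struct fake_pmc {
    unsigned flags;
    int      ntargets;
};

static bool
fork_inherit(struct fake_pmc *rows[NPMC])
{
    bool any = false;

    /* Pass 1: does the parent track anything inheritable? */
    for (int ri = 0; ri < NPMC; ri++)
        if (rows[ri] != NULL && (rows[ri]->flags & F_DESCENDANTS))
            any = true;
    if (!any)
        return (false);

    /* Pass 2: link each inheritable PMC to the child. */
    for (int ri = 0; ri < NPMC; ri++)
        if (rows[ri] != NULL && (rows[ri]->flags & F_DESCENDANTS)) {
            rows[ri]->ntargets++;
            printf("row %d linked to child\n", ri);
        }
    return (true);
}

int
main(void)
{
    struct fake_pmc a = { F_DESCENDANTS, 1 }, b = { 0, 1 };
    struct fake_pmc *rows[NPMC] = { &a, NULL, &b, NULL };

    printf("child tracked: %d\n", fork_inherit(rows));
    return (0);
}
#endif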
    if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0)

    if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0)

    if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0)

static void
pmc_process_allproc(struct pmc *pm)
{

    po = pm->pm_owner;
    if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)

        if (po->po_flags & PMC_PO_OWNS_LOGFILE)
            pmclog_process_map_in(po, (pid_t) -1,
                (uintfptr_t) lf->address, lf->pathname);

    /*
     * TODO: Notify owners of (all) process-sampling PMCs too.
     */

        if ((po->po_flags & PMC_PO_OWNS_LOGFILE) != 0) {
            pmclog_process_map_out(po, (pid_t)-1,

    /*
     * TODO: Notify owners of process-sampling PMCs.
     */
    md->pmd_nclass = n;

    md->pmd_switch_in = generic_switch_in;
    md->pmd_switch_out = generic_switch_out;

    md->pmd_cputype = PMC_CPU_GENERIC;
    pmc_sample_mask = pmc_nsamples - 1;

            "range - using %d.\n", pmc_callchaindepth,

    for (ri = c = 0; c < md->pmd_nclass; c++) {
        pcd = &md->pmd_classdep[c];
        pcd->pcd_ri = ri;
        ri += pcd->pcd_num;
    }

    KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1,

    /* Compute the map from row-indices to classdep pointers. */
    pmc_rowindex_to_classdep = malloc(sizeof(struct pmc_classdep *) *
        md->pmd_npmc, M_PMC, M_WAITOK | M_ZERO);

    for (n = 0; n < md->pmd_npmc; n++)
        pmc_rowindex_to_classdep[n] = NULL;

    for (ri = c = 0; c < md->pmd_nclass; c++) {
        pcd = &md->pmd_classdep[c];
        for (n = 0; n < pcd->pcd_num; n++, ri++)
            pmc_rowindex_to_classdep[ri] = pcd;
    }

    KASSERT(ri == md->pmd_npmc,
        ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__,
        ri, md->pmd_npmc));
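/*
 * Illustrative sketch (not part of this driver): each class gets a
 * contiguous range of global row indices starting at pcd_ri, and a flat
 * lookup table maps a row index back to its class; the adjusted index
 * within the class is then ri - pcd_ri, as pmc_ri_to_classdep() computes.
 * A stand-alone model with hypothetical class names and counts:
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct fake_classdep {
    const char *name;
    int         ri;    /* pcd_ri stand-in: first global row index */
    int         num;   /* pcd_num stand-in: PMCs in this class */
};

int
main(void)
{
    struct fake_classdep classes[] = {
        { "soft", 0, 0 }, { "fixed", 0, 3 }, { "programmable", 0, 8 },
    };
    const int nclass = 3;
    struct fake_classdep *map[16] = { NULL };
    int ri = 0;

    /* Assign each class its starting row index. */
    for (int c = 0; c < nclass; c++) {
        classes[c].ri = ri;
        ri += classes[c].num;
    }
    int npmc = ri;

    /* Build the row-index -> classdep lookup table. */
    ri = 0;
    for (int c = 0; c < nclass; c++)
        for (int n = 0; n < classes[c].num; n++, ri++)
            map[ri] = &classes[c];
    assert(ri == npmc);

    /* Reverse lookup: global row 5 is "programmable" PMC #2. */
    int adjri = 5 - map[5]->ri;
    printf("row 5 -> class %s, adjri %d\n", map[5]->name, adjri);
    return (0);
}
#endif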
    /* allocate space for the per-cpu array */

    /* per-cpu 'saved values' for managing process-mode PMCs */
    pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc,

    /* Perform CPU-dependent initialization. */

            md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC,
    for (n = 0; error == 0 && n < md->pmd_nclass; n++)
        if (md->pmd_classdep[n].pcd_num > 0)
            error = md->pmd_classdep[n].pcd_pcpu_init(md,

        domain = pc->pc_domain;

            ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));

        sb->ps_callchains = malloc_domainset(pmc_callchaindepth *

        for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
            ps->ps_pc = sb->ps_callchains +
                n * pmc_callchaindepth;

        pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb;

        sb->ps_callchains = malloc_domainset(pmc_callchaindepth *

        for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
            ps->ps_pc = sb->ps_callchains +
                n * pmc_callchaindepth;

        pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb;

        sb->ps_callchains = malloc_domainset(pmc_callchaindepth *

        for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
            ps->ps_pc = sb->ps_callchains + n * pmc_callchaindepth;

        pmc_pcpu[cpu]->pc_sb[PMC_UR] = sb;
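/*
 * Illustrative sketch (not part of this driver): each sample buffer above
 * allocates one large callchain arena and points every sample's ps_pc at
 * its own depth-sized slice (base + n * depth), avoiding one allocation
 * per sample.  A stand-alone model:
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NSAMPLES 4
#define DEPTH    8    /* pmc_callchaindepth stand-in */

struct fake_sample {
    uintptr_t *pc;    /* ps_pc stand-in */
};

int
main(void)
{
    /* One arena for all callchains, as with sb->ps_callchains. */
    uintptr_t *arena = calloc(NSAMPLES * DEPTH, sizeof(uintptr_t));
    struct fake_sample samples[NSAMPLES];

    if (arena == NULL)
        return (1);
    for (int n = 0; n < NSAMPLES; n++)
        samples[n].pc = arena + n * DEPTH;    /* carve a slice */

    samples[2].pc[0] = 0xdeadbeef;    /* write via a sample's view */
    printf("arena[%d] = %#jx\n", 2 * DEPTH, (uintmax_t)arena[2 * DEPTH]);
    free(arena);
    return (0);
}
#endif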
    pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,

    for (n = 0; n < md->pmd_npmc; n++)

    mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",

    pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size,

    mtx_init(&pmc_threadfreelist_mtx, "pmc-threadfreelist", "pmc-leaf",

    pmc_intr = md->pmd_intr;

    for (n = 0; n < md->pmd_nclass; n++) {
        if (md->pmd_classdep[n].pcd_num == 0)
            continue;
        pcd = &md->pmd_classdep[n];
        printf(" %s/%d/%d/0x%b",
            pmc_name_of_pmcclass(pcd->pcd_class),
            pcd->pcd_num,
            pcd->pcd_width,
            pcd->pcd_caps,
            po->po_owner, po->po_owner->p_pid,
            po->po_owner->p_comm);

        PROC_LOCK(po->po_owner);
        kern_psignal(po->po_owner, SIGBUS);
        PROC_UNLOCK(po->po_owner);

        PMCDBG1(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);

    /* do processor and pmc-class dependent cleanup */

        PMCDBG2(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",

        for (c = 0; c < md->pmd_nclass; c++) {
            if (md->pmd_classdep[c].pcd_num > 0) {
                md->pmd_classdep[c].pcd_pcpu_fini(md,

        if (md->pmd_cputype == PMC_CPU_GENERIC)

    /* Free per-cpu descriptors. */
    for (cpu = 0; cpu < maxcpu; cpu++) {
        KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_HR] != NULL,
            ("[pmc,%d] Null hw cpu sample buffer cpu=%d", __LINE__,
            cpu));
        KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_SR] != NULL,
            ("[pmc,%d] Null sw cpu sample buffer cpu=%d", __LINE__,
            cpu));
        KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_UR] != NULL,
            ("[pmc,%d] Null userret cpu sample buffer cpu=%d", __LINE__,
            cpu));
        free(pmc_pcpu[cpu]->pc_sb[PMC_HR]->ps_callchains, M_PMC);
        free(pmc_pcpu[cpu]->pc_sb[PMC_HR], M_PMC);
        free(pmc_pcpu[cpu]->pc_sb[PMC_SR]->ps_callchains, M_PMC);
        free(pmc_pcpu[cpu]->pc_sb[PMC_SR], M_PMC);
        free(pmc_pcpu[cpu]->pc_sb[PMC_UR]->ps_callchains, M_PMC);
        free(pmc_pcpu[cpu]->pc_sb[PMC_UR], M_PMC);