hwpmc_mod.c (d67023a15586e858f7aa9a57e5803d4db72c1f7b) | hwpmc_mod.c (122ccdc1cae8922f3845fc4f44b4e0470d342b9b) |
---|---|
1/*- | 1/*- |
2 * Copyright (c) 2003-2007 Joseph Koshy | 2 * Copyright (c) 2003-2008 Joseph Koshy |
3 * Copyright (c) 2007 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * Portions of this software were developed by A. Joseph Koshy under 7 * sponsorship from the FreeBSD Foundation and Google, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions --- 82 unchanged lines hidden (view full) --- 93#define PMC_MARK_ROW_FREE(R) do { \ 94 pmc_pmcdisp[(R)] = 0; \ 95} while (0) 96 97#define PMC_MARK_ROW_STANDALONE(R) do { \ 98 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \ 99 __LINE__)); \ 100 atomic_add_int(&pmc_pmcdisp[(R)], -1); \ | 3 * Copyright (c) 2007 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * Portions of this software were developed by A. Joseph Koshy under 7 * sponsorship from the FreeBSD Foundation and Google, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions --- 82 unchanged lines hidden (view full) --- 93#define PMC_MARK_ROW_FREE(R) do { \ 94 pmc_pmcdisp[(R)] = 0; \ 95} while (0) 96 97#define PMC_MARK_ROW_STANDALONE(R) do { \ 98 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \ 99 __LINE__)); \ 100 atomic_add_int(&pmc_pmcdisp[(R)], -1); \ |
101 KASSERT(pmc_pmcdisp[(R)] >= (-mp_ncpus), ("[pmc,%d] row " \ 102 "disposition error", __LINE__)); \ | 101 KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()), \ 102 ("[pmc,%d] row disposition error", __LINE__)); \ |
103} while (0) 104 105#define PMC_UNMARK_ROW_STANDALONE(R) do { \ 106 atomic_add_int(&pmc_pmcdisp[(R)], 1); \ 107 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \ 108 __LINE__)); \ 109} while (0) 110 --- 521 unchanged lines hidden (view full) --- 632 633/* 634 * move execution over the specified cpu and bind it there. 635 */ 636 637static void 638pmc_select_cpu(int cpu) 639{ | 103} while (0) 104 105#define PMC_UNMARK_ROW_STANDALONE(R) do { \ 106 atomic_add_int(&pmc_pmcdisp[(R)], 1); \ 107 KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \ 108 __LINE__)); \ 109} while (0) 110 --- 521 unchanged lines hidden (view full) --- 632 633/* 634 * move execution over the specified cpu and bind it there. 635 */ 636 637static void 638pmc_select_cpu(int cpu) 639{ |
640 KASSERT(cpu >= 0 && cpu < mp_ncpus, | 640 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), |
641 ("[pmc,%d] bad cpu number %d", __LINE__, cpu)); 642 | 641 ("[pmc,%d] bad cpu number %d", __LINE__, cpu)); 642 |
643 /* never move to a disabled CPU */ 644 KASSERT(pmc_cpu_is_disabled(cpu) == 0, ("[pmc,%d] selecting " 645 "disabled CPU %d", __LINE__, cpu)); | 643 /* Never move to an inactive CPU. */ 644 KASSERT(pmc_cpu_is_active(cpu), ("[pmc,%d] selecting inactive " 645 "CPU %d", __LINE__, cpu)); |
646 647 PMCDBG(CPU,SEL,2, "select-cpu cpu=%d", cpu); 648 thread_lock(curthread); 649 sched_bind(curthread, cpu); 650 thread_unlock(curthread); 651 652 KASSERT(curthread->td_oncpu == cpu, 653 ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__, --- 527 unchanged lines hidden (view full) --- 1181 1182 critical_enter(); /* no preemption from this point */ 1183 1184 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ 1185 1186 PMCDBG(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p, 1187 p->p_pid, p->p_comm, pp); 1188 | 646 647 PMCDBG(CPU,SEL,2, "select-cpu cpu=%d", cpu); 648 thread_lock(curthread); 649 sched_bind(curthread, cpu); 650 thread_unlock(curthread); 651 652 KASSERT(curthread->td_oncpu == cpu, 653 ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__, --- 527 unchanged lines hidden (view full) --- 1181 1182 critical_enter(); /* no preemption from this point */ 1183 1184 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ 1185 1186 PMCDBG(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p, 1187 p->p_pid, p->p_comm, pp); 1188 |
1189 KASSERT(cpu >= 0 && cpu < mp_ncpus, | 1189 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), |
1190 ("[pmc,%d] wierd CPU id %d", __LINE__, cpu)); 1191 1192 pc = pmc_pcpu[cpu]; 1193 1194 for (ri = 0; ri < md->pmd_npmc; ri++) { 1195 1196 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL) 1197 continue; --- 108 unchanged lines hidden (view full) --- 1306 1307 critical_enter(); 1308 1309 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ 1310 1311 PMCDBG(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p, 1312 p->p_pid, p->p_comm, pp); 1313 | 1190 ("[pmc,%d] wierd CPU id %d", __LINE__, cpu)); 1191 1192 pc = pmc_pcpu[cpu]; 1193 1194 for (ri = 0; ri < md->pmd_npmc; ri++) { 1195 1196 if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL) 1197 continue; --- 108 unchanged lines hidden (view full) --- 1306 1307 critical_enter(); 1308 1309 cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ 1310 1311 PMCDBG(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p, 1312 p->p_pid, p->p_comm, pp); 1313 |
1314 KASSERT(cpu >= 0 && cpu < mp_ncpus, | 1314 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), |
1315 ("[pmc,%d wierd CPU id %d", __LINE__, cpu)); 1316 1317 pc = pmc_pcpu[cpu]; 1318 1319 /* 1320 * When a PMC gets unlinked from a target PMC, it will 1321 * be removed from the target's pp_pmc[] array. 1322 * --- 710 unchanged lines hidden (view full) --- 2033} 2034 2035static void 2036pmc_wait_for_pmc_idle(struct pmc *pm) 2037{ 2038#ifdef DEBUG 2039 volatile int maxloop; 2040 | 1315 ("[pmc,%d wierd CPU id %d", __LINE__, cpu)); 1316 1317 pc = pmc_pcpu[cpu]; 1318 1319 /* 1320 * When a PMC gets unlinked from a target PMC, it will 1321 * be removed from the target's pp_pmc[] array. 1322 * --- 710 unchanged lines hidden (view full) --- 2033} 2034 2035static void 2036pmc_wait_for_pmc_idle(struct pmc *pm) 2037{ 2038#ifdef DEBUG 2039 volatile int maxloop; 2040 |
2041 maxloop = 100 * mp_ncpus; | 2041 maxloop = 100 * pmc_cpu_max(); |
2042#endif 2043 2044 /* 2045 * Loop (with a forced context switch) till the PMC's runcount 2046 * comes down to zero. 2047 */ 2048 while (atomic_load_acq_32(&pm->pm_runcount) > 0) { 2049#ifdef DEBUG --- 444 unchanged lines hidden (view full) --- 2494 * Move to the CPU associated with this 2495 * PMC, and start the hardware. 2496 */ 2497 2498 pmc_save_cpu_binding(&pb); 2499 2500 cpu = PMC_TO_CPU(pm); 2501 | 2042#endif 2043 2044 /* 2045 * Loop (with a forced context switch) till the PMC's runcount 2046 * comes down to zero. 2047 */ 2048 while (atomic_load_acq_32(&pm->pm_runcount) > 0) { 2049#ifdef DEBUG --- 444 unchanged lines hidden (view full) --- 2494 * Move to the CPU associated with this 2495 * PMC, and start the hardware. 2496 */ 2497 2498 pmc_save_cpu_binding(&pb); 2499 2500 cpu = PMC_TO_CPU(pm); 2501 |
2502 if (pmc_cpu_is_disabled(cpu)) | 2502 if (!pmc_cpu_is_active(cpu)) |
2503 return ENXIO; 2504 2505 pmc_select_cpu(cpu); 2506 2507 /* 2508 * global PMCs are configured at allocation time 2509 * so write out the initial value and start the PMC. 2510 */ --- 50 unchanged lines hidden (view full) --- 2561 * 'initial count' so that a subsequent PMCSTART will 2562 * resume counting from the current hardware count. 2563 */ 2564 2565 pmc_save_cpu_binding(&pb); 2566 2567 cpu = PMC_TO_CPU(pm); 2568 | 2503 return ENXIO; 2504 2505 pmc_select_cpu(cpu); 2506 2507 /* 2508 * global PMCs are configured at allocation time 2509 * so write out the initial value and start the PMC. 2510 */ --- 50 unchanged lines hidden (view full) --- 2561 * 'initial count' so that a subsequent PMCSTART will 2562 * resume counting from the current hardware count. 2563 */ 2564 2565 pmc_save_cpu_binding(&pb); 2566 2567 cpu = PMC_TO_CPU(pm); 2568 |
2569 KASSERT(cpu >= 0 && cpu < mp_ncpus, | 2569 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), |
2570 ("[pmc,%d] illegal cpu=%d", __LINE__, cpu)); 2571 | 2570 ("[pmc,%d] illegal cpu=%d", __LINE__, cpu)); 2571 |
2572 if (pmc_cpu_is_disabled(cpu)) | 2572 if (!pmc_cpu_is_active(cpu)) |
2573 return ENXIO; 2574 2575 pmc_select_cpu(cpu); 2576 2577 ri = PMC_TO_ROWINDEX(pm); 2578 2579 critical_enter(); 2580 if ((error = md->pmd_stop_pmc(cpu, ri)) == 0) --- 148 unchanged lines hidden (view full) --- 2729 * Retrieve hardware configuration. 2730 */ 2731 2732 case PMC_OP_GETCPUINFO: /* CPU information */ 2733 { 2734 struct pmc_op_getcpuinfo gci; 2735 2736 gci.pm_cputype = md->pmd_cputype; | 2573 return ENXIO; 2574 2575 pmc_select_cpu(cpu); 2576 2577 ri = PMC_TO_ROWINDEX(pm); 2578 2579 critical_enter(); 2580 if ((error = md->pmd_stop_pmc(cpu, ri)) == 0) --- 148 unchanged lines hidden (view full) --- 2729 * Retrieve hardware configuration. 2730 */ 2731 2732 case PMC_OP_GETCPUINFO: /* CPU information */ 2733 { 2734 struct pmc_op_getcpuinfo gci; 2735 2736 gci.pm_cputype = md->pmd_cputype; |
2737 gci.pm_ncpu = mp_ncpus; | 2737 gci.pm_ncpu = pmc_cpu_max(); |
2738 gci.pm_npmc = md->pmd_npmc; 2739 gci.pm_nclass = md->pmd_nclass; 2740 bcopy(md->pmd_classes, &gci.pm_classes, 2741 sizeof(gci.pm_classes)); 2742 error = copyout(&gci, arg, sizeof(gci)); 2743 } 2744 break; 2745 --- 51 unchanged lines hidden (view full) --- 2797 2798 PMC_DOWNGRADE_SX(); 2799 2800 gpi = (struct pmc_op_getpmcinfo *) arg; 2801 2802 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0) 2803 break; 2804 | 2738 gci.pm_npmc = md->pmd_npmc; 2739 gci.pm_nclass = md->pmd_nclass; 2740 bcopy(md->pmd_classes, &gci.pm_classes, 2741 sizeof(gci.pm_classes)); 2742 error = copyout(&gci, arg, sizeof(gci)); 2743 } 2744 break; 2745 --- 51 unchanged lines hidden (view full) --- 2797 2798 PMC_DOWNGRADE_SX(); 2799 2800 gpi = (struct pmc_op_getpmcinfo *) arg; 2801 2802 if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0) 2803 break; 2804 |
2805 if (cpu >= (unsigned int) mp_ncpus) { | 2805 if (cpu >= pmc_cpu_max()) { |
2806 error = EINVAL; 2807 break; 2808 } 2809 | 2806 error = EINVAL; 2807 break; 2808 } 2809 |
2810 if (pmc_cpu_is_disabled(cpu)) { | 2810 if (!pmc_cpu_is_active(cpu)) { |
2811 error = ENXIO; 2812 break; 2813 } 2814 2815 /* switch to CPU 'cpu' */ 2816 pmc_save_cpu_binding(&pb); 2817 pmc_select_cpu(cpu); 2818 --- 72 unchanged lines hidden (view full) --- 2891 if (error) 2892 break; 2893 2894 if ((error = copyin(arg, &pma, sizeof(pma))) != 0) 2895 break; 2896 2897 cpu = pma.pm_cpu; 2898 | 2811 error = ENXIO; 2812 break; 2813 } 2814 2815 /* switch to CPU 'cpu' */ 2816 pmc_save_cpu_binding(&pb); 2817 pmc_select_cpu(cpu); 2818 --- 72 unchanged lines hidden (view full) --- 2891 if (error) 2892 break; 2893 2894 if ((error = copyin(arg, &pma, sizeof(pma))) != 0) 2895 break; 2896 2897 cpu = pma.pm_cpu; 2898 |
2899 if (cpu < 0 || cpu >= mp_ncpus) { | 2899 if (cpu < 0 || cpu >= (int) pmc_cpu_max()) { |
2900 error = EINVAL; 2901 break; 2902 } 2903 | 2900 error = EINVAL; 2901 break; 2902 } 2903 |
2904 if (pmc_cpu_is_disabled(cpu)) { | 2904 if (!pmc_cpu_is_active(cpu)) { |
2905 error = ENXIO; 2906 break; 2907 } 2908 2909 request = pma.pm_state; 2910 2911 if (request != PMC_STATE_DISABLED && 2912 request != PMC_STATE_FREE) { --- 71 unchanged lines hidden (view full) --- 2984 break; 2985 2986 caps = pa.pm_caps; 2987 mode = pa.pm_mode; 2988 cpu = pa.pm_cpu; 2989 2990 if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC && 2991 mode != PMC_MODE_TS && mode != PMC_MODE_TC) || | 2905 error = ENXIO; 2906 break; 2907 } 2908 2909 request = pma.pm_state; 2910 2911 if (request != PMC_STATE_DISABLED && 2912 request != PMC_STATE_FREE) { --- 71 unchanged lines hidden (view full) --- 2984 break; 2985 2986 caps = pa.pm_caps; 2987 mode = pa.pm_mode; 2988 cpu = pa.pm_cpu; 2989 2990 if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC && 2991 mode != PMC_MODE_TS && mode != PMC_MODE_TC) || |
2992 (cpu != (u_int) PMC_CPU_ANY && cpu >= (u_int) mp_ncpus)) { | 2992 (cpu != (u_int) PMC_CPU_ANY && cpu >= pmc_cpu_max())) { |
2993 error = EINVAL; 2994 break; 2995 } 2996 2997 /* 2998 * Virtual PMCs should only ask for a default CPU. 2999 * System mode PMCs need to specify a non-default CPU. 3000 */ 3001 3002 if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) || 3003 (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) { 3004 error = EINVAL; 3005 break; 3006 } 3007 3008 /* | 2993 error = EINVAL; 2994 break; 2995 } 2996 2997 /* 2998 * Virtual PMCs should only ask for a default CPU. 2999 * System mode PMCs need to specify a non-default CPU. 3000 */ 3001 3002 if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) || 3003 (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) { 3004 error = EINVAL; 3005 break; 3006 } 3007 3008 /* |
3009 * Check that a disabled CPU is not being asked for. | 3009 * Check that an inactive CPU is not being asked for. |
3010 */ 3011 | 3010 */ 3011 |
3012 if (PMC_IS_SYSTEM_MODE(mode) && pmc_cpu_is_disabled(cpu)) { | 3012 if (PMC_IS_SYSTEM_MODE(mode) && !pmc_cpu_is_active(cpu)) { |
3013 error = ENXIO; 3014 break; 3015 } 3016 3017 /* 3018 * Refuse an allocation for a system-wide PMC if this 3019 * process has been jailed, or if this process lacks 3020 * super-user credentials and the sysctl tunable --- 496 unchanged lines hidden (view full) --- 3517 pm->pm_gv.pm_savedvalue = prw.pm_value; 3518 3519 mtx_pool_unlock_spin(pmc_mtxpool, pm); 3520 3521 } else { /* System mode PMCs */ 3522 cpu = PMC_TO_CPU(pm); 3523 ri = PMC_TO_ROWINDEX(pm); 3524 | 3013 error = ENXIO; 3014 break; 3015 } 3016 3017 /* 3018 * Refuse an allocation for a system-wide PMC if this 3019 * process has been jailed, or if this process lacks 3020 * super-user credentials and the sysctl tunable --- 496 unchanged lines hidden (view full) --- 3517 pm->pm_gv.pm_savedvalue = prw.pm_value; 3518 3519 mtx_pool_unlock_spin(pmc_mtxpool, pm); 3520 3521 } else { /* System mode PMCs */ 3522 cpu = PMC_TO_CPU(pm); 3523 ri = PMC_TO_ROWINDEX(pm); 3524 |
3525 if (pmc_cpu_is_disabled(cpu)) { | 3525 if (!pmc_cpu_is_active(cpu)) { |
3526 error = ENXIO; 3527 break; 3528 } 3529 3530 /* move this thread to CPU 'cpu' */ 3531 pmc_save_cpu_binding(&pb); 3532 pmc_select_cpu(cpu); 3533 --- 753 unchanged lines hidden (view full) --- 4287#define __PMC_CLASS(N) #N , 4288 __PMC_CLASSES() 4289}; 4290 4291static int 4292pmc_initialize(void) 4293{ 4294 int cpu, error, n; | 3526 error = ENXIO; 3527 break; 3528 } 3529 3530 /* move this thread to CPU 'cpu' */ 3531 pmc_save_cpu_binding(&pb); 3532 pmc_select_cpu(cpu); 3533 --- 753 unchanged lines hidden (view full) --- 4287#define __PMC_CLASS(N) #N , 4288 __PMC_CLASSES() 4289}; 4290 4291static int 4292pmc_initialize(void) 4293{ 4294 int cpu, error, n; |
| 4295 unsigned int maxcpu; |
4295 struct pmc_binding pb; 4296 struct pmc_sample *ps; 4297 struct pmc_samplebuffer *sb; 4298 4299 md = NULL; 4300 error = 0; 4301 4302#ifdef DEBUG --- 41 unchanged lines hidden (view full) --- 4344 pmc_callchaindepth = PMC_CALLCHAIN_DEPTH; 4345 } 4346 4347 md = pmc_md_initialize(); 4348 4349 if (md == NULL || md->pmd_init == NULL) 4350 return ENOSYS; 4351 | 4296 struct pmc_binding pb; 4297 struct pmc_sample *ps; 4298 struct pmc_samplebuffer *sb; 4299 4300 md = NULL; 4301 error = 0; 4302 4303#ifdef DEBUG --- 41 unchanged lines hidden (view full) --- 4345 pmc_callchaindepth = PMC_CALLCHAIN_DEPTH; 4346 } 4347 4348 md = pmc_md_initialize(); 4349 4350 if (md == NULL || md->pmd_init == NULL) 4351 return ENOSYS; 4352 |
| 4353 maxcpu = pmc_cpu_max(); 4354 |
4352 /* allocate space for the per-cpu array */ | 4355 /* allocate space for the per-cpu array */ |
4353 MALLOC(pmc_pcpu, struct pmc_cpu **, mp_ncpus * sizeof(struct pmc_cpu *), | 4356 MALLOC(pmc_pcpu, struct pmc_cpu **, maxcpu * sizeof(struct pmc_cpu *), |
4354 M_PMC, M_WAITOK|M_ZERO); 4355 4356 /* per-cpu 'saved values' for managing process-mode PMCs */ 4357 MALLOC(pmc_pcpu_saved, pmc_value_t *, | 4357 M_PMC, M_WAITOK|M_ZERO); 4358 4359 /* per-cpu 'saved values' for managing process-mode PMCs */ 4360 MALLOC(pmc_pcpu_saved, pmc_value_t *, |
4358 sizeof(pmc_value_t) * mp_ncpus * md->pmd_npmc, M_PMC, M_WAITOK); | 4361 sizeof(pmc_value_t) * maxcpu * md->pmd_npmc, M_PMC, M_WAITOK); |
4359 | 4362 |
4360 /* perform cpu dependent initialization */ | 4363 /* Perform CPU-dependent initialization. */ |
4361 pmc_save_cpu_binding(&pb); | 4364 pmc_save_cpu_binding(&pb); |
4362 for (cpu = 0; cpu < mp_ncpus; cpu++) { 4363 if (pmc_cpu_is_disabled(cpu)) | 4365 for (cpu = 0; cpu < maxcpu; cpu++) { 4366 if (!pmc_cpu_is_active(cpu)) |
4364 continue; 4365 pmc_select_cpu(cpu); 4366 if ((error = md->pmd_init(cpu)) != 0) 4367 break; 4368 } 4369 pmc_restore_cpu_binding(&pb); 4370 4371 if (error != 0) 4372 return error; 4373 4374 /* allocate space for the sample array */ | 4367 continue; 4368 pmc_select_cpu(cpu); 4369 if ((error = md->pmd_init(cpu)) != 0) 4370 break; 4371 } 4372 pmc_restore_cpu_binding(&pb); 4373 4374 if (error != 0) 4375 return error; 4376 4377 /* allocate space for the sample array */ |
4375 for (cpu = 0; cpu < mp_ncpus; cpu++) { 4376 if (pmc_cpu_is_disabled(cpu)) | 4378 for (cpu = 0; cpu < maxcpu; cpu++) { 4379 if (!pmc_cpu_is_active(cpu)) |
4377 continue; 4378 MALLOC(sb, struct pmc_samplebuffer *, 4379 sizeof(struct pmc_samplebuffer) + 4380 pmc_nsamples * sizeof(struct pmc_sample), M_PMC, 4381 M_WAITOK|M_ZERO); 4382 4383 sb->ps_read = sb->ps_write = sb->ps_samples; 4384 sb->ps_fence = sb->ps_samples + pmc_nsamples; --- 73 unchanged lines hidden (view full) --- 4458 return error; 4459} 4460 4461/* prepare to be unloaded */ 4462static void 4463pmc_cleanup(void) 4464{ 4465 int cpu; | 4380 continue; 4381 MALLOC(sb, struct pmc_samplebuffer *, 4382 sizeof(struct pmc_samplebuffer) + 4383 pmc_nsamples * sizeof(struct pmc_sample), M_PMC, 4384 M_WAITOK|M_ZERO); 4385 4386 sb->ps_read = sb->ps_write = sb->ps_samples; 4387 sb->ps_fence = sb->ps_samples + pmc_nsamples; --- 73 unchanged lines hidden (view full) --- 4461 return error; 4462} 4463 4464/* prepare to be unloaded */ 4465static void 4466pmc_cleanup(void) 4467{ 4468 int cpu; |
| 4469 unsigned int maxcpu; |
4466 struct pmc_ownerhash *ph; 4467 struct pmc_owner *po, *tmp; 4468 struct pmc_binding pb; 4469#ifdef DEBUG 4470 struct pmc_processhash *prh; 4471#endif 4472 4473 PMCDBG(MOD,INI,0, "%s", "cleanup"); --- 63 unchanged lines hidden (view full) --- 4537 pmc_ownerhash = NULL; 4538 } 4539 4540 KASSERT(LIST_EMPTY(&pmc_ss_owners), 4541 ("[pmc,%d] Global SS owner list not empty", __LINE__)); 4542 KASSERT(pmc_ss_count == 0, 4543 ("[pmc,%d] Global SS count not empty", __LINE__)); 4544 | 4470 struct pmc_ownerhash *ph; 4471 struct pmc_owner *po, *tmp; 4472 struct pmc_binding pb; 4473#ifdef DEBUG 4474 struct pmc_processhash *prh; 4475#endif 4476 4477 PMCDBG(MOD,INI,0, "%s", "cleanup"); --- 63 unchanged lines hidden (view full) --- 4541 pmc_ownerhash = NULL; 4542 } 4543 4544 KASSERT(LIST_EMPTY(&pmc_ss_owners), 4545 ("[pmc,%d] Global SS owner list not empty", __LINE__)); 4546 KASSERT(pmc_ss_count == 0, 4547 ("[pmc,%d] Global SS count not empty", __LINE__)); 4548 |
4545 /* free the per-cpu sample buffers */ 4546 for (cpu = 0; cpu < mp_ncpus; cpu++) { 4547 if (pmc_cpu_is_disabled(cpu)) | 4549 /* Free the per-cpu sample buffers. */ 4550 maxcpu = pmc_cpu_max(); 4551 for (cpu = 0; cpu < maxcpu; cpu++) { 4552 if (!pmc_cpu_is_active(cpu)) |
4548 continue; 4549 KASSERT(pmc_pcpu[cpu]->pc_sb != NULL, 4550 ("[pmc,%d] Null cpu sample buffer cpu=%d", __LINE__, 4551 cpu)); 4552 FREE(pmc_pcpu[cpu]->pc_sb->ps_callchains, M_PMC); 4553 FREE(pmc_pcpu[cpu]->pc_sb, M_PMC); 4554 pmc_pcpu[cpu]->pc_sb = NULL; 4555 } 4556 4557 /* do processor dependent cleanup */ 4558 PMCDBG(MOD,INI,3, "%s", "md cleanup"); 4559 if (md) { 4560 pmc_save_cpu_binding(&pb); | 4553 continue; 4554 KASSERT(pmc_pcpu[cpu]->pc_sb != NULL, 4555 ("[pmc,%d] Null cpu sample buffer cpu=%d", __LINE__, 4556 cpu)); 4557 FREE(pmc_pcpu[cpu]->pc_sb->ps_callchains, M_PMC); 4558 FREE(pmc_pcpu[cpu]->pc_sb, M_PMC); 4559 pmc_pcpu[cpu]->pc_sb = NULL; 4560 } 4561 4562 /* do processor dependent cleanup */ 4563 PMCDBG(MOD,INI,3, "%s", "md cleanup"); 4564 if (md) { 4565 pmc_save_cpu_binding(&pb); |
4561 for (cpu = 0; cpu < mp_ncpus; cpu++) { | 4566 for (cpu = 0; cpu < maxcpu; cpu++) { |
4562 PMCDBG(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p", 4563 cpu, pmc_pcpu[cpu]); | 4567 PMCDBG(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p", 4568 cpu, pmc_pcpu[cpu]); |
4564 if (pmc_cpu_is_disabled(cpu)) | 4569 if (!pmc_cpu_is_active(cpu) || pmc_pcpu[cpu] == NULL) |
4565 continue; 4566 pmc_select_cpu(cpu); | 4570 continue; 4571 pmc_select_cpu(cpu); |
4567 if (pmc_pcpu[cpu]) 4568 (void) md->pmd_cleanup(cpu); | 4572 if (md->pmd_cleanup) 4573 md->pmd_cleanup(cpu); |
4569 } 4570 FREE(md, M_PMC); 4571 md = NULL; 4572 pmc_restore_cpu_binding(&pb); 4573 } 4574 4575 /* deallocate per-cpu structures */ 4576 FREE(pmc_pcpu, M_PMC); --- 24 unchanged lines hidden (view full) --- 4601 error = 0; 4602 4603 switch (cmd) { 4604 case MOD_LOAD : 4605 /* initialize the subsystem */ 4606 error = pmc_initialize(); 4607 if (error != 0) 4608 break; | 4574 } 4575 FREE(md, M_PMC); 4576 md = NULL; 4577 pmc_restore_cpu_binding(&pb); 4578 } 4579 4580 /* deallocate per-cpu structures */ 4581 FREE(pmc_pcpu, M_PMC); --- 24 unchanged lines hidden (view full) --- 4606 error = 0; 4607 4608 switch (cmd) { 4609 case MOD_LOAD : 4610 /* initialize the subsystem */ 4611 error = pmc_initialize(); 4612 if (error != 0) 4613 break; |
4609 PMCDBG(MOD,INI,1, "syscall=%d ncpus=%d", 4610 pmc_syscall_num, mp_ncpus); | 4614 PMCDBG(MOD,INI,1, "syscall=%d maxcpu=%d", 4615 pmc_syscall_num, pmc_cpu_max()); |
4611 break; 4612 4613 4614 case MOD_UNLOAD : 4615 case MOD_SHUTDOWN: 4616 pmc_cleanup(); 4617 PMCDBG(MOD,INI,1, "%s", "unloaded"); 4618 break; 4619 4620 default : 4621 error = EINVAL; /* XXX should panic(9) */ 4622 break; 4623 } 4624 4625 return error; 4626} 4627 4628/* memory pool */ 4629MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module"); | 4616 break; 4617 4618 4619 case MOD_UNLOAD : 4620 case MOD_SHUTDOWN: 4621 pmc_cleanup(); 4622 PMCDBG(MOD,INI,1, "%s", "unloaded"); 4623 break; 4624 4625 default : 4626 error = EINVAL; /* XXX should panic(9) */ 4627 break; 4628 } 4629 4630 return error; 4631} 4632 4633/* memory pool */ 4634MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module"); |
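
The recurring change across this diff is mechanical: bounds and array sizes derived from `mp_ncpus` become `pmc_cpu_max()` (or `pmc_cpu_max_active()` in the row-disposition assertion), and `pmc_cpu_is_disabled(cpu)` checks become `!pmc_cpu_is_active(cpu)`. The sketch below restates the resulting per-CPU iteration pattern, modelled on the `pmc_initialize()` loop visible above; the wrapper name is hypothetical, and only the identifiers that actually appear in the diff (`pmc_cpu_max()`, `pmc_cpu_is_active()`, `pmc_select_cpu()`, the CPU-binding save/restore routines, and the global `md` descriptor) are assumed.

```c
/*
 * Sketch only -- not part of hwpmc_mod.c.  It mirrors the per-CPU loop
 * in pmc_initialize() after the change: iterate up to pmc_cpu_max(),
 * skip CPUs that are not active, and run the MD init hook on each
 * remaining CPU while temporarily bound to it.
 */
static int
example_init_active_cpus(void)		/* hypothetical name */
{
	struct pmc_binding pb;
	unsigned int cpu, maxcpu;
	int error;

	error = 0;
	maxcpu = pmc_cpu_max();		/* one past the largest CPU id */

	pmc_save_cpu_binding(&pb);
	for (cpu = 0; cpu < maxcpu; cpu++) {
		if (!pmc_cpu_is_active(cpu))	/* absent or inactive CPU */
			continue;
		pmc_select_cpu(cpu);		/* bind this thread to 'cpu' */
		if ((error = md->pmd_init(cpu)) != 0)
			break;
	}
	pmc_restore_cpu_binding(&pb);

	return (error);
}
```

The per-CPU arrays (`pmc_pcpu`, `pmc_pcpu_saved`) are allocated with the same `maxcpu` bound in the new revision, so any CPU id that passes the active-CPU check indexes them safely.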