/*-
 * Copyright (c) 2012 Fabien Thomas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>
#include <sys/mutex.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>

#include "hwpmc_soft.h"

/*
 * Software PMC support.
 */

#define	SOFT_CAPS	(PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INTERRUPT | \
    PMC_CAP_USER | PMC_CAP_SYSTEM)

struct soft_descr {
	struct pmc_descr pm_descr;	/* "base class" */
};

static struct soft_descr soft_pmcdesc[SOFT_NPMCS] =
{
#define	SOFT_PMCDESCR(N)			\
	{					\
		.pm_descr =			\
		{				\
			.pd_name = #N,		\
			.pd_class = PMC_CLASS_SOFT, \
			.pd_caps = SOFT_CAPS,	\
			.pd_width = 64		\
		},				\
	}

	SOFT_PMCDESCR(SOFT0),
	SOFT_PMCDESCR(SOFT1),
	SOFT_PMCDESCR(SOFT2),
	SOFT_PMCDESCR(SOFT3),
	SOFT_PMCDESCR(SOFT4),
	SOFT_PMCDESCR(SOFT5),
	SOFT_PMCDESCR(SOFT6),
	SOFT_PMCDESCR(SOFT7),
	SOFT_PMCDESCR(SOFT8),
	SOFT_PMCDESCR(SOFT9),
	SOFT_PMCDESCR(SOFT10),
	SOFT_PMCDESCR(SOFT11),
	SOFT_PMCDESCR(SOFT12),
	SOFT_PMCDESCR(SOFT13),
	SOFT_PMCDESCR(SOFT14),
	SOFT_PMCDESCR(SOFT15)
};

/*
 * Per-CPU data structure.
 */

struct soft_cpu {
	struct pmc_hw	soft_hw[SOFT_NPMCS];
	pmc_value_t	soft_values[SOFT_NPMCS];
};

static struct soft_cpu **soft_pcpu;

static int
soft_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	enum pmc_event ev;
	struct pmc_soft *ps;

	(void) cpu;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	if (a->pm_class != PMC_CLASS_SOFT)
		return (EINVAL);

	if ((pm->pm_caps & SOFT_CAPS) == 0)
		return (EINVAL);

	if ((pm->pm_caps & ~SOFT_CAPS) != 0)
		return (EPERM);

	ev = pm->pm_event;
	if ((int)ev < PMC_EV_SOFT_FIRST || (int)ev > PMC_EV_SOFT_LAST)
		return (EINVAL);

	/* Check if event is registered. */
	ps = pmc_soft_ev_acquire(ev);
	if (ps == NULL)
		return (EINVAL);
	pmc_soft_ev_release(ps);
	/* Module unload is protected by pmc SX lock. */
	if (ps->ps_alloc != NULL)
		ps->ps_alloc();

	return (0);
}

static int
soft_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	phw = &soft_pcpu[cpu]->soft_hw[ri];

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[soft,%d] pm=%p phw->pm=%p hwpmc not unconfigured", __LINE__,
	    pm, phw->phw_pmc));

	phw->phw_pmc = pm;

	return (0);
}

static int
soft_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	size_t copied;
	const struct soft_descr *pd;
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	phw = &soft_pcpu[cpu]->soft_hw[ri];
	pd = &soft_pmcdesc[ri];

	if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
	    PMC_NAME_MAX, &copied)) != 0)
		return (error);

	pi->pm_class = pd->pm_descr.pd_class;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc = NULL;
	}

	return (0);
}

static int
soft_get_config(int cpu, int ri, struct pmc **ppm)
{
	(void) ri;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	*ppm = soft_pcpu[cpu]->soft_hw[ri].phw_pmc;
	return (0);
}

static int
soft_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	int ri;
	struct pmc_cpu *pc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal cpu %d", __LINE__, cpu));
	KASSERT(soft_pcpu[cpu] != NULL, ("[soft,%d] null pcpu", __LINE__));

	free(soft_pcpu[cpu], M_PMC);
	soft_pcpu[cpu] = NULL;

	ri = md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_ri;

	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] ri=%d", __LINE__, ri));

	pc = pmc_pcpu[cpu];
	pc->pc_hwpmcs[ri] = NULL;

	return (0);
}

static int
soft_pcpu_init(struct pmc_mdep *md, int cpu)
{
	int first_ri, n;
	struct pmc_cpu *pc;
	struct soft_cpu *soft_pc;
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal cpu %d", __LINE__, cpu));
	KASSERT(soft_pcpu, ("[soft,%d] null pcpu", __LINE__));
	KASSERT(soft_pcpu[cpu] == NULL, ("[soft,%d] non-null per-cpu",
	    __LINE__));

	soft_pc = malloc(sizeof(struct soft_cpu), M_PMC, M_WAITOK|M_ZERO);
	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL, ("[soft,%d] cpu %d null per-cpu", __LINE__, cpu));

	soft_pcpu[cpu] = soft_pc;
	phw = soft_pc->soft_hw;
	first_ri = md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_ri;

	for (n = 0; n < SOFT_NPMCS; n++, phw++) {
		phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
		phw->phw_pmc = NULL;
		pc->pc_hwpmcs[n + first_ri] = phw;
	}

	return (0);
}

static int
soft_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	const struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	phw = &soft_pcpu[cpu]->soft_hw[ri];
	pm = phw->phw_pmc;

	KASSERT(pm != NULL,
	    ("[soft,%d] no owner for PHW [cpu%d,pmc%d]", __LINE__, cpu, ri));

	PMCDBG(MDP,REA,1,"soft-read id=%d", ri);

	*v = soft_pcpu[cpu]->soft_values[ri];

	return (0);
}

static int
soft_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;
	const struct soft_descr *pd;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	pm = soft_pcpu[cpu]->soft_hw[ri].phw_pmc;
	pd = &soft_pmcdesc[ri];

	KASSERT(pm,
	    ("[soft,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	PMCDBG(MDP,WRI,1, "soft-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	soft_pcpu[cpu]->soft_values[ri] = v;

	return (0);
}

static int
soft_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	struct pmc_hw *phw;
	enum pmc_event ev;
	struct pmc_soft *ps;

	(void) pmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	phw = &soft_pcpu[cpu]->soft_hw[ri];

	KASSERT(phw->phw_pmc == NULL,
	    ("[soft,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

	ev = pmc->pm_event;

	/* Check if event is registered. */
	ps = pmc_soft_ev_acquire(ev);
	KASSERT(ps != NULL,
	    ("[soft,%d] unregistered event %d", __LINE__, ev));
	pmc_soft_ev_release(ps);
	/* Module unload is protected by pmc SX lock. */
	if (ps->ps_release != NULL)
		ps->ps_release();
	return (0);
}

static int
soft_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct soft_cpu *pc;
	struct pmc_soft *ps;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	pc = soft_pcpu[cpu];
	pm = pc->soft_hw[ri].phw_pmc;

	KASSERT(pm,
	    ("[soft,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	ps = pmc_soft_ev_acquire(pm->pm_event);
	if (ps == NULL)
		return (EINVAL);
	atomic_add_int(&ps->ps_running, 1);
	pmc_soft_ev_release(ps);

	return (0);
}

static int
soft_stop_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct soft_cpu *pc;
	struct pmc_soft *ps;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	pc = soft_pcpu[cpu];
	pm = pc->soft_hw[ri].phw_pmc;

	KASSERT(pm,
	    ("[soft,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	ps = pmc_soft_ev_acquire(pm->pm_event);
	/* Event unregistered? */
	if (ps != NULL) {
		atomic_subtract_int(&ps->ps_running, 1);
		pmc_soft_ev_release(ps);
	}

	return (0);
}

int
pmc_soft_intr(struct pmckern_soft *ks)
{
	struct pmc *pm;
	struct soft_cpu *pc;
	int ri, processed, error, user_mode;

	KASSERT(ks->pm_cpu >= 0 && ks->pm_cpu < pmc_cpu_max(),
	    ("[soft,%d] CPU %d out of range", __LINE__, ks->pm_cpu));

	processed = 0;
	pc = soft_pcpu[ks->pm_cpu];

	for (ri = 0; ri < SOFT_NPMCS; ri++) {

		pm = pc->soft_hw[ri].phw_pmc;
		if (pm == NULL ||
		    pm->pm_state != PMC_STATE_RUNNING ||
		    pm->pm_event != ks->pm_ev) {
			continue;
		}

		processed = 1;
		if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
			if ((pc->soft_values[ri]--) <= 0)
				pc->soft_values[ri] += pm->pm_sc.pm_reloadcount;
			else
				continue;
			user_mode = TRAPF_USERMODE(ks->pm_tf);
			error = pmc_process_interrupt(ks->pm_cpu, PMC_SR, pm,
			    ks->pm_tf, user_mode);
			if (error) {
				soft_stop_pmc(ks->pm_cpu, ri);
				continue;
			}

			if (user_mode) {
				/* If in user mode setup AST to process
				 * callchain out of interrupt context.
				 */
				curthread->td_flags |= TDF_ASTPENDING;
			}
		} else
			pc->soft_values[ri]++;
	}

	atomic_add_int(processed ? &pmc_stats.pm_intr_processed :
	    &pmc_stats.pm_intr_ignored, 1);

	return (processed);
}

void
pmc_soft_initialize(struct pmc_mdep *md)
{
	struct pmc_classdep *pcd;

	/* Add SOFT PMCs. */
	soft_pcpu = malloc(sizeof(struct soft_cpu *) * pmc_cpu_max(), M_PMC,
	    M_ZERO|M_WAITOK);

	pcd = &md->pmd_classdep[PMC_CLASS_INDEX_SOFT];

	pcd->pcd_caps	= SOFT_CAPS;
	pcd->pcd_class	= PMC_CLASS_SOFT;
	pcd->pcd_num	= SOFT_NPMCS;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= 64;

	pcd->pcd_allocate_pmc = soft_allocate_pmc;
	pcd->pcd_config_pmc   = soft_config_pmc;
	pcd->pcd_describe     = soft_describe;
	pcd->pcd_get_config   = soft_get_config;
	pcd->pcd_get_msr      = NULL;
	pcd->pcd_pcpu_init    = soft_pcpu_init;
	pcd->pcd_pcpu_fini    = soft_pcpu_fini;
	pcd->pcd_read_pmc     = soft_read_pmc;
	pcd->pcd_write_pmc    = soft_write_pmc;
	pcd->pcd_release_pmc  = soft_release_pmc;
	pcd->pcd_start_pmc    = soft_start_pmc;
	pcd->pcd_stop_pmc     = soft_stop_pmc;

	md->pmd_npmc += SOFT_NPMCS;
}

void
pmc_soft_finalize(struct pmc_mdep *md)
{
#ifdef INVARIANTS
	int i, ncpus;

	ncpus = pmc_cpu_max();
	for (i = 0; i < ncpus; i++)
		KASSERT(soft_pcpu[i] == NULL, ("[soft,%d] non-null pcpu cpu %d",
		    __LINE__, i));

	KASSERT(md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_class ==
	    PMC_CLASS_SOFT, ("[soft,%d] class mismatch", __LINE__));
#endif
	free(soft_pcpu, M_PMC);
	soft_pcpu = NULL;
}
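
/*
 * Illustrative sketch (disabled; not part of the driver proper): how a
 * kernel provider would typically publish an event that this class driver
 * services.  PMC_SOFT_DEFINE() and PMC_SOFT_CALL() are believed to come
 * from <sys/pmckern.h>; the provider/function names below ("example",
 * example_code_path) are hypothetical, and the exact macro spelling may
 * differ between FreeBSD versions, so treat this as an assumption-laden
 * example rather than a definitive recipe.
 */
#if 0
#include <sys/pmckern.h>

/*
 * Define and register a software event at boot; it is assigned an event
 * code in the PMC_EV_SOFT_FIRST..PMC_EV_SOFT_LAST range, which
 * soft_allocate_pmc() and pmc_soft_ev_acquire() above can then look up.
 */
PMC_SOFT_DEFINE(example, prov, code, path);

static void
example_code_path(void)
{
	/*
	 * Post one sample: once soft_start_pmc() has bumped ps_running,
	 * the macro is expected to invoke the PMC hook with
	 * PMC_FN_SOFT_SAMPLING, which eventually lands in pmc_soft_intr()
	 * above to update or reload the per-CPU soft_values[] slot.
	 */
	PMC_SOFT_CALL(example, prov, code, path);
}
#endif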