/*-
 * Copyright (c) 2012 Fabien Thomas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>
#include <sys/mutex.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>

#include "hwpmc_soft.h"

/*
 * Software PMC support.
 */

#define	SOFT_CAPS	(PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INTERRUPT | \
    PMC_CAP_USER | PMC_CAP_SYSTEM)

struct soft_descr {
	struct pmc_descr pm_descr;	/* "base class" */
};

static struct soft_descr soft_pmcdesc[SOFT_NPMCS] =
{
#define	SOFT_PMCDESCR(N)			\
	{					\
		.pm_descr =			\
		{				\
			.pd_name = #N,		\
			.pd_class = PMC_CLASS_SOFT, \
			.pd_caps = SOFT_CAPS,	\
			.pd_width = 64		\
		},				\
	}

	SOFT_PMCDESCR(SOFT0),
	SOFT_PMCDESCR(SOFT1),
	SOFT_PMCDESCR(SOFT2),
	SOFT_PMCDESCR(SOFT3),
	SOFT_PMCDESCR(SOFT4),
	SOFT_PMCDESCR(SOFT5),
	SOFT_PMCDESCR(SOFT6),
	SOFT_PMCDESCR(SOFT7),
	SOFT_PMCDESCR(SOFT8),
	SOFT_PMCDESCR(SOFT9),
	SOFT_PMCDESCR(SOFT10),
	SOFT_PMCDESCR(SOFT11),
	SOFT_PMCDESCR(SOFT12),
	SOFT_PMCDESCR(SOFT13),
	SOFT_PMCDESCR(SOFT14),
	SOFT_PMCDESCR(SOFT15)
};

/*
 * Per-CPU data structure.
 */

struct soft_cpu {
	struct pmc_hw	soft_hw[SOFT_NPMCS];
	pmc_value_t	soft_values[SOFT_NPMCS];
};

static struct soft_cpu **soft_pcpu;
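
/*
 * Allocate a software PMC: check that the requested class, capabilities
 * and event number are valid and that the event is currently registered.
 */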
static int
soft_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	enum pmc_event ev;
	struct pmc_soft *ps;

	(void) cpu;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	if (a->pm_class != PMC_CLASS_SOFT)
		return (EINVAL);

	if ((pm->pm_caps & SOFT_CAPS) == 0)
		return (EINVAL);

	if ((pm->pm_caps & ~SOFT_CAPS) != 0)
		return (EPERM);

	ev = pm->pm_event;
	if ((int)ev < PMC_EV_SOFT_FIRST || (int)ev > PMC_EV_SOFT_LAST)
		return (EINVAL);

	/* Check if event is registered. */
	ps = pmc_soft_ev_acquire(ev);
	if (ps == NULL)
		return (EINVAL);
	pmc_soft_ev_release(ps);
	/* Module unload is protected by pmc SX lock. */
	if (ps->ps_alloc != NULL)
		ps->ps_alloc();

	return (0);
}

static int
soft_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	phw = &soft_pcpu[cpu]->soft_hw[ri];

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[soft,%d] pm=%p phw->pm=%p hwpmc not unconfigured", __LINE__,
	    pm, phw->phw_pmc));

	phw->phw_pmc = pm;

	return (0);
}

static int
soft_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	size_t copied;
	const struct soft_descr *pd;
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	phw = &soft_pcpu[cpu]->soft_hw[ri];
	pd = &soft_pmcdesc[ri];

	if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
	    PMC_NAME_MAX, &copied)) != 0)
		return (error);

	pi->pm_class = pd->pm_descr.pd_class;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc = NULL;
	}

	return (0);
}

static int
soft_get_config(int cpu, int ri, struct pmc **ppm)
{
	(void) ri;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	*ppm = soft_pcpu[cpu]->soft_hw[ri].phw_pmc;
	return (0);
}

static int
soft_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	int ri;
	struct pmc_cpu *pc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal cpu %d", __LINE__, cpu));
	KASSERT(soft_pcpu[cpu] != NULL, ("[soft,%d] null pcpu", __LINE__));

	free(soft_pcpu[cpu], M_PMC);
	soft_pcpu[cpu] = NULL;

	ri = md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_ri;

	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] ri=%d", __LINE__, ri));

	pc = pmc_pcpu[cpu];
	pc->pc_hwpmcs[ri] = NULL;

	return (0);
}

static int
soft_pcpu_init(struct pmc_mdep *md, int cpu)
{
	int first_ri, n;
	struct pmc_cpu *pc;
	struct soft_cpu *soft_pc;
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal cpu %d", __LINE__, cpu));
	KASSERT(soft_pcpu, ("[soft,%d] null pcpu", __LINE__));
	KASSERT(soft_pcpu[cpu] == NULL, ("[soft,%d] non-null per-cpu",
	    __LINE__));

	soft_pc = malloc(sizeof(struct soft_cpu), M_PMC, M_WAITOK|M_ZERO);
	if (soft_pc == NULL)
		return (ENOMEM);

	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL, ("[soft,%d] cpu %d null per-cpu", __LINE__, cpu));

	soft_pcpu[cpu] = soft_pc;
	phw = soft_pc->soft_hw;
	first_ri = md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_ri;

	for (n = 0; n < SOFT_NPMCS; n++, phw++) {
		phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
		phw->phw_pmc = NULL;
		pc->pc_hwpmcs[n + first_ri] = phw;
	}

	return (0);
}
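
/*
 * Read the current value of a software PMC from the per-CPU storage.
 */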
static int
soft_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	const struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	phw = &soft_pcpu[cpu]->soft_hw[ri];
	pm = phw->phw_pmc;

	KASSERT(pm != NULL,
	    ("[soft,%d] no owner for PHW [cpu%d,pmc%d]", __LINE__, cpu, ri));

	PMCDBG(MDP,REA,1,"soft-read id=%d", ri);

	*v = soft_pcpu[cpu]->soft_values[ri];

	return (0);
}

static int
soft_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;
	const struct soft_descr *pd;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	pm = soft_pcpu[cpu]->soft_hw[ri].phw_pmc;
	pd = &soft_pmcdesc[ri];

	KASSERT(pm,
	    ("[soft,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	PMCDBG(MDP,WRI,1, "soft-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	soft_pcpu[cpu]->soft_values[ri] = v;

	return (0);
}

static int
soft_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	struct pmc_hw *phw;
	enum pmc_event ev;
	struct pmc_soft *ps;

	(void) pmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	phw = &soft_pcpu[cpu]->soft_hw[ri];

	KASSERT(phw->phw_pmc == NULL,
	    ("[soft,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

	ev = pmc->pm_event;

	/* Check if event is registered. */
	ps = pmc_soft_ev_acquire(ev);
	KASSERT(ps != NULL,
	    ("[soft,%d] unregistered event %d", __LINE__, ev));
	pmc_soft_ev_release(ps);
	/* Module unload is protected by pmc SX lock. */
	if (ps->ps_release != NULL)
		ps->ps_release();
	return (0);
}

static int
soft_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct soft_cpu *pc;
	struct pmc_soft *ps;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	pc = soft_pcpu[cpu];
	pm = pc->soft_hw[ri].phw_pmc;

	KASSERT(pm,
	    ("[soft,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	ps = pmc_soft_ev_acquire(pm->pm_event);
	if (ps == NULL)
		return (EINVAL);
	atomic_add_int(&ps->ps_running, 1);
	pmc_soft_ev_release(ps);

	return (0);
}

static int
soft_stop_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct soft_cpu *pc;
	struct pmc_soft *ps;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	pc = soft_pcpu[cpu];
	pm = pc->soft_hw[ri].phw_pmc;

	KASSERT(pm,
	    ("[soft,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	ps = pmc_soft_ev_acquire(pm->pm_event);
	/* Event unregistered? */
	if (ps != NULL) {
		atomic_subtract_int(&ps->ps_running, 1);
		pmc_soft_ev_release(ps);
	}

	return (0);
}
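
/*
 * Software PMC interrupt entry point: for every row on the posting CPU
 * whose PMC matches the event, either bump the counter or, for sampling
 * PMCs, count down the reload value and post a sample when it expires.
 */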
int
pmc_soft_intr(struct pmckern_soft *ks)
{
	struct pmc *pm;
	struct soft_cpu *pc;
	int ri, processed, error, user_mode;

	KASSERT(ks->pm_cpu >= 0 && ks->pm_cpu < pmc_cpu_max(),
	    ("[soft,%d] CPU %d out of range", __LINE__, ks->pm_cpu));

	processed = 0;
	pc = soft_pcpu[ks->pm_cpu];

	for (ri = 0; ri < SOFT_NPMCS; ri++) {

		pm = pc->soft_hw[ri].phw_pmc;
		if (pm == NULL ||
		    pm->pm_state != PMC_STATE_RUNNING ||
		    pm->pm_event != ks->pm_ev) {
			continue;
		}

		processed = 1;
		if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
			if ((pc->soft_values[ri]--) <= 0)
				pc->soft_values[ri] += pm->pm_sc.pm_reloadcount;
			else
				continue;
			user_mode = TRAPF_USERMODE(ks->pm_tf);
			error = pmc_process_interrupt(ks->pm_cpu, PMC_SR, pm,
			    ks->pm_tf, user_mode);
			if (error) {
				soft_stop_pmc(ks->pm_cpu, ri);
				continue;
			}

			if (user_mode) {
				/*
				 * If in user mode, set up an AST to process
				 * the callchain out of interrupt context.
				 */
				curthread->td_flags |= TDF_ASTPENDING;
			}
		} else
			pc->soft_values[ri]++;
	}

	atomic_add_int(processed ? &pmc_stats.pm_intr_processed :
	    &pmc_stats.pm_intr_ignored, 1);

	return (processed);
}

void
pmc_soft_initialize(struct pmc_mdep *md)
{
	struct pmc_classdep *pcd;

	/* Add SOFT PMCs. */
	soft_pcpu = malloc(sizeof(struct soft_cpu *) * pmc_cpu_max(), M_PMC,
	    M_ZERO|M_WAITOK);

	pcd = &md->pmd_classdep[PMC_CLASS_INDEX_SOFT];

	pcd->pcd_caps = SOFT_CAPS;
	pcd->pcd_class = PMC_CLASS_SOFT;
	pcd->pcd_num = SOFT_NPMCS;
	pcd->pcd_ri = md->pmd_npmc;
	pcd->pcd_width = 64;

	pcd->pcd_allocate_pmc = soft_allocate_pmc;
	pcd->pcd_config_pmc = soft_config_pmc;
	pcd->pcd_describe = soft_describe;
	pcd->pcd_get_config = soft_get_config;
	pcd->pcd_get_msr = NULL;
	pcd->pcd_pcpu_init = soft_pcpu_init;
	pcd->pcd_pcpu_fini = soft_pcpu_fini;
	pcd->pcd_read_pmc = soft_read_pmc;
	pcd->pcd_write_pmc = soft_write_pmc;
	pcd->pcd_release_pmc = soft_release_pmc;
	pcd->pcd_start_pmc = soft_start_pmc;
	pcd->pcd_stop_pmc = soft_stop_pmc;

	md->pmd_npmc += SOFT_NPMCS;
}

void
pmc_soft_finalize(struct pmc_mdep *md)
{
#ifdef	INVARIANTS
	int i, ncpus;

	ncpus = pmc_cpu_max();
	for (i = 0; i < ncpus; i++)
		KASSERT(soft_pcpu[i] == NULL, ("[soft,%d] non-null pcpu cpu %d",
		    __LINE__, i));

	KASSERT(md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_class ==
	    PMC_CLASS_SOFT, ("[soft,%d] class mismatch", __LINE__));
#endif
	free(soft_pcpu, M_PMC);
	soft_pcpu = NULL;
}