/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * Copyright (c) 2021 ARM Ltd
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Arm CoreLink CMN-600 Coherent Mesh Network PMU Driver */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>

#include <machine/cmn600_reg.h>

struct cmn600_descr {
	struct pmc_descr pd_descr;  /* "base class" */
	void		*pd_rw_arg; /* Argument to use with read/write */
	struct pmc	*pd_pmc;
	struct pmc_hw	*pd_phw;
	uint32_t	 pd_nodeid;
	int32_t		 pd_node_type;
	int		 pd_local_counter;
};

static struct cmn600_descr **cmn600_pmcdesc;

static struct cmn600_pmc cmn600_pmcs[CMN600_UNIT_MAX];
static int cmn600_units = 0;

static inline struct cmn600_descr *
cmn600desc(int ri)
{

	return (cmn600_pmcdesc[ri]);
}

static inline int
class_ri2unit(int ri)
{

	return (ri / CMN600_COUNTERS_N);
}

#define	EVENCNTR(x)	(((x) >> POR_DT_PMEVCNT_EVENCNT_SHIFT) << \
    POR_DTM_PMEVCNT_CNTR_WIDTH)
#define	ODDCNTR(x)	(((x) >> POR_DT_PMEVCNT_ODDCNT_SHIFT) << \
    POR_DTM_PMEVCNT_CNTR_WIDTH)

static uint64_t
cmn600_pmu_readcntr(void *arg, u_int nodeid, u_int xpcntr, u_int dtccntr,
    u_int width)
{
	uint64_t dtcval, xpval;

	KASSERT(xpcntr < 4, ("[cmn600,%d] XP counter number %d is too big."
	    " Max: 3", __LINE__, xpcntr));
	KASSERT(dtccntr < 8, ("[cmn600,%d] Global counter number %d is too"
	    " big. Max: 7", __LINE__, dtccntr));

	dtcval = pmu_cmn600_rd8(arg, nodeid, NODE_TYPE_DTC,
	    POR_DT_PMEVCNT(dtccntr >> 1));
	if (width == 4) {
		dtcval = (dtccntr & 1) ? ODDCNTR(dtcval) : EVENCNTR(dtcval);
		dtcval &= 0xffffffff0000UL;
	} else
		dtcval <<= POR_DTM_PMEVCNT_CNTR_WIDTH;

	xpval = pmu_cmn600_rd8(arg, nodeid, NODE_TYPE_XP, POR_DTM_PMEVCNT);
	xpval >>= xpcntr * POR_DTM_PMEVCNT_CNTR_WIDTH;
	xpval &= 0xffffUL;
	return (dtcval | xpval);
}
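
/*
 * A sketch of the counter layout implied by the accessors above (derived
 * from the shifts and masks in this file, not an additional register
 * definition): with width == 4, each virtual 48-bit counter is a 32-bit
 * DTC global counter in bits [47:16] concatenated with a 16-bit XP-local
 * counter in bits [15:0].  For example, dtccntr == 3 selects the odd
 * 32-bit half of POR_DT_PMEVCNT(1), and xpcntr == 2 selects bits [47:32]
 * of the XP's 64-bit POR_DTM_PMEVCNT register.
 */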

static void
cmn600_pmu_writecntr(void *arg, u_int nodeid, u_int xpcntr, u_int dtccntr,
    u_int width, uint64_t val)
{
	int shift;

	KASSERT(xpcntr < 4, ("[cmn600,%d] XP counter number %d is too big."
	    " Max: 3", __LINE__, xpcntr));
	KASSERT(dtccntr < 8, ("[cmn600,%d] Global counter number %d is too"
	    " big. Max: 7", __LINE__, dtccntr));

	if (width == 4) {
		shift = (dtccntr & 1) ? POR_DT_PMEVCNT_ODDCNT_SHIFT :
		    POR_DT_PMEVCNT_EVENCNT_SHIFT;
		pmu_cmn600_md8(arg, nodeid, NODE_TYPE_DTC,
		    POR_DT_PMEVCNT(dtccntr >> 1), 0xffffffffUL << shift,
		    ((val >> POR_DTM_PMEVCNT_CNTR_WIDTH) & 0xffffffff) <<
		    shift);
	} else
		pmu_cmn600_wr8(arg, nodeid, NODE_TYPE_DTC,
		    POR_DT_PMEVCNT(dtccntr & ~0x1),
		    val >> POR_DTM_PMEVCNT_CNTR_WIDTH);

	shift = xpcntr * POR_DTM_PMEVCNT_CNTR_WIDTH;
	val &= 0xffffUL;
	pmu_cmn600_md8(arg, nodeid, NODE_TYPE_XP, POR_DTM_PMEVCNT,
	    0xffffUL << shift, val << shift);
}

#undef	EVENCNTR
#undef	ODDCNTR

/*
 * Read a PMC register.
 */
static int
cmn600_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
{
	int counter, local_counter, nodeid;
	struct cmn600_descr *desc;
	void *arg;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	counter = ri % CMN600_COUNTERS_N;
	desc = cmn600desc(ri);
	arg = desc->pd_rw_arg;
	nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid;
	local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;

	*v = cmn600_pmu_readcntr(arg, nodeid, local_counter, counter, 4);
	PMCDBG3(MDP, REA, 2, "%s id=%d -> %jd", __func__, ri, *v);

	return (0);
}

/*
 * Write a PMC register.
 */
static int
cmn600_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
{
	int counter, local_counter, nodeid;
	struct cmn600_descr *desc;
	void *arg;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));
	KASSERT(pm != NULL,
	    ("[cmn600,%d] PMC not owned (cpu%d,pmc%d)", __LINE__, cpu, ri));

	counter = ri % CMN600_COUNTERS_N;
	desc = cmn600desc(ri);
	arg = desc->pd_rw_arg;
	nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid;
	local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;

	PMCDBG4(MDP, WRI, 1, "%s cpu=%d ri=%d v=%jx", __func__, cpu, ri, v);

	cmn600_pmu_writecntr(arg, nodeid, local_counter, counter, 4, v);
	return (0);
}
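
/*
 * Row-index arithmetic used throughout this driver (restating
 * class_ri2unit() and the modulo above): rows are laid out as
 * CMN600_COUNTERS_N consecutive DTC global counters per CMN-600 unit,
 * so for a row index 'ri':
 *
 *	unit    = ri / CMN600_COUNTERS_N;
 *	counter = ri % CMN600_COUNTERS_N;
 */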

/*
 * Configure a hardware PMC according to the configuration recorded in
 * PMC 'pm'.
 */
static int
cmn600_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	PMCDBG4(MDP, CFG, 1, "%s cpu=%d ri=%d pm=%p", __func__, cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	phw = cmn600desc(ri)->pd_phw;

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[cmn600,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
	    __LINE__, pm, phw->phw_pmc));

	phw->phw_pmc = pm;
	return (0);
}

/*
 * Retrieve a configured PMC pointer from hardware state.
 */
static int
cmn600_get_config(int cpu, int ri, struct pmc **ppm)
{

	*ppm = cmn600desc(ri)->pd_phw->phw_pmc;

	return (0);
}

#define	CASE_DN_VER_EVT(n, id)	case PMC_EV_CMN600_PMU_ ## n: {	\
	*event = id;						\
	return (0);						\
}
static int
cmn600_map_ev2event(int ev, int rev, int *node_type, uint8_t *event)
{
	if (ev < PMC_EV_CMN600_PMU_dn_rxreq_dvmop ||
	    ev > PMC_EV_CMN600_PMU_cxla_avg_latency_form_tx_tlp)
		return (EINVAL);
	if (ev <= PMC_EV_CMN600_PMU_dn_rxreq_trk_full) {
		*node_type = NODE_TYPE_DVM;
		if (rev < 0x200) {
			switch (ev) {
			CASE_DN_VER_EVT(dn_rxreq_dvmop, 1);
			CASE_DN_VER_EVT(dn_rxreq_dvmsync, 2);
			CASE_DN_VER_EVT(dn_rxreq_dvmop_vmid_filtered, 3);
			CASE_DN_VER_EVT(dn_rxreq_retried, 4);
			CASE_DN_VER_EVT(dn_rxreq_trk_occupancy, 5);
			}
		} else {
			switch (ev) {
			CASE_DN_VER_EVT(dn_rxreq_tlbi_dvmop, 0x01);
			CASE_DN_VER_EVT(dn_rxreq_bpi_dvmop, 0x02);
			CASE_DN_VER_EVT(dn_rxreq_pici_dvmop, 0x03);
			CASE_DN_VER_EVT(dn_rxreq_vivi_dvmop, 0x04);
			CASE_DN_VER_EVT(dn_rxreq_dvmsync, 0x05);
			CASE_DN_VER_EVT(dn_rxreq_dvmop_vmid_filtered, 0x06);
			CASE_DN_VER_EVT(dn_rxreq_dvmop_other_filtered, 0x07);
			CASE_DN_VER_EVT(dn_rxreq_retried, 0x08);
			CASE_DN_VER_EVT(dn_rxreq_snp_sent, 0x09);
			CASE_DN_VER_EVT(dn_rxreq_snp_stalled, 0x0a);
			CASE_DN_VER_EVT(dn_rxreq_trk_full, 0x0b);
			CASE_DN_VER_EVT(dn_rxreq_trk_occupancy, 0x0c);
			}
		}
		return (EINVAL);
	} else if (ev <= PMC_EV_CMN600_PMU_hnf_snp_fwded) {
		*node_type = NODE_TYPE_HN_F;
		*event = ev - PMC_EV_CMN600_PMU_hnf_cache_miss;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_hni_pcie_serialization) {
		*node_type = NODE_TYPE_HN_I;
		*event = ev - PMC_EV_CMN600_PMU_hni_rrt_rd_occ_cnt_ovfl;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_xp_partial_dat_flit) {
		*node_type = NODE_TYPE_XP;
		*event = ev - PMC_EV_CMN600_PMU_xp_txflit_valid;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_sbsx_txrsp_stall) {
		*node_type = NODE_TYPE_SBSX;
		*event = ev - PMC_EV_CMN600_PMU_sbsx_rd_req;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_rnd_rdb_ord) {
		*node_type = NODE_TYPE_RN_D;
		*event = ev - PMC_EV_CMN600_PMU_rnd_s0_rdata_beats;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_rni_rdb_ord) {
		*node_type = NODE_TYPE_RN_I;
		*event = ev - PMC_EV_CMN600_PMU_rni_s0_rdata_beats;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_cxha_snphaz_occ) {
		*node_type = NODE_TYPE_CXHA;
		*event = ev - PMC_EV_CMN600_PMU_cxha_rddatbyp;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_cxra_ext_dat_stall) {
		*node_type = NODE_TYPE_CXRA;
		*event = ev - PMC_EV_CMN600_PMU_cxra_req_trk_occ;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_cxla_avg_latency_form_tx_tlp) {
		*node_type = NODE_TYPE_CXLA;
		*event = ev - PMC_EV_CMN600_PMU_cxla_rx_tlp_link0;
		return (0);
	}
	return (EINVAL);
}
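
/*
 * Example of the mapping above: PMC_EV_CMN600_PMU_hnf_cache_miss maps to
 * *node_type == NODE_TYPE_HN_F with *event == 0, since HN-F events are
 * numbered consecutively from that first event.  Only the DVM ("dn_")
 * events need the per-revision tables above, because their hardware
 * encodings differ between revisions below and at/above 0x200.
 */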

/*
 * Check if a given allocation is feasible.
 */
static int
cmn600_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	struct cmn600_descr *desc;
	const struct pmc_descr *pd;
	uint64_t caps __unused;
	int local_counter, node_type;
	enum pmc_event pe;
	void *arg;
	uint8_t e;
	int err;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	desc = cmn600desc(ri);
	arg = desc->pd_rw_arg;
	pd = &desc->pd_descr;
	if (cmn600_pmcs[class_ri2unit(ri)].domain != pcpu_find(cpu)->pc_domain)
		return (EINVAL);

	/* Check class match. */
	if (pd->pd_class != a->pm_class)
		return (EINVAL);

	caps = pm->pm_caps;

	PMCDBG3(MDP, ALL, 1, "%s ri=%d caps=0x%jx", __func__, ri,
	    (uintmax_t)caps);

	pe = a->pm_ev;
	err = cmn600_map_ev2event(pe, pmu_cmn600_rev(arg), &node_type, &e);
	if (err != 0)
		return (err);
	err = pmu_cmn600_alloc_localpmc(arg,
	    a->pm_md.pm_cmn600.pma_cmn600_nodeid, node_type, &local_counter);
	if (err != 0)
		return (err);

	pm->pm_md.pm_cmn600.pm_cmn600_config =
	    a->pm_md.pm_cmn600.pma_cmn600_config;
	pm->pm_md.pm_cmn600.pm_cmn600_occupancy =
	    a->pm_md.pm_cmn600.pma_cmn600_occupancy;
	desc->pd_nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid =
	    a->pm_md.pm_cmn600.pma_cmn600_nodeid;
	desc->pd_node_type = pm->pm_md.pm_cmn600.pm_cmn600_node_type =
	    node_type;
	pm->pm_md.pm_cmn600.pm_cmn600_event = e;
	desc->pd_local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter =
	    local_counter;

	return (0);
}

/* Release machine dependent state associated with a PMC. */
static int
cmn600_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	struct cmn600_descr *desc;
	struct pmc_hw *phw;
	struct pmc *pm __diagused;
	int err;

	(void) pmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	desc = cmn600desc(ri);
	phw = desc->pd_phw;
	pm = phw->phw_pmc;
	err = pmu_cmn600_free_localpmc(desc->pd_rw_arg, desc->pd_nodeid,
	    desc->pd_node_type, desc->pd_local_counter);
	if (err != 0)
		return (err);

	KASSERT(pm == NULL, ("[cmn600,%d] PHW pmc %p non-NULL", __LINE__,
	    pm));

	return (0);
}

static inline uint64_t
cmn600_encode_source(int node_type, int counter, int port, int sub)
{

	/* Calculate pmevcnt0_input_sel based on list in Table 3-794. */
	if (node_type == NODE_TYPE_XP)
		return (0x4 | counter);

	return (((port + 1) << 4) | (sub << 2) | counter);
}
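
/*
 * Worked example of the encoding above (the values follow directly from
 * the expression; Table 3-794 of the CMN-600 TRM is the reference the
 * code cites): a device-node event on XP port 1, sub-node 2, feeding
 * local counter 0 encodes as ((1 + 1) << 4) | (2 << 2) | 0 == 0x28,
 * whereas an XP's own event on counter 1 encodes as 0x4 | 1 == 0x5.
 */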

/*
 * Start a PMC.
 */
static int
cmn600_start_pmc(int cpu, int ri, struct pmc *pm)
{
	int counter, local_counter, node_type, shift;
	uint64_t config, occupancy, source, xp_pmucfg;
	struct cmn600_descr *desc;
	uint8_t event, port, sub;
	uint16_t nodeid;
	void *arg;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	counter = ri % CMN600_COUNTERS_N;
	desc = cmn600desc(ri);
	arg = desc->pd_rw_arg;

	PMCDBG3(MDP, STA, 1, "%s cpu=%d ri=%d", __func__, cpu, ri);

	config = pm->pm_md.pm_cmn600.pm_cmn600_config;
	occupancy = pm->pm_md.pm_cmn600.pm_cmn600_occupancy;
	node_type = pm->pm_md.pm_cmn600.pm_cmn600_node_type;
	event = pm->pm_md.pm_cmn600.pm_cmn600_event;
	nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid;
	local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;
	port = (nodeid >> 2) & 1;
	sub = nodeid & 3;

	switch (node_type) {
	case NODE_TYPE_DVM:
	case NODE_TYPE_HN_F:
	case NODE_TYPE_CXHA:
	case NODE_TYPE_CXRA:
		pmu_cmn600_md8(arg, nodeid, node_type,
		    CMN600_COMMON_PMU_EVENT_SEL,
		    CMN600_COMMON_PMU_EVENT_SEL_OCC_MASK,
		    occupancy << CMN600_COMMON_PMU_EVENT_SEL_OCC_SHIFT);
		break;
	case NODE_TYPE_XP:
		/* Set PC and Interface. */
		event |= config;
		break;
	}

	/*
	 * 5.5.1 Set up PMU counters.
	 * 1. Ensure that the NIDEN input is asserted (hardware side).
	 */
	/* 2. Select event of target node for one of four outputs. */
	pmu_cmn600_md8(arg, nodeid, node_type, CMN600_COMMON_PMU_EVENT_SEL,
	    0xff << (local_counter * 8),
	    event << (local_counter * 8));

	xp_pmucfg = pmu_cmn600_rd8(arg, nodeid, NODE_TYPE_XP,
	    POR_DTM_PMU_CONFIG);
	/*
	 * 3. Configure the XP to connect one of the four target node
	 * outputs to a local counter.
	 */
	source = cmn600_encode_source(node_type, local_counter, port, sub);
	shift = (local_counter * POR_DTM_PMU_CONFIG_VCNT_INPUT_SEL_WIDTH) +
	    POR_DTM_PMU_CONFIG_VCNT_INPUT_SEL_SHIFT;
	xp_pmucfg &= ~(0xffUL << shift);
	xp_pmucfg |= source << shift;

	/* 4. Pair with global counters A, B, C, ..., H. */
	shift = (local_counter * 4) + 16;
	xp_pmucfg &= ~(0xfUL << shift);
	xp_pmucfg |= counter << shift;
	/* Enable pairing. */
	xp_pmucfg |= 1 << (local_counter + 4);

	/* 5. Combine local counters 0 with 1, 2 with 3 or all four. */
	xp_pmucfg &= ~0xeUL;

	/* 6. Enable XP's PMU function. */
	xp_pmucfg |= POR_DTM_PMU_CONFIG_PMU_EN;
	pmu_cmn600_wr8(arg, nodeid, NODE_TYPE_XP, POR_DTM_PMU_CONFIG,
	    xp_pmucfg);
	if (node_type == NODE_TYPE_CXLA)
		pmu_cmn600_set8(arg, nodeid, NODE_TYPE_CXLA,
		    POR_CXG_RA_CFG_CTL, EN_CXLA_PMUCMD_PROP);

	/* 7. Enable DTM. */
	pmu_cmn600_set8(arg, nodeid, NODE_TYPE_XP, POR_DTM_CONTROL,
	    POR_DTM_CONTROL_DTM_ENABLE);

	/* 8. Reset grouping of global counters. Use 32 bits. */
	pmu_cmn600_clr8(arg, nodeid, NODE_TYPE_DTC, POR_DT_PMCR,
	    POR_DT_PMCR_CNTCFG_MASK);

	/* 9. Enable DTC. */
	pmu_cmn600_set8(arg, nodeid, NODE_TYPE_DTC, POR_DT_DTC_CTL,
	    POR_DT_DTC_CTL_DT_EN);

	/* 10. Enable Overflow Interrupt. */
	pmu_cmn600_set8(arg, nodeid, NODE_TYPE_DTC, POR_DT_PMCR,
	    POR_DT_PMCR_OVFL_INTR_EN);

	/* 11. Run PMC. */
	pmu_cmn600_set8(arg, nodeid, NODE_TYPE_DTC, POR_DT_PMCR,
	    POR_DT_PMCR_PMU_EN);

	return (0);
}
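
/*
 * Summary of the POR_DTM_PMU_CONFIG fields as programmed above (derived
 * from the shifts in cmn600_start_pmc(), not a full register
 * description): bits [3:1] would combine local counters into wider ones
 * and are cleared to keep four independent 16-bit counters; bit (4 + n)
 * enables pairing of local counter n with a DTC global counter; the
 * 4-bit field at bit (16 + 4 * n) selects which global counter; and the
 * VCNT_INPUT_SEL field for counter n selects its event source.
 */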

/*
 * Stop a PMC.
 */
static int
cmn600_stop_pmc(int cpu, int ri, struct pmc *pm)
{
	struct cmn600_descr *desc;
	int local_counter;
	uint64_t val;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	desc = cmn600desc(ri);

	PMCDBG2(MDP, STO, 1, "%s ri=%d", __func__, ri);

	/* Disable pairing. */
	local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;
	pmu_cmn600_clr8(desc->pd_rw_arg, pm->pm_md.pm_cmn600.pm_cmn600_nodeid,
	    NODE_TYPE_XP, POR_DTM_PMU_CONFIG, (1 << (local_counter + 4)));

	/* Shutdown XP's DTM function if no paired counters. */
	val = pmu_cmn600_rd8(desc->pd_rw_arg,
	    pm->pm_md.pm_cmn600.pm_cmn600_nodeid, NODE_TYPE_XP,
	    POR_DTM_PMU_CONFIG);
	if ((val & 0xf0) == 0)
		pmu_cmn600_clr8(desc->pd_rw_arg,
		    pm->pm_md.pm_cmn600.pm_cmn600_nodeid, NODE_TYPE_XP,
		    POR_DTM_CONTROL, POR_DTM_CONTROL_DTM_ENABLE);

	return (0);
}

/*
 * Describe a PMC.
 */
static int
cmn600_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	struct pmc_descr *pd;
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	phw = cmn600desc(ri)->pd_phw;
	pd = &cmn600desc(ri)->pd_descr;

	strlcpy(pi->pm_name, pd->pd_name, sizeof(pi->pm_name));
	pi->pm_class = pd->pd_class;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc = NULL;
	}

	return (0);
}
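
/*
 * Note: the CMN-600 counters are uncore and domain-wide (PMC_CAP_DOMWIDE
 * is advertised in pmc_cmn600_initialize() below), not per-CPU.  The
 * per-CPU hook that follows only wires the shared pmc_hw descriptors
 * into each CPU's pc_hwpmcs[] table so the machine-independent hwpmc(4)
 * code can reach them, marking a row enabled only when its backing unit
 * is present.
 */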

/*
 * Processor-dependent initialization.
 */
static int
cmn600_pcpu_init(struct pmc_mdep *md, int cpu)
{
	int first_ri, n, npmc;
	struct pmc_hw *phw;
	struct pmc_cpu *pc;
	int mdep_class;

	mdep_class = PMC_MDEP_CLASS_INDEX_CMN600;
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG1(MDP, INI, 1, "cmn600-init cpu=%d", cpu);

	/*
	 * Set the content of the hardware descriptors to a known
	 * state and initialize pointers in the MI per-cpu descriptor.
	 */
	pc = pmc_pcpu[cpu];
	first_ri = md->pmd_classdep[mdep_class].pcd_ri;
	npmc = md->pmd_classdep[mdep_class].pcd_num;

	for (n = 0; n < npmc; n++) {
		phw = cmn600desc(n)->pd_phw;
		phw->phw_state = PMC_PHW_CPU_TO_STATE(cpu) |
		    PMC_PHW_INDEX_TO_STATE(n);
		/* Set enabled only if unit present. */
		if (cmn600_pmcs[class_ri2unit(n)].arg != NULL)
			phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
		phw->phw_pmc = NULL;
		pc->pc_hwpmcs[n + first_ri] = phw;
	}
	return (0);
}

/*
 * Processor-dependent cleanup prior to the KLD being unloaded.
 */
static int
cmn600_pcpu_fini(struct pmc_mdep *md, int cpu)
{

	return (0);
}

static int
cmn600_pmu_intr(struct trapframe *tf, int unit, int i)
{
	struct pmc_cpu *pc __diagused;
	struct pmc_hw *phw;
	struct pmc *pm;
	int error, cpu, ri;

	ri = i + unit * CMN600_COUNTERS_N;
	cpu = curcpu;
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] CPU %d out of range", __LINE__, cpu));
	pc = pmc_pcpu[cpu];
	KASSERT(pc != NULL, ("pc != NULL"));

	phw = cmn600desc(ri)->pd_phw;
	KASSERT(phw != NULL, ("phw != NULL"));
	pm = phw->phw_pmc;
	if (pm == NULL)
		return (0);

	if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
		/* Always CPU0. */
		pm->pm_pcpu_state[0].pps_overflowcnt += 1;
		return (0);
	}

	if (pm->pm_state != PMC_STATE_RUNNING)
		return (0);

	error = pmc_process_interrupt(PMC_HR, pm, tf);
	if (error)
		cmn600_stop_pmc(cpu, ri, pm);

	/* Reload sampling count. */
	cmn600_write_pmc(cpu, ri, pm, pm->pm_sc.pm_reloadcount);

	return (0);
}

/*
 * Initialize ourselves.
 */
static int
cmn600_init_pmc_units(void)
{
	int i;

	if (cmn600_units > 0) { /* Already initialized. */
		return (0);
	}

	cmn600_units = cmn600_pmc_nunits();
	if (cmn600_units == 0)
		return (ENOENT);

	for (i = 0; i < cmn600_units; i++) {
		if (cmn600_pmc_getunit(i, &cmn600_pmcs[i].arg,
		    &cmn600_pmcs[i].domain) != 0)
			cmn600_pmcs[i].arg = NULL;
	}
	return (0);
}

int
pmc_cmn600_nclasses(void)
{

	if (cmn600_pmc_nunits() > 0)
		return (1);
	return (0);
}

int
pmc_cmn600_initialize(struct pmc_mdep *md)
{
	struct pmc_classdep *pcd;
	int i, npmc, unit;

	cmn600_init_pmc_units();
	KASSERT(md != NULL, ("[cmn600,%d] md is NULL", __LINE__));
	KASSERT(cmn600_units < CMN600_UNIT_MAX,
	    ("[cmn600,%d] cmn600_units too big", __LINE__));

	PMCDBG0(MDP, INI, 1, "cmn600-initialize");

	npmc = CMN600_COUNTERS_N * cmn600_units;
	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_CMN600];

	pcd->pcd_caps = PMC_CAP_SYSTEM | PMC_CAP_READ |
	    PMC_CAP_WRITE | PMC_CAP_QUALIFIER | PMC_CAP_INTERRUPT |
	    PMC_CAP_DOMWIDE;
	pcd->pcd_class = PMC_CLASS_CMN600_PMU;
	pcd->pcd_num = npmc;
	pcd->pcd_ri = md->pmd_npmc;
	pcd->pcd_width = 48;

	pcd->pcd_allocate_pmc = cmn600_allocate_pmc;
	pcd->pcd_config_pmc = cmn600_config_pmc;
	pcd->pcd_describe = cmn600_describe;
	pcd->pcd_get_config = cmn600_get_config;
	pcd->pcd_get_msr = NULL;
	pcd->pcd_pcpu_fini = cmn600_pcpu_fini;
	pcd->pcd_pcpu_init = cmn600_pcpu_init;
	pcd->pcd_read_pmc = cmn600_read_pmc;
	pcd->pcd_release_pmc = cmn600_release_pmc;
	pcd->pcd_start_pmc = cmn600_start_pmc;
	pcd->pcd_stop_pmc = cmn600_stop_pmc;
	pcd->pcd_write_pmc = cmn600_write_pmc;

	md->pmd_npmc += npmc;
	cmn600_pmcdesc = malloc(sizeof(struct cmn600_descr *) * npmc *
	    CMN600_PMU_DEFAULT_UNITS_N, M_PMC, M_WAITOK | M_ZERO);
	for (i = 0; i < npmc; i++) {
		cmn600_pmcdesc[i] = malloc(sizeof(struct cmn600_descr), M_PMC,
		    M_WAITOK | M_ZERO);

		unit = i / CMN600_COUNTERS_N;
		KASSERT(unit >= 0, ("unit >= 0"));
		KASSERT(cmn600_pmcs[unit].arg != NULL, ("arg != NULL"));

		cmn600_pmcdesc[i]->pd_rw_arg = cmn600_pmcs[unit].arg;
		cmn600_pmcdesc[i]->pd_descr.pd_class = PMC_CLASS_CMN600_PMU;
		cmn600_pmcdesc[i]->pd_descr.pd_caps = pcd->pcd_caps;
		cmn600_pmcdesc[i]->pd_phw = malloc(sizeof(struct pmc_hw),
		    M_PMC, M_WAITOK | M_ZERO);
		snprintf(cmn600_pmcdesc[i]->pd_descr.pd_name,
		    sizeof(cmn600_pmcdesc[i]->pd_descr.pd_name),
		    "CMN600_%d", i);
		cmn600_pmu_intr_cb(cmn600_pmcs[unit].arg, cmn600_pmu_intr);
	}

	return (0);
}
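
/*
 * The 48-bit pcd_width advertised above matches the virtual counter
 * assembled by cmn600_pmu_readcntr(): a 32-bit DTC global counter in the
 * upper bits over a 16-bit XP-local counter in the lower bits.
 */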
("unit >= 0")); 760 KASSERT(cmn600_pmcs[unit].arg != NULL, ("arg != NULL")); 761 762 cmn600_pmcdesc[i]->pd_rw_arg = cmn600_pmcs[unit].arg; 763 cmn600_pmcdesc[i]->pd_descr.pd_class = 764 PMC_CLASS_CMN600_PMU; 765 cmn600_pmcdesc[i]->pd_descr.pd_caps = pcd->pcd_caps; 766 cmn600_pmcdesc[i]->pd_phw = (struct pmc_hw *)malloc( 767 sizeof(struct pmc_hw), M_PMC, M_WAITOK|M_ZERO); 768 snprintf(cmn600_pmcdesc[i]->pd_descr.pd_name, 63, 769 "CMN600_%d", i); 770 cmn600_pmu_intr_cb(cmn600_pmcs[unit].arg, cmn600_pmu_intr); 771 } 772 773 return (0); 774 } 775 776 void 777 pmc_cmn600_finalize(struct pmc_mdep *md) 778 { 779 struct pmc_classdep *pcd; 780 int i, npmc; 781 782 KASSERT(md->pmd_classdep[PMC_MDEP_CLASS_INDEX_CMN600].pcd_class == 783 PMC_CLASS_CMN600_PMU, ("[cmn600,%d] pmc class mismatch", 784 __LINE__)); 785 786 pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_CMN600]; 787 788 npmc = pcd->pcd_num; 789 for (i = 0; i < npmc; i++) { 790 free(cmn600_pmcdesc[i]->pd_phw, M_PMC); 791 free(cmn600_pmcdesc[i], M_PMC); 792 } 793 free(cmn600_pmcdesc, M_PMC); 794 cmn600_pmcdesc = NULL; 795 } 796 797 MODULE_DEPEND(pmc, cmn600, 1, 1, 1); 798