/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * Copyright (c) 2021 ARM Ltd
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Arm CoreLink CMN-600 Coherent Mesh Network PMU Driver */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>

#include <machine/cmn600_reg.h>

struct cmn600_descr {
	struct pmc_descr pd_descr;  /* "base class" */
	void		*pd_rw_arg; /* Argument to use with read/write */
	struct pmc	*pd_pmc;
	struct pmc_hw	*pd_phw;
	uint32_t	 pd_nodeid;
	int32_t		 pd_node_type;
	int		 pd_local_counter;
};

static struct cmn600_descr **cmn600_pmcdesc;

static struct cmn600_pmc cmn600_pmcs[CMN600_UNIT_MAX];
static int cmn600_units = 0;

static inline struct cmn600_descr *
cmn600desc(int ri)
{

	return (cmn600_pmcdesc[ri]);
}

static inline int
class_ri2unit(int ri)
{

	return (ri / CMN600_COUNTERS_N);
}
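
/*
 * Row indices are dense across units: each CMN-600 unit contributes
 * CMN600_COUNTERS_N consecutive rows, so unit = ri / CMN600_COUNTERS_N and
 * the unit-local (DTC global) counter = ri % CMN600_COUNTERS_N.  For
 * example, assuming CMN600_COUNTERS_N is 8 (the DTC has eight global
 * counters, per the dtccntr assertions below), ri = 11 names unit 1,
 * counter 3.
 */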

#define	EVENCNTR(x)	(((x) >> POR_DT_PMEVCNT_EVENCNT_SHIFT) << \
    POR_DTM_PMEVCNT_CNTR_WIDTH)
#define	ODDCNTR(x)	(((x) >> POR_DT_PMEVCNT_ODDCNT_SHIFT) << \
    POR_DTM_PMEVCNT_CNTR_WIDTH)
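
/*
 * A 64-bit POR_DT_PMEVCNT register holds two packed DTC event counts.
 * EVENCNTR()/ODDCNTR() extract the half belonging to an even- or
 * odd-numbered global counter and shift it up by one DTM local-counter
 * width so it can be OR-ed with the XP's 16-bit count below.
 */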
static uint64_t
cmn600_pmu_readcntr(void *arg, u_int nodeid, u_int xpcntr, u_int dtccntr,
    u_int width)
{
	uint64_t dtcval, xpval;

	KASSERT(xpcntr < 4, ("[cmn600,%d] XP counter number %d is too big."
	    " Max: 3", __LINE__, xpcntr));
	KASSERT(dtccntr < 8, ("[cmn600,%d] Global counter number %d is too"
	    " big. Max: 7", __LINE__, dtccntr));

	dtcval = pmu_cmn600_rd8(arg, nodeid, NODE_TYPE_DTC,
	    POR_DT_PMEVCNT(dtccntr >> 1));
	if (width == 4) {
		dtcval = (dtccntr & 1) ? ODDCNTR(dtcval) : EVENCNTR(dtcval);
		dtcval &= 0xffffffff0000UL;
	} else
		dtcval <<= POR_DTM_PMEVCNT_CNTR_WIDTH;

	xpval = pmu_cmn600_rd8(arg, nodeid, NODE_TYPE_XP, POR_DTM_PMEVCNT);
	xpval >>= xpcntr * POR_DTM_PMEVCNT_CNTR_WIDTH;
	xpval &= 0xffffUL;
	return (dtcval | xpval);
}
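
/*
 * Example of the composition above, assuming POR_DTM_PMEVCNT_CNTR_WIDTH is
 * 16: with dtccntr = 3 and width = 4 the driver reads POR_DT_PMEVCNT(1),
 * keeps its odd 32-bit half in bits 47:16 of the result, and takes bits
 * 15:0 from XP local counter xpcntr in POR_DTM_PMEVCNT.  The resulting
 * 48-bit value matches the pcd_width advertised in pmc_cmn600_initialize().
 */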

static void
cmn600_pmu_writecntr(void *arg, u_int nodeid, u_int xpcntr, u_int dtccntr,
    u_int width, uint64_t val)
{
	int shift;

	KASSERT(xpcntr < 4, ("[cmn600,%d] XP counter number %d is too big."
	    " Max: 3", __LINE__, xpcntr));
	KASSERT(dtccntr < 8, ("[cmn600,%d] Global counter number %d is too"
	    " big. Max: 7", __LINE__, dtccntr));

	if (width == 4) {
		shift = (dtccntr & 1) ? POR_DT_PMEVCNT_ODDCNT_SHIFT :
		    POR_DT_PMEVCNT_EVENCNT_SHIFT;
		pmu_cmn600_md8(arg, nodeid, NODE_TYPE_DTC,
		    POR_DT_PMEVCNT(dtccntr >> 1), 0xffffffffUL << shift,
		    ((val >> POR_DTM_PMEVCNT_CNTR_WIDTH) & 0xffffffff) << shift);
	} else
		pmu_cmn600_wr8(arg, nodeid, NODE_TYPE_DTC,
		    POR_DT_PMEVCNT(dtccntr & ~0x1), val >>
		    POR_DTM_PMEVCNT_CNTR_WIDTH);

	shift = xpcntr * POR_DTM_PMEVCNT_CNTR_WIDTH;
	val &= 0xffffUL;
	pmu_cmn600_md8(arg, nodeid, NODE_TYPE_XP, POR_DTM_PMEVCNT,
	    0xffffUL << shift, val << shift);
}

#undef	EVENCNTR
#undef	ODDCNTR

/*
 * Read a PMC register.
 */
static int
cmn600_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	int counter, local_counter, nodeid;
	struct cmn600_descr *desc;
	struct pmc *pm;
	void *arg;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	counter = ri % CMN600_COUNTERS_N;
	desc = cmn600desc(ri);
	pm = desc->pd_phw->phw_pmc;
	arg = desc->pd_rw_arg;

	/* Assert ownership before dereferencing the PMC record. */
	KASSERT(pm != NULL,
	    ("[cmn600,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
		cpu, ri));

	nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid;
	local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;

	*v = cmn600_pmu_readcntr(arg, nodeid, local_counter, counter, 4);
	PMCDBG3(MDP, REA, 2, "%s id=%d -> %jd", __func__, ri, *v);

	return (0);
}

/*
 * Write a PMC register.
 */
static int
cmn600_write_pmc(int cpu, int ri, pmc_value_t v)
{
	int counter, local_counter, nodeid;
	struct cmn600_descr *desc;
	struct pmc *pm;
	void *arg;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	counter = ri % CMN600_COUNTERS_N;
	desc = cmn600desc(ri);
	pm = desc->pd_phw->phw_pmc;
	arg = desc->pd_rw_arg;

	/* Assert ownership before dereferencing the PMC record. */
	KASSERT(pm != NULL,
	    ("[cmn600,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
		cpu, ri));

	nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid;
	local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;

	PMCDBG4(MDP, WRI, 1, "%s cpu=%d ri=%d v=%jx", __func__, cpu, ri, v);

	cmn600_pmu_writecntr(arg, nodeid, local_counter, counter, 4, v);
	return (0);
}

/*
 * Configure a hardware PMC according to the configuration recorded in
 * PMC 'pm'.
 */
static int
cmn600_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	PMCDBG4(MDP, CFG, 1, "%s cpu=%d ri=%d pm=%p", __func__, cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	phw = cmn600desc(ri)->pd_phw;

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[cmn600,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
		__LINE__, pm, phw->phw_pmc));

	phw->phw_pmc = pm;
	return (0);
}

/*
 * Retrieve a configured PMC pointer from hardware state.
 */
static int
cmn600_get_config(int cpu, int ri, struct pmc **ppm)
{

	*ppm = cmn600desc(ri)->pd_phw->phw_pmc;

	return (0);
}

#define	CASE_DN_VER_EVT(n, id) case PMC_EV_CMN600_PMU_ ## n: { *event = id; \
	return (0); }
static int
cmn600_map_ev2event(int ev, int rev, int *node_type, uint8_t *event)
{
	/*
	 * The last event handled below is the CXLA average-latency one, so
	 * use it as the upper bound of the valid range.
	 */
	if (ev < PMC_EV_CMN600_PMU_dn_rxreq_dvmop ||
	    ev > PMC_EV_CMN600_PMU_cxla_avg_latency_form_tx_tlp)
		return (EINVAL);
	if (ev <= PMC_EV_CMN600_PMU_dn_rxreq_trk_full) {
		*node_type = NODE_TYPE_DVM;
		if (rev < 0x200) {
			switch (ev) {
			CASE_DN_VER_EVT(dn_rxreq_dvmop, 1);
			CASE_DN_VER_EVT(dn_rxreq_dvmsync, 2);
			CASE_DN_VER_EVT(dn_rxreq_dvmop_vmid_filtered, 3);
			CASE_DN_VER_EVT(dn_rxreq_retried, 4);
			CASE_DN_VER_EVT(dn_rxreq_trk_occupancy, 5);
			}
		} else {
			switch (ev) {
			CASE_DN_VER_EVT(dn_rxreq_tlbi_dvmop, 0x01);
			CASE_DN_VER_EVT(dn_rxreq_bpi_dvmop, 0x02);
			CASE_DN_VER_EVT(dn_rxreq_pici_dvmop, 0x03);
			CASE_DN_VER_EVT(dn_rxreq_vivi_dvmop, 0x04);
			CASE_DN_VER_EVT(dn_rxreq_dvmsync, 0x05);
			CASE_DN_VER_EVT(dn_rxreq_dvmop_vmid_filtered, 0x06);
			CASE_DN_VER_EVT(dn_rxreq_dvmop_other_filtered, 0x07);
			CASE_DN_VER_EVT(dn_rxreq_retried, 0x08);
			CASE_DN_VER_EVT(dn_rxreq_snp_sent, 0x09);
			CASE_DN_VER_EVT(dn_rxreq_snp_stalled, 0x0a);
			CASE_DN_VER_EVT(dn_rxreq_trk_full, 0x0b);
			CASE_DN_VER_EVT(dn_rxreq_trk_occupancy, 0x0c);
			}
		}
		return (EINVAL);
	} else if (ev <= PMC_EV_CMN600_PMU_hnf_snp_fwded) {
		*node_type = NODE_TYPE_HN_F;
		*event = ev - PMC_EV_CMN600_PMU_hnf_cache_miss;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_hni_pcie_serialization) {
		*node_type = NODE_TYPE_HN_I;
		*event = ev - PMC_EV_CMN600_PMU_hni_rrt_rd_occ_cnt_ovfl;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_xp_partial_dat_flit) {
		*node_type = NODE_TYPE_XP;
		*event = ev - PMC_EV_CMN600_PMU_xp_txflit_valid;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_sbsx_txrsp_stall) {
		*node_type = NODE_TYPE_SBSX;
		*event = ev - PMC_EV_CMN600_PMU_sbsx_rd_req;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_rnd_rdb_ord) {
		*node_type = NODE_TYPE_RN_D;
		*event = ev - PMC_EV_CMN600_PMU_rnd_s0_rdata_beats;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_rni_rdb_ord) {
		*node_type = NODE_TYPE_RN_I;
		*event = ev - PMC_EV_CMN600_PMU_rni_s0_rdata_beats;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_cxha_snphaz_occ) {
		*node_type = NODE_TYPE_CXHA;
		*event = ev - PMC_EV_CMN600_PMU_cxha_rddatbyp;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_cxra_ext_dat_stall) {
		*node_type = NODE_TYPE_CXRA;
		*event = ev - PMC_EV_CMN600_PMU_cxra_req_trk_occ;
		return (0);
	} else if (ev <= PMC_EV_CMN600_PMU_cxla_avg_latency_form_tx_tlp) {
		*node_type = NODE_TYPE_CXLA;
		*event = ev - PMC_EV_CMN600_PMU_cxla_rx_tlp_link0;
		return (0);
	}
	return (EINVAL);
}
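
/*
 * Mapping sketch: a request for PMC_EV_CMN600_PMU_hnf_cache_miss yields
 * *node_type = NODE_TYPE_HN_F and *event = 0, the node-local event number
 * later programmed into CMN600_COMMON_PMU_EVENT_SEL by cmn600_start_pmc().
 * Only the DVM node encodings depend on the mesh revision (the rev < 0x200
 * split above).
 */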

/*
 * Check if a given allocation is feasible.
 */

static int
cmn600_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	struct cmn600_descr *desc;
	const struct pmc_descr *pd;
	uint64_t caps __unused;
	int local_counter, node_type;
	enum pmc_event pe;
	void *arg;
	uint8_t e;
	int err;

	(void) cpu;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	desc = cmn600desc(ri);
	arg = desc->pd_rw_arg;
	pd = &desc->pd_descr;
	if (cmn600_pmcs[class_ri2unit(ri)].domain != pcpu_find(cpu)->pc_domain)
		return (EINVAL);

	/* Check for a class match. */
	if (pd->pd_class != a->pm_class)
		return (EINVAL);

	caps = pm->pm_caps;

	PMCDBG3(MDP, ALL, 1, "%s ri=%d caps=0x%jx", __func__, ri,
	    (uintmax_t)caps);

	pe = a->pm_ev;
	err = cmn600_map_ev2event(pe, pmu_cmn600_rev(arg), &node_type, &e);
	if (err != 0)
		return (err);
	err = pmu_cmn600_alloc_localpmc(arg,
	    a->pm_md.pm_cmn600.pma_cmn600_nodeid, node_type, &local_counter);
	if (err != 0)
		return (err);

	pm->pm_md.pm_cmn600.pm_cmn600_config =
	    a->pm_md.pm_cmn600.pma_cmn600_config;
	pm->pm_md.pm_cmn600.pm_cmn600_occupancy =
	    a->pm_md.pm_cmn600.pma_cmn600_occupancy;
	desc->pd_nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid =
	    a->pm_md.pm_cmn600.pma_cmn600_nodeid;
	desc->pd_node_type = pm->pm_md.pm_cmn600.pm_cmn600_node_type =
	    node_type;
	pm->pm_md.pm_cmn600.pm_cmn600_event = e;
	desc->pd_local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter =
	    local_counter;

	return (0);
}
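
/*
 * A note on the two-level counter scheme, inferred from how the values are
 * used in this file: pmu_cmn600_alloc_localpmc() (provided by the CMN-600
 * bus driver behind the pmu_cmn600_*() calls) reserves one of the XP-local
 * DTM counters (xpcntr < 4 above) for the target node, while the row index
 * fixes the DTC global counter that extends it to 48 bits;
 * cmn600_start_pmc() then pairs the two.
 */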

/* Release machine dependent state associated with a PMC. */

static int
cmn600_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	struct cmn600_descr *desc;
	struct pmc_hw *phw;
	struct pmc *pm __diagused;
	int err;

	(void) pmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	desc = cmn600desc(ri);
	phw = desc->pd_phw;
	pm  = phw->phw_pmc;
	err = pmu_cmn600_free_localpmc(desc->pd_rw_arg, desc->pd_nodeid,
	    desc->pd_node_type, desc->pd_local_counter);
	if (err != 0)
		return (err);

	KASSERT(pm == NULL, ("[cmn600,%d] PHW pmc %p non-NULL", __LINE__, pm));

	return (0);
}

static inline uint64_t
cmn600_encode_source(int node_type, int counter, int port, int sub)
{

	/* Calculate pmevcnt0_input_sel based on the list in Table 3-794. */
	if (node_type == NODE_TYPE_XP)
		return (0x4 | counter);

	return (((port + 1) << 4) | (sub << 2) | counter);
}
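
/*
 * Sketch of the input_sel encoding built above, following the Table 3-794
 * reference in the comment: XP-local events encode as 0b100 | counter,
 * while device-node events select the node's PMU output by device port
 * (upper nibble, port + 1), sub-node ID (bits 3:2) and event output lane
 * (bits 1:0).  E.g. port 1, sub 2, counter 0 encodes as
 * (2 << 4) | (2 << 2) | 0 == 0x28.
 */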

/*
 * Start a PMC.
 */

static int
cmn600_start_pmc(int cpu, int ri)
{
	int counter, local_counter, node_type, shift;
	uint64_t config, occupancy, source, xp_pmucfg;
	struct cmn600_descr *desc;
	struct pmc_hw *phw;
	struct pmc *pm;
	uint8_t event, port, sub;
	uint16_t nodeid;
	void *arg;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	counter = ri % CMN600_COUNTERS_N;
	desc = cmn600desc(ri);
	phw = desc->pd_phw;
	pm  = phw->phw_pmc;
	arg = desc->pd_rw_arg;

	KASSERT(pm != NULL,
	    ("[cmn600,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
		cpu, ri));

	PMCDBG3(MDP, STA, 1, "%s cpu=%d ri=%d", __func__, cpu, ri);

	config = pm->pm_md.pm_cmn600.pm_cmn600_config;
	occupancy = pm->pm_md.pm_cmn600.pm_cmn600_occupancy;
	node_type = pm->pm_md.pm_cmn600.pm_cmn600_node_type;
	event = pm->pm_md.pm_cmn600.pm_cmn600_event;
	nodeid = pm->pm_md.pm_cmn600.pm_cmn600_nodeid;
	local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;
	port = (nodeid >> 2) & 1;
	sub = nodeid & 3;

	switch (node_type) {
	case NODE_TYPE_DVM:
	case NODE_TYPE_HN_F:
	case NODE_TYPE_CXHA:
	case NODE_TYPE_CXRA:
		pmu_cmn600_md8(arg, nodeid, node_type,
		    CMN600_COMMON_PMU_EVENT_SEL,
		    CMN600_COMMON_PMU_EVENT_SEL_OCC_MASK,
		    occupancy << CMN600_COMMON_PMU_EVENT_SEL_OCC_SHIFT);
		break;
	case NODE_TYPE_XP:
		/* Set PC and Interface. */
		event |= config;
		break;
	}

	/*
	 * 5.5.1 Set up PMU counters.
	 * 1. Ensure that the NIDEN input is asserted (handled on the
	 *    hardware side).
	 */
	/* 2. Select the event of the target node for one of four outputs. */
	pmu_cmn600_md8(arg, nodeid, node_type, CMN600_COMMON_PMU_EVENT_SEL,
	    0xff << (local_counter * 8),
	    event << (local_counter * 8));

	xp_pmucfg = pmu_cmn600_rd8(arg, nodeid, NODE_TYPE_XP,
	    POR_DTM_PMU_CONFIG);
	/*
	 * 3. Configure the XP to connect one of the four target node outputs
	 *    to a local counter.
	 */
	source = cmn600_encode_source(node_type, local_counter, port, sub);
	shift = (local_counter * POR_DTM_PMU_CONFIG_VCNT_INPUT_SEL_WIDTH) +
	    POR_DTM_PMU_CONFIG_VCNT_INPUT_SEL_SHIFT;
	xp_pmucfg &= ~(0xffUL << shift);
	xp_pmucfg |= source << shift;

	/* 4. Pair with one of the global counters A, B, C, ..., H. */
	shift = (local_counter * 4) + 16;
	xp_pmucfg &= ~(0xfUL << shift);
	xp_pmucfg |= counter << shift;
	/* Enable pairing. */
	xp_pmucfg |= 1 << (local_counter + 4);

	/* 5. Do not combine local counters 0 with 1, 2 with 3, or all four. */
	xp_pmucfg &= ~0xeUL;

	/* 6. Enable the XP's PMU function. */
	xp_pmucfg |= POR_DTM_PMU_CONFIG_PMU_EN;
	pmu_cmn600_wr8(arg, nodeid, NODE_TYPE_XP, POR_DTM_PMU_CONFIG, xp_pmucfg);
	if (node_type == NODE_TYPE_CXLA)
		pmu_cmn600_set8(arg, nodeid, NODE_TYPE_CXLA,
		    POR_CXG_RA_CFG_CTL, EN_CXLA_PMUCMD_PROP);

	/* 7. Enable the DTM. */
	pmu_cmn600_set8(arg, nodeid, NODE_TYPE_XP, POR_DTM_CONTROL,
	    POR_DTM_CONTROL_DTM_ENABLE);

	/* 8. Reset the grouping of global counters; use 32-bit counters. */
	pmu_cmn600_clr8(arg, nodeid, NODE_TYPE_DTC, POR_DT_PMCR,
	    POR_DT_PMCR_CNTCFG_MASK);

	/* 9. Enable the DTC. */
	pmu_cmn600_set8(arg, nodeid, NODE_TYPE_DTC, POR_DT_DTC_CTL,
	    POR_DT_DTC_CTL_DT_EN);

	/* 10. Enable the overflow interrupt. */
	pmu_cmn600_set8(arg, nodeid, NODE_TYPE_DTC, POR_DT_PMCR,
	    POR_DT_PMCR_OVFL_INTR_EN);

	/* 11. Run the PMC. */
	pmu_cmn600_set8(arg, nodeid, NODE_TYPE_DTC, POR_DT_PMCR,
	    POR_DT_PMCR_PMU_EN);

	return (0);
}
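
/*
 * For reference, the POR_DTM_PMU_CONFIG fields assembled above (a sketch
 * derived from the shifts in this file, not a complete register layout):
 * bit 0 enables the DTM PMU function, bits 3:1 select local-counter
 * combining (cleared here to keep four independent 16-bit counters),
 * bits 7:4 enable pairing of each local counter with a DTC global counter,
 * the 4-bit fields at 16 + 4 * n carry the paired global counter number,
 * and the per-counter input_sel bytes start at
 * POR_DTM_PMU_CONFIG_VCNT_INPUT_SEL_SHIFT.
 */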

/*
 * Stop a PMC.
 */

static int
cmn600_stop_pmc(int cpu, int ri)
{
	struct cmn600_descr *desc;
	struct pmc_hw *phw;
	struct pmc *pm;
	int local_counter;
	uint64_t val;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	desc = cmn600desc(ri);
	phw = desc->pd_phw;
	pm  = phw->phw_pmc;

	KASSERT(pm != NULL,
	    ("[cmn600,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
		cpu, ri));

	PMCDBG2(MDP, STO, 1, "%s ri=%d", __func__, ri);

	/* Disable pairing. */
	local_counter = pm->pm_md.pm_cmn600.pm_cmn600_local_counter;
	pmu_cmn600_clr8(desc->pd_rw_arg, pm->pm_md.pm_cmn600.pm_cmn600_nodeid,
	    NODE_TYPE_XP, POR_DTM_PMU_CONFIG, (1 << (local_counter + 4)));

	/*
	 * Shut down the XP's DTM function if no counters remain paired.
	 * The enable bit lives in POR_DTM_CONTROL, the register it was set
	 * in by cmn600_start_pmc().
	 */
	val = pmu_cmn600_rd8(desc->pd_rw_arg,
	    pm->pm_md.pm_cmn600.pm_cmn600_nodeid, NODE_TYPE_XP,
	    POR_DTM_PMU_CONFIG);
	if ((val & 0xf0) == 0)
		pmu_cmn600_clr8(desc->pd_rw_arg,
		    pm->pm_md.pm_cmn600.pm_cmn600_nodeid, NODE_TYPE_XP,
		    POR_DTM_CONTROL, POR_DTM_CONTROL_DTM_ENABLE);

	return (0);
}

/*
 * Describe a PMC.
 */
static int
cmn600_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	struct pmc_hw *phw;
	size_t copied;
	int error;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0, ("[cmn600,%d] row-index %d out of range", __LINE__,
	    ri));

	phw = cmn600desc(ri)->pd_phw;

	if ((error = copystr(cmn600desc(ri)->pd_descr.pd_name,
	    pi->pm_name, PMC_NAME_MAX, &copied)) != 0)
		return (error);

	pi->pm_class = cmn600desc(ri)->pd_descr.pd_class;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return (0);
}

/*
 * Processor dependent initialization.
 */

static int
cmn600_pcpu_init(struct pmc_mdep *md, int cpu)
{
	int first_ri, n, npmc;
	struct pmc_hw  *phw;
	struct pmc_cpu *pc;
	int mdep_class;

	mdep_class = PMC_MDEP_CLASS_INDEX_CMN600;
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG1(MDP, INI, 1, "cmn600-init cpu=%d", cpu);

	/*
	 * Set the content of the hardware descriptors to a known
	 * state and initialize pointers in the MI per-cpu descriptor.
	 */

	pc = pmc_pcpu[cpu];
	first_ri = md->pmd_classdep[mdep_class].pcd_ri;
	npmc = md->pmd_classdep[mdep_class].pcd_num;

	for (n = 0; n < npmc; n++) {
		phw = cmn600desc(n)->pd_phw;
		phw->phw_state = PMC_PHW_CPU_TO_STATE(cpu) |
		    PMC_PHW_INDEX_TO_STATE(n);
		/* Set enabled only if the unit is present. */
		if (cmn600_pmcs[class_ri2unit(n)].arg != NULL)
			phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
		phw->phw_pmc = NULL;
		pc->pc_hwpmcs[n + first_ri] = phw;
	}
	return (0);
}

/*
 * Processor dependent cleanup prior to the KLD being unloaded.
 */

static int
cmn600_pcpu_fini(struct pmc_mdep *md, int cpu)
{

	return (0);
}

static int
cmn600_pmu_intr(struct trapframe *tf, int unit, int i)
{
	struct pmc_cpu *pc __diagused;
	struct pmc_hw *phw;
	struct pmc *pm;
	int error, cpu, ri;

	ri = i + unit * CMN600_COUNTERS_N;
	cpu = curcpu;
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[cmn600,%d] CPU %d out of range", __LINE__, cpu));
	pc = pmc_pcpu[cpu];
	KASSERT(pc != NULL, ("pc != NULL"));

	phw = cmn600desc(ri)->pd_phw;
	KASSERT(phw != NULL, ("phw != NULL"));
	pm  = phw->phw_pmc;
	if (pm == NULL)
		return (0);

	if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
		/* Counting-mode overflows are always attributed to CPU 0. */
		pm->pm_pcpu_state[0].pps_overflowcnt += 1;
		return (0);
	}

	if (pm->pm_state != PMC_STATE_RUNNING)
		return (0);

	error = pmc_process_interrupt(PMC_HR, pm, tf);
	if (error)
		cmn600_stop_pmc(cpu, ri);

	/* Reload the sampling count. */
	cmn600_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);

	return (0);
}
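
/*
 * Note on the overflow path above: a mesh counter is not bound to the CPU
 * that takes the interrupt, so counting-mode overflows are accumulated on
 * a single pcpu slot (CPU 0) regardless of curcpu.  For sampling-mode PMCs
 * the reload value is rewritten explicitly, on the assumption that the
 * hardware counter wraps rather than reloading itself.
 */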

/*
 * Initialize ourselves.
 */
static int
cmn600_init_pmc_units(void)
{
	int i;

	if (cmn600_units > 0) { /* Already initialized. */
		return (0);
	}

	cmn600_units = cmn600_pmc_nunits();
	if (cmn600_units == 0)
		return (ENOENT);

	for (i = 0; i < cmn600_units; i++) {
		if (cmn600_pmc_getunit(i, &cmn600_pmcs[i].arg,
		    &cmn600_pmcs[i].domain) != 0)
			cmn600_pmcs[i].arg = NULL;
	}
	return (0);
}

int
pmc_cmn600_nclasses(void)
{

	if (cmn600_pmc_nunits() > 0)
		return (1);
	return (0);
}

int
pmc_cmn600_initialize(struct pmc_mdep *md)
{
	struct pmc_classdep *pcd;
	int i, npmc, unit;

	cmn600_init_pmc_units();
	KASSERT(md != NULL, ("[cmn600,%d] md is NULL", __LINE__));
	KASSERT(cmn600_units < CMN600_UNIT_MAX,
	    ("[cmn600,%d] cmn600_units too big", __LINE__));

	PMCDBG0(MDP, INI, 1, "cmn600-initialize");

	npmc = CMN600_COUNTERS_N * cmn600_units;
	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_CMN600];

	pcd->pcd_caps		= PMC_CAP_SYSTEM | PMC_CAP_READ |
	    PMC_CAP_WRITE | PMC_CAP_QUALIFIER | PMC_CAP_INTERRUPT |
	    PMC_CAP_DOMWIDE;
	pcd->pcd_class	= PMC_CLASS_CMN600_PMU;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= 48;

	pcd->pcd_allocate_pmc	= cmn600_allocate_pmc;
	pcd->pcd_config_pmc	= cmn600_config_pmc;
	pcd->pcd_describe	= cmn600_describe;
	pcd->pcd_get_config	= cmn600_get_config;
	pcd->pcd_get_msr	= NULL;
	pcd->pcd_pcpu_fini	= cmn600_pcpu_fini;
	pcd->pcd_pcpu_init	= cmn600_pcpu_init;
	pcd->pcd_read_pmc	= cmn600_read_pmc;
	pcd->pcd_release_pmc	= cmn600_release_pmc;
	pcd->pcd_start_pmc	= cmn600_start_pmc;
	pcd->pcd_stop_pmc	= cmn600_stop_pmc;
	pcd->pcd_write_pmc	= cmn600_write_pmc;

	md->pmd_npmc	       += npmc;

	/* npmc already covers every unit; no extra scaling is needed. */
	cmn600_pmcdesc = malloc(sizeof(struct cmn600_descr *) * npmc, M_PMC,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < npmc; i++) {
		cmn600_pmcdesc[i] = malloc(sizeof(struct cmn600_descr), M_PMC,
		    M_WAITOK | M_ZERO);

		unit = i / CMN600_COUNTERS_N;
		KASSERT(unit >= 0, ("unit >= 0"));
		KASSERT(cmn600_pmcs[unit].arg != NULL, ("arg != NULL"));

		cmn600_pmcdesc[i]->pd_rw_arg = cmn600_pmcs[unit].arg;
		cmn600_pmcdesc[i]->pd_descr.pd_class =
		    PMC_CLASS_CMN600_PMU;
		cmn600_pmcdesc[i]->pd_descr.pd_caps = pcd->pcd_caps;
		cmn600_pmcdesc[i]->pd_phw = (struct pmc_hw *)malloc(
		    sizeof(struct pmc_hw), M_PMC, M_WAITOK | M_ZERO);
		snprintf(cmn600_pmcdesc[i]->pd_descr.pd_name,
		    sizeof(cmn600_pmcdesc[i]->pd_descr.pd_name),
		    "CMN600_%d", i);
		cmn600_pmu_intr_cb(cmn600_pmcs[unit].arg, cmn600_pmu_intr);
	}

	return (0);
}

void
pmc_cmn600_finalize(struct pmc_mdep *md)
{
	struct pmc_classdep *pcd;
	int i, npmc;

	KASSERT(md->pmd_classdep[PMC_MDEP_CLASS_INDEX_CMN600].pcd_class ==
	    PMC_CLASS_CMN600_PMU, ("[cmn600,%d] pmc class mismatch",
	    __LINE__));

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_CMN600];

	npmc = pcd->pcd_num;
	for (i = 0; i < npmc; i++) {
		free(cmn600_pmcdesc[i]->pd_phw, M_PMC);
		free(cmn600_pmcdesc[i], M_PMC);
	}
	free(cmn600_pmcdesc, M_PMC);
	cmn600_pmcdesc = NULL;
}

MODULE_DEPEND(pmc, cmn600, 1, 1, 1);