xref: /freebsd/sys/dev/hwpmc/hwpmc_uncore.c (revision 6be3386466ab79a84b48429ae66244f21526d3df)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Fabien Thomas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel Uncore PMCs.
 */
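
/*
 * Uncore counters are shared by all the cores in a package, but this
 * driver programs them through per-CPU MSR accesses, just like the core
 * PMCs.  Two PMC classes are exposed: UCF, the fixed-function counter(s),
 * and UCP, the programmable counters.  The supported CPU families are
 * those named by the UCP_F_* flags below (Core i7, Westmere, Sandy
 * Bridge and Haswell).
 */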

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>

#include <machine/intr_machdep.h>
#if (__FreeBSD_version >= 1100000)
#include <x86/apicvar.h>
#else
#include <machine/apicvar.h>
#endif
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>

#define	UCF_PMC_CAPS \
	(PMC_CAP_READ | PMC_CAP_WRITE)

#define	UCP_PMC_CAPS \
    (PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
    PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)

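/*
 * Sandy Bridge and Haswell use a different uncore MSR layout from
 * Nehalem/Westmere: the programmable event selectors start at the C-box
 * base (UCP_CB0_EVSEL0 rather than UCP_EVSEL0), and the fixed counter's
 * enable bit sits at a different offset within UC_GLOBAL_CTRL, so both
 * locations are chosen by CPU type.
 */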
#define	SELECTSEL(x) \
	(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
	UCP_CB0_EVSEL0 : UCP_EVSEL0)

#define	SELECTOFF(x) \
	(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
	UCF_OFFSET_SB : UCF_OFFSET)

static enum pmc_cputype	uncore_cputype;

struct uncore_cpu {
	volatile uint32_t	pc_resync;
	volatile uint32_t	pc_ucfctrl;	/* Fixed function control. */
	volatile uint64_t	pc_globalctrl;	/* Global control register. */
	struct pmc_hw		pc_uncorepmcs[];
};

static struct uncore_cpu **uncore_pcpu;

static uint64_t uncore_pmcmask;

static int uncore_ucf_ri;		/* relative index of fixed counters */
static int uncore_ucf_width;
static int uncore_ucf_npmc;

static int uncore_ucp_width;
static int uncore_ucp_npmc;

static int
uncore_pcpu_noop(struct pmc_mdep *md, int cpu)
{
	(void) md;
	(void) cpu;
	return (0);
}
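
/*
 * Per-CPU state is managed through the UCP class only (see
 * ucp_initialize()): uncore_pcpu_init() sizes its pmc_hw array to cover
 * both the programmable and the fixed rows, while the UCF class points
 * its per-CPU hooks at the no-op above.
 */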
static int
uncore_pcpu_init(struct pmc_mdep *md, int cpu)
{
	struct pmc_cpu *pc;
	struct uncore_cpu *cc;
	struct pmc_hw *phw;
	int uncore_ri, n, npmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"uncore-init cpu=%d", cpu);

	uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;
	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
	npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

	cc = malloc(sizeof(struct uncore_cpu) + npmc * sizeof(struct pmc_hw),
	    M_PMC, M_WAITOK | M_ZERO);

	uncore_pcpu[cpu] = cc;
	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL && cc != NULL,
	    ("[uncore,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));

	for (n = 0, phw = cc->pc_uncorepmcs; n < npmc; n++, phw++) {
		phw->phw_state	= PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) |
		    PMC_PHW_INDEX_TO_STATE(n + uncore_ri);
		phw->phw_pmc	= NULL;
		pc->pc_hwpmcs[n + uncore_ri] = phw;
	}

	return (0);
}

static int
uncore_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	int uncore_ri, n, npmc;
	struct pmc_cpu *pc;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] insane cpu number (%d)", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"uncore-pcpu-fini cpu=%d", cpu);

	if ((cc = uncore_pcpu[cpu]) == NULL)
		return (0);

	uncore_pcpu[cpu] = NULL;

	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL, ("[uncore,%d] NULL per-cpu %d state", __LINE__,
		cpu));

	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
	uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;

	for (n = 0; n < npmc; n++)
		wrmsr(SELECTSEL(uncore_cputype) + n, 0);

	wrmsr(UCF_CTRL, 0);
	npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

	for (n = 0; n < npmc; n++)
		pc->pc_hwpmcs[n + uncore_ri] = NULL;

	free(cc, M_PMC);

	return (0);
}

/*
 * Fixed function counters.
 */

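/*
 * A sampling-mode counter counts upward from a programmed starting value
 * and overflows after "reload count" events, so a reload count R
 * corresponds to a raw counter value of (2^width - R), and is recovered
 * the same way on read.  For example, with a 48-bit counter a reload
 * count of 1000 is stored as 0x1000000000000 - 1000.
 */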
static pmc_value_t
ucf_perfctr_value_to_reload_count(pmc_value_t v)
{
	v &= (1ULL << uncore_ucf_width) - 1;
	return (1ULL << uncore_ucf_width) - v;
}

static pmc_value_t
ucf_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << uncore_ucf_width) - rlc;
}

static int
ucf_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint32_t caps, flags;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	PMCDBG2(MDP,ALL,1, "ucf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);

	if (ri < 0 || ri >= uncore_ucf_npmc)
		return (EINVAL);

	caps = a->pm_caps;

	if (a->pm_class != PMC_CLASS_UCF ||
	    (caps & UCF_PMC_CAPS) != caps)
		return (EINVAL);

	flags = UCF_EN;

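	/* Each fixed-function counter has a 4-bit control field in UCF_CTRL. */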
	pm->pm_md.pm_ucf.pm_ucf_ctrl = (flags << (ri * 4));

	PMCDBG1(MDP,ALL,2, "ucf-allocate config=0x%jx",
	    (uintmax_t) pm->pm_md.pm_ucf.pm_ucf_ctrl);

	return (0);
}

static int
ucf_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "ucf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d",
	    __LINE__, cpu));

	uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc = pm;

	return (0);
}

static int
ucf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	struct pmc_hw *phw;
	char ucf_name[PMC_NAME_MAX];

	phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri];

	(void) snprintf(ucf_name, sizeof(ucf_name), "UCF-%d", ri);
	if ((error = copystr(ucf_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);

	pi->pm_class = PMC_CLASS_UCF;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return (0);
}

static int
ucf_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	return (0);
}

static int
ucf_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d(%d) pmc not configured", __LINE__, cpu,
		ri, ri + uncore_ucf_ri));

	tmp = rdmsr(UCF_CTR0 + ri);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ucf_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp;

	PMCDBG3(MDP,REA,1, "ucf-read cpu=%d ri=%d -> v=%jx", cpu, ri, *v);

	return (0);
}

static int
ucf_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	PMCDBG3(MDP,REL,1, "ucf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc == NULL,
	    ("[uncore,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}

static int
ucf_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct uncore_cpu *ucfc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG2(MDP,STA,1,"ucf-start cpu=%d ri=%d", cpu, ri);

	ucfc = uncore_pcpu[cpu];
	pm = ucfc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	ucfc->pc_ucfctrl |= pm->pm_md.pm_ucf.pm_ucf_ctrl;

	wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

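	/*
	 * Set this counter's enable bit in UC_GLOBAL_CTRL.  The write is
	 * retried until no resync request (pc_resync) is seen to intervene.
	 */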
	do {
		ucfc->pc_resync = 0;
		ucfc->pc_globalctrl |= (1ULL << (ri + SELECTOFF(uncore_cputype)));
		wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);
	} while (ucfc->pc_resync != 0);

	PMCDBG4(MDP,STA,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
	    ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
	    ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

	return (0);
}

static int
ucf_stop_pmc(int cpu, int ri)
{
	uint32_t fc;
	struct uncore_cpu *ucfc;

	PMCDBG2(MDP,STO,1,"ucf-stop cpu=%d ri=%d", cpu, ri);

	ucfc = uncore_pcpu[cpu];

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	fc = (UCF_MASK << (ri * 4));

	ucfc->pc_ucfctrl &= ~fc;

	PMCDBG1(MDP,STO,1,"ucf-stop ucfctrl=%x", ucfc->pc_ucfctrl);
	wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

	do {
		ucfc->pc_resync = 0;
		ucfc->pc_globalctrl &= ~(1ULL << (ri + SELECTOFF(uncore_cputype)));
		wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);
	} while (ucfc->pc_resync != 0);

	PMCDBG4(MDP,STO,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
	    ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
	    ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

	return (0);
}

static int
ucf_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct uncore_cpu *cc;
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucf_reload_count_to_perfctr_value(v);

	wrmsr(UCF_CTRL, 0);	/* Turn off fixed counters. */
	wrmsr(UCF_CTR0 + ri, v);
	wrmsr(UCF_CTRL, cc->pc_ucfctrl);

	PMCDBG4(MDP,WRI,1, "ucf-write cpu=%d ri=%d v=%jx ucfctrl=%jx",
	    cpu, ri, v, (uintmax_t) rdmsr(UCF_CTRL));

	return (0);
}

static void
ucf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[ucf,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "ucf-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF];

	pcd->pcd_caps	= UCF_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_UCF;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= ucf_allocate_pmc;
	pcd->pcd_config_pmc	= ucf_config_pmc;
	pcd->pcd_describe	= ucf_describe;
	pcd->pcd_get_config	= ucf_get_config;
	pcd->pcd_get_msr	= NULL;
	pcd->pcd_pcpu_fini	= uncore_pcpu_noop;
	pcd->pcd_pcpu_init	= uncore_pcpu_noop;
	pcd->pcd_read_pmc	= ucf_read_pmc;
	pcd->pcd_release_pmc	= ucf_release_pmc;
	pcd->pcd_start_pmc	= ucf_start_pmc;
	pcd->pcd_stop_pmc	= ucf_stop_pmc;
	pcd->pcd_write_pmc	= ucf_write_pmc;

	md->pmd_npmc	       += npmc;
}

/*
 * Intel programmable PMCs.
 */

/*
 * Event descriptor tables.
 *
 * For each event id, we track:
 *
 * 1. The CPUs that the event is valid for.
 *
 * 2. If the event uses a fixed UMASK, the value of the umask field.
 *    If the event doesn't use a fixed UMASK, a mask of legal bits
 *    to check against.
 */

struct ucp_event_descr {
	enum pmc_event	ucp_ev;
	unsigned char	ucp_evcode;
	unsigned char	ucp_umask;
	unsigned char	ucp_flags;
};

#define	UCP_F_I7	(1 << 0)	/* CPU: Core i7 */
#define	UCP_F_WM	(1 << 1)	/* CPU: Westmere */
#define	UCP_F_SB	(1 << 2)	/* CPU: Sandy Bridge */
#define	UCP_F_HW	(1 << 3)	/* CPU: Haswell */
#define	UCP_F_FM	(1 << 4)	/* Fixed mask */

#define	UCP_F_ALLCPUS					\
    (UCP_F_I7 | UCP_F_WM)

#define	UCP_F_CMASK		0xFF000000

static pmc_value_t
ucp_perfctr_value_to_reload_count(pmc_value_t v)
{
	v &= (1ULL << uncore_ucp_width) - 1;
	return (1ULL << uncore_ucp_width) - v;
}

static pmc_value_t
ucp_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << uncore_ucp_width) - rlc;
}

/*
 * Counter-specific event information for Sandy Bridge and Haswell.
 */
static int
ucp_event_sb_hw_ok_on_counter(uint8_t ev, int ri)
{
	uint32_t mask;

	switch (ev) {
	/*
	 * Events valid only on counter 0.
	 */
	case 0x80:
	case 0x83:
		mask = (1 << 0);
		break;

	default:
		mask = ~0;	/* Any row index is ok. */
	}

	return (mask & (1 << ri));
}

static int
ucp_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint8_t ev;
	uint32_t caps;
	const struct pmc_md_ucp_op_pmcallocate *ucp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index value %d", __LINE__, ri));

	/* Check requested capabilities. */
	caps = a->pm_caps;
	if ((UCP_PMC_CAPS & caps) != caps)
		return (EPERM);

	ucp = &a->pm_md.pm_ucp;
	ev = UCP_EVSEL(ucp->pm_ucp_config);
	switch (uncore_cputype) {
	case PMC_CPU_INTEL_HASWELL:
	case PMC_CPU_INTEL_SANDYBRIDGE:
		if (ucp_event_sb_hw_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
		break;
	default:
		break;
	}

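	/*
	 * The event selector is the user-supplied configuration plus the
	 * enable bit; ucp_start_pmc() writes it to the event-select MSR.
	 */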
	pm->pm_md.pm_ucp.pm_ucp_evsel = ucp->pm_ucp_config | UCP_EN;

	return (0);
}

static int
ucp_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "ucp-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d",
	    __LINE__, cpu));

	uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc = pm;

	return (0);
}

static int
ucp_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	struct pmc_hw *phw;
	char ucp_name[PMC_NAME_MAX];

	phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri];

	(void) snprintf(ucp_name, sizeof(ucp_name), "UCP-%d", ri);
	if ((error = copystr(ucp_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);

	pi->pm_class = PMC_CLASS_UCP;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return (0);
}

static int
ucp_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

	return (0);
}

static int
ucp_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
		ri));

	tmp = rdmsr(UCP_PMC0 + ri);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ucp_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp;

	PMCDBG4(MDP,REA,1, "ucp-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
	    UCP_PMC0 + ri, *v);

	return (0);
}

static int
ucp_release_pmc(int cpu, int ri, struct pmc *pm)
{
	(void) pm;

	PMCDBG3(MDP,REL,1, "ucp-release cpu=%d ri=%d pm=%p", cpu, ri,
	    pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc
	    == NULL, ("[uncore,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}

static int
ucp_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	uint32_t evsel;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] starting cpu%d,ri%d with no pmc configured",
		__LINE__, cpu, ri));

	PMCDBG2(MDP,STA,1, "ucp-start cpu=%d ri=%d", cpu, ri);

	evsel = pm->pm_md.pm_ucp.pm_ucp_evsel;

	PMCDBG4(MDP,STA,2,
	    "ucp-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%x",
	    cpu, ri, SELECTSEL(uncore_cputype) + ri, evsel);

	/* Event specific configuration. */
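	/*
	 * For the GQ snoop events (0CH_04H/0CH_08H), MSR_GQ_SNOOP_MESF
	 * selects which cache-line state is counted: M=0x1, E=0x2, S=0x4
	 * and F=0x8, matching the _M/_E/_S/_F suffix of the event name.
	 */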
	switch (pm->pm_event) {
	case PMC_EV_UCP_EVENT_0CH_04H_E:
	case PMC_EV_UCP_EVENT_0CH_08H_E:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x2);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_F:
	case PMC_EV_UCP_EVENT_0CH_08H_F:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x8);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_M:
	case PMC_EV_UCP_EVENT_0CH_08H_M:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x1);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_S:
	case PMC_EV_UCP_EVENT_0CH_08H_S:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x4);
		break;
	default:
		break;
	}
	wrmsr(SELECTSEL(uncore_cputype) + ri, evsel);

	do {
		cc->pc_resync = 0;
		cc->pc_globalctrl |= (1ULL << ri);
		wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);
	} while (cc->pc_resync != 0);

	return (0);
}

static int
ucp_stop_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
		cpu, ri));

	PMCDBG2(MDP,STO,1, "ucp-stop cpu=%d ri=%d", cpu, ri);

	/* Stop the hardware counter. */
	wrmsr(SELECTSEL(uncore_cputype) + ri, 0);

	do {
		cc->pc_resync = 0;
		cc->pc_globalctrl &= ~(1ULL << ri);
		wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);
	} while (cc->pc_resync != 0);

	return (0);
}

static int
ucp_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu%d ri%d no configured PMC to write", __LINE__,
		cpu, ri));

	PMCDBG4(MDP,WRI,1, "ucp-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
	    UCP_PMC0 + ri, v);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucp_reload_count_to_perfctr_value(v);

	/*
	 * Write the new value to the counter.  The counter will be in
	 * a stopped state when the pcd_write() entry point is called.
	 */
	wrmsr(UCP_PMC0 + ri, v);

	return (0);
}

static void
ucp_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[ucp,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "ucp-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP];

	pcd->pcd_caps	= UCP_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_UCP;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= ucp_allocate_pmc;
	pcd->pcd_config_pmc	= ucp_config_pmc;
	pcd->pcd_describe	= ucp_describe;
	pcd->pcd_get_config	= ucp_get_config;
	pcd->pcd_get_msr	= NULL;
	pcd->pcd_pcpu_fini	= uncore_pcpu_fini;
	pcd->pcd_pcpu_init	= uncore_pcpu_init;
	pcd->pcd_read_pmc	= ucp_read_pmc;
	pcd->pcd_release_pmc	= ucp_release_pmc;
	pcd->pcd_start_pmc	= ucp_start_pmc;
	pcd->pcd_stop_pmc	= ucp_stop_pmc;
	pcd->pcd_write_pmc	= ucp_write_pmc;

	md->pmd_npmc	       += npmc;
}

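/*
 * pmc_uncore_initialize() is called from the Intel machine-dependent
 * initialization code for uncore-capable CPUs.  The counter counts and
 * widths below are hard-coded rather than probed from CPUID.
 */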
int
pmc_uncore_initialize(struct pmc_mdep *md, int maxcpu)
{
	uncore_cputype = md->pmd_cputype;
	uncore_pmcmask = 0;

	/*
	 * Initialize programmable counters.
	 */

	uncore_ucp_npmc  = 8;
	uncore_ucp_width = 48;

	uncore_pmcmask |= ((1ULL << uncore_ucp_npmc) - 1);

	ucp_initialize(md, maxcpu, uncore_ucp_npmc, uncore_ucp_width);

	/*
	 * Initialize fixed function counters, if present.
	 */
	uncore_ucf_ri = uncore_ucp_npmc;
	uncore_ucf_npmc  = 1;
	uncore_ucf_width = 48;

	ucf_initialize(md, maxcpu, uncore_ucf_npmc, uncore_ucf_width);
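
	/*
	 * The fixed counter's enable bit lives above the programmable
	 * counters' bits in UC_GLOBAL_CTRL; SELECTOFF() gives its bit
	 * position for this CPU type.
	 */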
	uncore_pmcmask |= ((1ULL << uncore_ucf_npmc) - 1) <<
	    SELECTOFF(uncore_cputype);

	PMCDBG2(MDP,INI,1,"uncore-init pmcmask=0x%jx ucfri=%d", uncore_pmcmask,
	    uncore_ucf_ri);

	uncore_pcpu = malloc(sizeof(*uncore_pcpu) * maxcpu, M_PMC,
	    M_ZERO | M_WAITOK);

	return (0);
}

void
pmc_uncore_finalize(struct pmc_mdep *md)
{
	PMCDBG0(MDP,INI,1, "uncore-finalize");

	free(uncore_pcpu, M_PMC);
	uncore_pcpu = NULL;
}