xref: /freebsd/sys/dev/hwpmc/hwpmc_uncore.c (revision 81ad626541db97eb356e2c1d4a20eb2a26a766ab)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Fabien Thomas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel Uncore PMCs.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>

#define	UCF_PMC_CAPS \
	(PMC_CAP_READ | PMC_CAP_WRITE)

#define	UCP_PMC_CAPS \
    (PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
    PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)

#define	SELECTSEL(x) \
	(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
	UCP_CB0_EVSEL0 : UCP_EVSEL0)

#define	SELECTOFF(x) \
	(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
	UCF_OFFSET_SB : UCF_OFFSET)
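
/*
 * On Sandy Bridge and Haswell the client uncore counters are programmed
 * through a different MSR block (the C-Box event selects, as the
 * UCP_CB0_EVSEL0 name suggests), and the fixed-counter enable bit sits
 * at a different offset in UC_GLOBAL_CTRL; SELECTSEL() and SELECTOFF()
 * pick the constants that match the running CPU type.
 */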

static enum pmc_cputype	uncore_cputype;

struct uncore_cpu {
	volatile uint32_t	pc_ucfctrl;	/* Fixed function control. */
	volatile uint64_t	pc_globalctrl;	/* Global control register. */
	struct pmc_hw		pc_uncorepmcs[];
};

static struct uncore_cpu **uncore_pcpu;

static uint64_t uncore_pmcmask;

static int uncore_ucf_ri;		/* relative index of fixed counters */
static int uncore_ucf_width;
static int uncore_ucf_npmc;

static int uncore_ucp_width;
static int uncore_ucp_npmc;

static int
uncore_pcpu_noop(struct pmc_mdep *md, int cpu)
{
	(void) md;
	(void) cpu;
	return (0);
}

static int
uncore_pcpu_init(struct pmc_mdep *md, int cpu)
{
	struct pmc_cpu *pc;
	struct uncore_cpu *cc;
	struct pmc_hw *phw;
	int uncore_ri, n, npmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"uncore-init cpu=%d", cpu);

	uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;
	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
	npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

	cc = malloc(sizeof(struct uncore_cpu) + npmc * sizeof(struct pmc_hw),
	    M_PMC, M_WAITOK | M_ZERO);

	uncore_pcpu[cpu] = cc;
	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL && cc != NULL,
	    ("[uncore,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));

	for (n = 0, phw = cc->pc_uncorepmcs; n < npmc; n++, phw++) {
		phw->phw_state	  = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) |
		    PMC_PHW_INDEX_TO_STATE(n + uncore_ri);
		phw->phw_pmc	  = NULL;
		pc->pc_hwpmcs[n + uncore_ri]  = phw;
	}

	return (0);
}

static int
uncore_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	int uncore_ri, n, npmc;
	struct pmc_cpu *pc;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] insane cpu number (%d)", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"uncore-pcpu-fini cpu=%d", cpu);

	if ((cc = uncore_pcpu[cpu]) == NULL)
		return (0);

	uncore_pcpu[cpu] = NULL;

	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL, ("[uncore,%d] NULL per-cpu %d state", __LINE__,
		cpu));

	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
	uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;

	for (n = 0; n < npmc; n++)
		wrmsr(SELECTSEL(uncore_cputype) + n, 0);

	wrmsr(UCF_CTRL, 0);
	npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

	for (n = 0; n < npmc; n++)
		pc->pc_hwpmcs[n + uncore_ri] = NULL;

	free(cc, M_PMC);

	return (0);
}

/*
 * Fixed function counters.
 */

static pmc_value_t
ucf_perfctr_value_to_reload_count(pmc_value_t v)
{

	/* If the PMC has overflowed, return a reload count of zero. */
	if ((v & (1ULL << (uncore_ucf_width - 1))) == 0)
		return (0);
	v &= (1ULL << uncore_ucf_width) - 1;
	return (1ULL << uncore_ucf_width) - v;
}

static pmc_value_t
ucf_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << uncore_ucf_width) - rlc;
}
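
/*
 * Worked example, assuming the 48-bit fixed-counter width configured in
 * pmc_uncore_initialize() below: a reload count of 1000 is written to
 * the counter as 2^48 - 1000 = 0xfffffffffc18, so the counter reaches
 * its overflow point after exactly 1000 events.  A value read back with
 * bit 47 clear means the counter has already wrapped, so the remaining
 * reload count is reported as zero.
 */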

static int
ucf_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint32_t flags;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	PMCDBG2(MDP,ALL,1, "ucf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);

	if (ri < 0 || ri >= uncore_ucf_npmc)
		return (EINVAL);

	if (a->pm_class != PMC_CLASS_UCF)
		return (EINVAL);

	flags = UCF_EN;

	pm->pm_md.pm_ucf.pm_ucf_ctrl = (flags << (ri * 4));

	PMCDBG1(MDP,ALL,2, "ucf-allocate config=0x%jx",
	    (uintmax_t) pm->pm_md.pm_ucf.pm_ucf_ctrl);

	return (0);
}
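
/*
 * Layout note: UCF_CTRL carries a 4-bit control field per fixed counter,
 * so counter 'ri' is enabled by shifting UCF_EN into bits [ri*4+3:ri*4];
 * ucf_stop_pmc() below clears the same field with UCF_MASK << (ri * 4).
 */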

static int
ucf_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "ucf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(uncore_pcpu[cpu] != NULL,
	    ("[uncore,%d] null per-cpu %d", __LINE__, cpu));

	uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc = pm;

	return (0);
}

static int
ucf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	struct pmc_hw *phw;
	char ucf_name[PMC_NAME_MAX];

	phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri];

	(void) snprintf(ucf_name, sizeof(ucf_name), "UCF-%d", ri);
	if ((error = copystr(ucf_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);

	pi->pm_class = PMC_CLASS_UCF;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return (0);
}

static int
ucf_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	return (0);
}

static int
ucf_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d(%d) pmc not configured", __LINE__, cpu,
		ri, ri + uncore_ucf_ri));

	tmp = rdmsr(UCF_CTR0 + ri);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ucf_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp;

	PMCDBG3(MDP,REA,1, "ucf-read cpu=%d ri=%d -> v=%jx", cpu, ri, *v);

	return (0);
}

static int
ucf_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	PMCDBG3(MDP,REL,1, "ucf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc ==
	    NULL, ("[uncore,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}

static int
ucf_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct uncore_cpu *ucfc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG2(MDP,STA,1,"ucf-start cpu=%d ri=%d", cpu, ri);

	ucfc = uncore_pcpu[cpu];
	pm = ucfc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	ucfc->pc_ucfctrl |= pm->pm_md.pm_ucf.pm_ucf_ctrl;

	wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

	ucfc->pc_globalctrl |= (1ULL << (ri + SELECTOFF(uncore_cputype)));
	wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);

	PMCDBG4(MDP,STA,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
	    ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
	    ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

	return (0);
}

static int
ucf_stop_pmc(int cpu, int ri)
{
	uint32_t fc;
	struct uncore_cpu *ucfc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG2(MDP,STO,1,"ucf-stop cpu=%d ri=%d", cpu, ri);

	ucfc = uncore_pcpu[cpu];

	fc = (UCF_MASK << (ri * 4));

	ucfc->pc_ucfctrl &= ~fc;

	PMCDBG1(MDP,STO,1,"ucf-stop ucfctrl=%x", ucfc->pc_ucfctrl);
	wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

	/* Don't need to write UC_GLOBAL_CTRL, one disable is enough. */

	PMCDBG4(MDP,STO,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
	    ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
	    ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

	return (0);
}

static int
ucf_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct uncore_cpu *cc;
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucf_reload_count_to_perfctr_value(v);

	wrmsr(UCF_CTRL, 0);	/* Turn off fixed counters. */
	wrmsr(UCF_CTR0 + ri, v);
	wrmsr(UCF_CTRL, cc->pc_ucfctrl);

	PMCDBG4(MDP,WRI,1, "ucf-write cpu=%d ri=%d v=%jx ucfctrl=%jx",
	    cpu, ri, v, (uintmax_t) rdmsr(UCF_CTRL));

	return (0);
}

static void
ucf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[ucf,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "ucf-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF];

	pcd->pcd_caps	= UCF_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_UCF;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= ucf_allocate_pmc;
	pcd->pcd_config_pmc	= ucf_config_pmc;
	pcd->pcd_describe	= ucf_describe;
	pcd->pcd_get_config	= ucf_get_config;
	pcd->pcd_get_msr	= NULL;
	pcd->pcd_pcpu_fini	= uncore_pcpu_noop;
	pcd->pcd_pcpu_init	= uncore_pcpu_noop;
	pcd->pcd_read_pmc	= ucf_read_pmc;
	pcd->pcd_release_pmc	= ucf_release_pmc;
	pcd->pcd_start_pmc	= ucf_start_pmc;
	pcd->pcd_stop_pmc	= ucf_stop_pmc;
	pcd->pcd_write_pmc	= ucf_write_pmc;

	md->pmd_npmc	       += npmc;
}

/*
 * Intel programmable PMCs.
 */

/*
 * Event descriptor tables.
 *
 * For each event id, we track:
 *
 * 1. The CPUs that the event is valid for.
 *
 * 2. If the event uses a fixed UMASK, the value of the umask field.
 *    If the event doesn't use a fixed UMASK, a mask of legal bits
 *    to check against.
 */

struct ucp_event_descr {
	enum pmc_event	ucp_ev;
	unsigned char	ucp_evcode;
	unsigned char	ucp_umask;
	unsigned char	ucp_flags;
};

#define	UCP_F_I7	(1 << 0)	/* CPU: Core i7 */
#define	UCP_F_WM	(1 << 1)	/* CPU: Westmere */
#define	UCP_F_SB	(1 << 2)	/* CPU: Sandy Bridge */
#define	UCP_F_HW	(1 << 3)	/* CPU: Haswell */
#define	UCP_F_FM	(1 << 4)	/* Fixed mask */

#define	UCP_F_ALLCPUS					\
    (UCP_F_I7 | UCP_F_WM)

#define	UCP_F_CMASK		0xFF000000
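
/*
 * Illustrative sketch only; this revision declares the descriptor shape
 * but carries no table.  A hypothetical entry for a Westmere-only event
 * with a fixed UMASK would read:
 *
 *	{ .ucp_ev = PMC_EV_UCP_EVENT_..., .ucp_evcode = 0x2d,
 *	  .ucp_umask = 0x07, .ucp_flags = UCP_F_WM | UCP_F_FM }
 *
 * UCP_F_FM marks ucp_umask as a fixed value to program verbatim; without
 * it, ucp_umask would be a mask of legal umask bits to validate against.
 */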

static pmc_value_t
ucp_perfctr_value_to_reload_count(pmc_value_t v)
{
	v &= (1ULL << uncore_ucp_width) - 1;
	return (1ULL << uncore_ucp_width) - v;
}

static pmc_value_t
ucp_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << uncore_ucp_width) - rlc;
}

/*
 * Counter-specific event information for Sandy Bridge and Haswell.
 */
static int
ucp_event_sb_hw_ok_on_counter(uint8_t ev, int ri)
{
	uint32_t mask;

	switch (ev) {
	/*
	 * Events valid only on counter 0.
	 */
	case 0x80:
	case 0x83:
		mask = (1 << 0);
		break;
	default:
		mask = ~0;	/* Any row index is ok. */
	}

	return (mask & (1 << ri));
}
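
/*
 * For example, event 0x80 with ri = 1 yields mask = (1 << 0), so
 * (mask & (1 << 1)) is zero and ucp_allocate_pmc() below rejects the
 * allocation with EINVAL; the same event on ri = 0 is accepted.
 */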

static int
ucp_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint8_t ev;
	const struct pmc_md_ucp_op_pmcallocate *ucp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index value %d", __LINE__, ri));

	if (a->pm_class != PMC_CLASS_UCP)
		return (EINVAL);

	ucp = &a->pm_md.pm_ucp;
	ev = UCP_EVSEL(ucp->pm_ucp_config);
	switch (uncore_cputype) {
	case PMC_CPU_INTEL_HASWELL:
	case PMC_CPU_INTEL_SANDYBRIDGE:
		if (ucp_event_sb_hw_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
		break;
	default:
		break;
	}

	pm->pm_md.pm_ucp.pm_ucp_evsel = ucp->pm_ucp_config | UCP_EN;

	return (0);
}

static int
ucp_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "ucp-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(uncore_pcpu[cpu] != NULL,
	    ("[uncore,%d] null per-cpu %d", __LINE__, cpu));

	uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc = pm;

	return (0);
}

static int
ucp_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	struct pmc_hw *phw;
	char ucp_name[PMC_NAME_MAX];

	phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri];

	(void) snprintf(ucp_name, sizeof(ucp_name), "UCP-%d", ri);
	if ((error = copystr(ucp_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);

	pi->pm_class = PMC_CLASS_UCP;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return (0);
}

static int
ucp_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

	return (0);
}

static int
ucp_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
		ri));

	tmp = rdmsr(UCP_PMC0 + ri);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ucp_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp;

	PMCDBG4(MDP,REA,1, "ucp-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
	    UCP_PMC0 + ri, *v);

	return (0);
}

static int
ucp_release_pmc(int cpu, int ri, struct pmc *pm)
{
	(void) pm;

	PMCDBG3(MDP,REL,1, "ucp-release cpu=%d ri=%d pm=%p", cpu, ri,
	    pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc
	    == NULL, ("[uncore,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}

static int
ucp_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	uint64_t evsel;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] starting cpu%d,ri%d with no pmc configured",
		__LINE__, cpu, ri));

	PMCDBG2(MDP,STA,1, "ucp-start cpu=%d ri=%d", cpu, ri);

	evsel = pm->pm_md.pm_ucp.pm_ucp_evsel;

	PMCDBG4(MDP,STA,2,
	    "ucp-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%jx",
	    cpu, ri, SELECTSEL(uncore_cputype) + ri, (uintmax_t) evsel);

	/* Event specific configuration. */
	switch (pm->pm_event) {
	case PMC_EV_UCP_EVENT_0CH_04H_E:
	case PMC_EV_UCP_EVENT_0CH_08H_E:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x2);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_F:
	case PMC_EV_UCP_EVENT_0CH_08H_F:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x8);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_M:
	case PMC_EV_UCP_EVENT_0CH_08H_M:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x1);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_S:
	case PMC_EV_UCP_EVENT_0CH_08H_S:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x4);
		break;
	default:
		break;
	}
	wrmsr(SELECTSEL(uncore_cputype) + ri, evsel);

	cc->pc_globalctrl |= (1ULL << ri);
	wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);

	return (0);
}

static int
ucp_stop_pmc(int cpu, int ri)
{
	struct pmc *pm __diagused;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
		cpu, ri));

	PMCDBG2(MDP,STO,1, "ucp-stop cpu=%d ri=%d", cpu, ri);

	/* Stop hw. */
	wrmsr(SELECTSEL(uncore_cputype) + ri, 0);

	/* Don't need to write UC_GLOBAL_CTRL, one disable is enough. */

	return (0);
}

static int
ucp_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu%d ri%d no configured PMC to write", __LINE__,
		cpu, ri));

	PMCDBG4(MDP,WRI,1, "ucp-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
	    UCP_PMC0 + ri, v);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucp_reload_count_to_perfctr_value(v);

	/*
	 * Write the new value to the counter.  The counter will be in
	 * a stopped state when the pcd_write() entry point is called.
	 */

	wrmsr(UCP_PMC0 + ri, v);

	return (0);
}

static void
ucp_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[ucp,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "ucp-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP];

	pcd->pcd_caps	= UCP_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_UCP;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= ucp_allocate_pmc;
	pcd->pcd_config_pmc	= ucp_config_pmc;
	pcd->pcd_describe	= ucp_describe;
	pcd->pcd_get_config	= ucp_get_config;
	pcd->pcd_get_msr	= NULL;
	pcd->pcd_pcpu_fini	= uncore_pcpu_fini;
	pcd->pcd_pcpu_init	= uncore_pcpu_init;
	pcd->pcd_read_pmc	= ucp_read_pmc;
	pcd->pcd_release_pmc	= ucp_release_pmc;
	pcd->pcd_start_pmc	= ucp_start_pmc;
	pcd->pcd_stop_pmc	= ucp_stop_pmc;
	pcd->pcd_write_pmc	= ucp_write_pmc;

	md->pmd_npmc	       += npmc;
}
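
/*
 * Row-index layout: each class's pcd_ri is the value of md->pmd_npmc at
 * registration time, so with the counts chosen in pmc_uncore_initialize()
 * below the eight UCP counters occupy rows ri .. ri+7 of the uncore block
 * and the single UCF counter follows at ri+8 (uncore_ucf_ri ==
 * uncore_ucp_npmc).
 */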

int
pmc_uncore_initialize(struct pmc_mdep *md, int maxcpu)
{
	uncore_cputype = md->pmd_cputype;
	uncore_pmcmask = 0;

	/*
	 * Initialize programmable counters.
	 */

	uncore_ucp_npmc  = 8;
	uncore_ucp_width = 48;

	uncore_pmcmask |= ((1ULL << uncore_ucp_npmc) - 1);

	ucp_initialize(md, maxcpu, uncore_ucp_npmc, uncore_ucp_width);

	/*
	 * Initialize fixed function counters, if present.
	 */
	uncore_ucf_ri = uncore_ucp_npmc;
	uncore_ucf_npmc  = 1;
	uncore_ucf_width = 48;

	ucf_initialize(md, maxcpu, uncore_ucf_npmc, uncore_ucf_width);
	uncore_pmcmask |= ((1ULL << uncore_ucf_npmc) - 1) <<
	    SELECTOFF(uncore_cputype);

	PMCDBG2(MDP,INI,1,"uncore-init pmcmask=0x%jx ucfri=%d", uncore_pmcmask,
	    uncore_ucf_ri);

	uncore_pcpu = malloc(sizeof(*uncore_pcpu) * maxcpu, M_PMC,
	    M_ZERO | M_WAITOK);

	return (0);
}
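
/*
 * Worked example of the mask built above: the eight programmable counters
 * contribute bits 0..7 (0xff), and the single fixed counter contributes
 * one bit at the SELECTOFF() offset for the running CPU type; together
 * these are the UC_GLOBAL_CTRL enable bits this driver manages.
 */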

void
pmc_uncore_finalize(struct pmc_mdep *md)
{
	PMCDBG0(MDP,INI,1, "uncore-finalize");

	free(uncore_pcpu, M_PMC);
	uncore_pcpu = NULL;
}