/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Fabien Thomas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel Uncore PMCs.
 */
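/*
 * Note: the counters handled here are the per-package "uncore" PMCs of
 * Nehalem/Westmere-class parts (and, with a different MSR layout, of
 * Sandy Bridge/Haswell client parts).  Unlike the core PMCs they are
 * shared by all cores in a package, are programmed purely through MSRs,
 * and have no CPUID enumeration leaf.
 */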

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>

#include <machine/intr_machdep.h>
#if (__FreeBSD_version >= 1100000)
#include <x86/apicvar.h>
#else
#include <machine/apicvar.h>
#endif
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>

#define	UCF_PMC_CAPS \
	(PMC_CAP_READ | PMC_CAP_WRITE)

#define	UCP_PMC_CAPS \
    (PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
    PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)

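/*
 * Sandy Bridge and Haswell client uncores use a different event select
 * MSR base (the C-box selects) and a different bit offset for the fixed
 * counter enable in UC_GLOBAL_CTRL, hence the selection by cpu type
 * below.
 */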
#define	SELECTSEL(x) \
	(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
	UCP_CB0_EVSEL0 : UCP_EVSEL0)

#define	SELECTOFF(x) \
	(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
	UCF_OFFSET_SB : UCF_OFFSET)

static enum pmc_cputype	uncore_cputype;

struct uncore_cpu {
	volatile uint32_t	pc_ucfctrl;	/* Fixed function control. */
	volatile uint64_t	pc_globalctrl;	/* Global control register. */
	struct pmc_hw		pc_uncorepmcs[];
};
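/*
 * pc_uncorepmcs[] is a C99 flexible array member; uncore_pcpu_init()
 * sizes each allocation to hold all programmable plus fixed rows.
 */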

static struct uncore_cpu **uncore_pcpu;

static uint64_t uncore_pmcmask;

static int uncore_ucf_ri;		/* relative index of fixed counters */
static int uncore_ucf_width;
static int uncore_ucf_npmc;

static int uncore_ucp_width;
static int uncore_ucp_npmc;

static int
uncore_pcpu_noop(struct pmc_mdep *md, int cpu)
{
	(void) md;
	(void) cpu;
	return (0);
}

static int
uncore_pcpu_init(struct pmc_mdep *md, int cpu)
{
	struct pmc_cpu *pc;
	struct uncore_cpu *cc;
	struct pmc_hw *phw;
	int uncore_ri, n, npmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"uncore-init cpu=%d", cpu);

	uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;
	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
	npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

	cc = malloc(sizeof(struct uncore_cpu) + npmc * sizeof(struct pmc_hw),
	    M_PMC, M_WAITOK | M_ZERO);

	uncore_pcpu[cpu] = cc;
	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL && cc != NULL,
	    ("[uncore,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));

	for (n = 0, phw = cc->pc_uncorepmcs; n < npmc; n++, phw++) {
		phw->phw_state	  = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) |
		    PMC_PHW_INDEX_TO_STATE(n + uncore_ri);
		phw->phw_pmc	  = NULL;
		pc->pc_hwpmcs[n + uncore_ri]  = phw;
	}

	return (0);
}

static int
uncore_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	int uncore_ri, n, npmc;
	struct pmc_cpu *pc;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] insane cpu number (%d)", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"uncore-pcpu-fini cpu=%d", cpu);

	if ((cc = uncore_pcpu[cpu]) == NULL)
		return (0);

	uncore_pcpu[cpu] = NULL;

	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL, ("[uncore,%d] NULL per-cpu %d state", __LINE__,
		cpu));

	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
	uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;

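	/*
	 * Disable the programmable event selects and the fixed counter
	 * control, then detach every hardware descriptor; npmc grows to
	 * cover both classes before the second loop.
	 */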
	for (n = 0; n < npmc; n++)
		wrmsr(SELECTSEL(uncore_cputype) + n, 0);

	wrmsr(UCF_CTRL, 0);
	npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

	for (n = 0; n < npmc; n++)
		pc->pc_hwpmcs[n + uncore_ri] = NULL;

	free(cc, M_PMC);

	return (0);
}

/*
 * Fixed function counters.
 */

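/*
 * A sampling PMC is preloaded with (2^width - reload_count) so that it
 * overflows after reload_count events.  For example, with the 48-bit
 * fixed counter and a reload count of 1000, the value written is
 * 2^48 - 1000; converting back subtracts the current counter value
 * from 2^48.
 */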
static pmc_value_t
ucf_perfctr_value_to_reload_count(pmc_value_t v)
{

	/* If the PMC has overflowed, return a reload count of zero. */
	if ((v & (1ULL << (uncore_ucf_width - 1))) == 0)
		return (0);
	v &= (1ULL << uncore_ucf_width) - 1;
	return (1ULL << uncore_ucf_width) - v;
}

static pmc_value_t
ucf_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << uncore_ucf_width) - rlc;
}

static int
ucf_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint32_t flags;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	PMCDBG2(MDP,ALL,1, "ucf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);

	if (ri < 0 || ri >= uncore_ucf_npmc)
		return (EINVAL);

	if (a->pm_class != PMC_CLASS_UCF)
		return (EINVAL);

	flags = UCF_EN;

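	/* Each fixed counter owns a 4-bit enable field in UCF_CTRL. */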
	pm->pm_md.pm_ucf.pm_ucf_ctrl = (flags << (ri * 4));

	PMCDBG1(MDP,ALL,2, "ucf-allocate config=0x%jx",
	    (uintmax_t) pm->pm_md.pm_ucf.pm_ucf_ctrl);

	return (0);
}

static int
ucf_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "ucf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__,
	    cpu));

	uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc = pm;

	return (0);
}

static int
ucf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	struct pmc_hw *phw;
	char ucf_name[PMC_NAME_MAX];

	phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri];

	(void) snprintf(ucf_name, sizeof(ucf_name), "UCF-%d", ri);
	if ((error = copystr(ucf_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);

	pi->pm_class = PMC_CLASS_UCF;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return (0);
}

static int
ucf_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	return (0);
}

static int
ucf_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d(%d) pmc not configured", __LINE__, cpu,
		ri, ri + uncore_ucf_ri));

	tmp = rdmsr(UCF_CTR0 + ri);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ucf_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp;

	PMCDBG3(MDP,REA,1, "ucf-read cpu=%d ri=%d -> v=%jx", cpu, ri, *v);

	return (0);
}

static int
ucf_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	PMCDBG3(MDP,REL,1, "ucf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc == NULL,
	    ("[uncore,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}

static int
ucf_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct uncore_cpu *ucfc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG2(MDP,STA,1,"ucf-start cpu=%d ri=%d", cpu, ri);

	ucfc = uncore_pcpu[cpu];
	pm = ucfc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	ucfc->pc_ucfctrl |= pm->pm_md.pm_ucf.pm_ucf_ctrl;

	wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

	ucfc->pc_globalctrl |= (1ULL << (ri + SELECTOFF(uncore_cputype)));
	wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);

	PMCDBG4(MDP,STA,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
	    ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
	    ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

	return (0);
}

static int
ucf_stop_pmc(int cpu, int ri)
{
	uint32_t fc;
	struct uncore_cpu *ucfc;

	PMCDBG2(MDP,STO,1,"ucf-stop cpu=%d ri=%d", cpu, ri);

	ucfc = uncore_pcpu[cpu];

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	fc = (UCF_MASK << (ri * 4));

	ucfc->pc_ucfctrl &= ~fc;

	PMCDBG1(MDP,STO,1,"ucf-stop ucfctrl=%x", ucfc->pc_ucfctrl);
	wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

	/* Don't need to write UC_GLOBAL_CTRL, one disable is enough. */

	PMCDBG4(MDP,STO,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
	    ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
	    ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

	return (0);
}

static int
ucf_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct uncore_cpu *cc;
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucf_reload_count_to_perfctr_value(v);

	wrmsr(UCF_CTRL, 0);	/* Turn off fixed counters */
	wrmsr(UCF_CTR0 + ri, v);
	wrmsr(UCF_CTRL, cc->pc_ucfctrl);

	PMCDBG4(MDP,WRI,1, "ucf-write cpu=%d ri=%d v=%jx ucfctrl=%jx ",
	    cpu, ri, v, (uintmax_t) rdmsr(UCF_CTRL));

	return (0);
}

static void
ucf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[ucf,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "ucf-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF];

	pcd->pcd_caps	= UCF_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_UCF;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= ucf_allocate_pmc;
	pcd->pcd_config_pmc	= ucf_config_pmc;
	pcd->pcd_describe	= ucf_describe;
	pcd->pcd_get_config	= ucf_get_config;
	pcd->pcd_get_msr	= NULL;
	pcd->pcd_pcpu_fini	= uncore_pcpu_noop;
	pcd->pcd_pcpu_init	= uncore_pcpu_noop;
	pcd->pcd_read_pmc	= ucf_read_pmc;
	pcd->pcd_release_pmc	= ucf_release_pmc;
	pcd->pcd_start_pmc	= ucf_start_pmc;
	pcd->pcd_stop_pmc	= ucf_stop_pmc;
	pcd->pcd_write_pmc	= ucf_write_pmc;

	md->pmd_npmc	       += npmc;
}

/*
 * Intel programmable PMCs.
 */

/*
 * Event descriptor tables.
 *
 * For each event id, we track:
 *
 * 1. The CPUs that the event is valid for.
 *
 * 2. If the event uses a fixed UMASK, the value of the umask field.
 *    If the event doesn't use a fixed UMASK, a mask of legal bits
 *    to check against.
 */

struct ucp_event_descr {
	enum pmc_event	ucp_ev;
	unsigned char	ucp_evcode;
	unsigned char	ucp_umask;
	unsigned char	ucp_flags;
};

#define	UCP_F_I7	(1 << 0)	/* CPU: Core i7 */
#define	UCP_F_WM	(1 << 1)	/* CPU: Westmere */
#define	UCP_F_SB	(1 << 2)	/* CPU: Sandy Bridge */
#define	UCP_F_HW	(1 << 3)	/* CPU: Haswell */
#define	UCP_F_FM	(1 << 4)	/* Fixed mask */

#define	UCP_F_ALLCPUS					\
    (UCP_F_I7 | UCP_F_WM)

#define	UCP_F_CMASK		0xFF000000
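
/*
 * For illustration, an event table entry would pair an event id with
 * its event code, umask and validity flags, e.g. (hypothetical values):
 *
 *	{ PMC_EV_UCP_EVENT_0CH_04H_E, 0x0c, 0x04, UCP_F_FM | UCP_F_I7 }
 */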

static pmc_value_t
ucp_perfctr_value_to_reload_count(pmc_value_t v)
{
	v &= (1ULL << uncore_ucp_width) - 1;
	return (1ULL << uncore_ucp_width) - v;
}

static pmc_value_t
ucp_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << uncore_ucp_width) - rlc;
}

/*
 * Counter-specific event information for Sandy Bridge and Haswell.
 */
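/*
 * Events 0x80 and 0x83 appear to be the ARB occupancy events, which
 * are only valid on (client uncore) counter 0.
 */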
static int
ucp_event_sb_hw_ok_on_counter(uint8_t ev, int ri)
{
	uint32_t mask;

	switch (ev) {
	/*
	 * Events valid only on counter 0.
	 */
	case 0x80:
	case 0x83:
		mask = (1 << 0);
		break;

	default:
		mask = ~0;	/* Any row index is ok. */
	}

	return (mask & (1 << ri));
}

static int
ucp_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint8_t ev;
	const struct pmc_md_ucp_op_pmcallocate *ucp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index value %d", __LINE__, ri));

	if (a->pm_class != PMC_CLASS_UCP)
		return (EINVAL);

	ucp = &a->pm_md.pm_ucp;
	ev = UCP_EVSEL(ucp->pm_ucp_config);
	switch (uncore_cputype) {
	case PMC_CPU_INTEL_HASWELL:
	case PMC_CPU_INTEL_SANDYBRIDGE:
		if (ucp_event_sb_hw_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
		break;
	default:
		break;
	}

	pm->pm_md.pm_ucp.pm_ucp_evsel = ucp->pm_ucp_config | UCP_EN;

	return (0);
}

static int
ucp_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "ucp-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__,
	    cpu));

	uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc = pm;

	return (0);
}

static int
ucp_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	struct pmc_hw *phw;
	char ucp_name[PMC_NAME_MAX];

	phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri];

	(void) snprintf(ucp_name, sizeof(ucp_name), "UCP-%d", ri);
	if ((error = copystr(ucp_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);

	pi->pm_class = PMC_CLASS_UCP;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return (0);
}

static int
ucp_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

	return (0);
}

static int
ucp_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
		ri));

	tmp = rdmsr(UCP_PMC0 + ri);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ucp_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp;

	PMCDBG4(MDP,REA,1, "ucp-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
	    UCP_PMC0 + ri, *v);

	return (0);
}

static int
ucp_release_pmc(int cpu, int ri, struct pmc *pm)
{
	(void) pm;

	PMCDBG3(MDP,REL,1, "ucp-release cpu=%d ri=%d pm=%p", cpu, ri,
	    pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc
	    == NULL, ("[uncore,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}

static int
ucp_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	uint64_t evsel;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] starting cpu%d,ri%d with no pmc configured",
		__LINE__, cpu, ri));

	PMCDBG2(MDP,STA,1, "ucp-start cpu=%d ri=%d", cpu, ri);

	evsel = pm->pm_md.pm_ucp.pm_ucp_evsel;

	PMCDBG4(MDP,STA,2,
	    "ucp-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%jx",
	    cpu, ri, SELECTSEL(uncore_cputype) + ri, (uintmax_t) evsel);

	/* Event specific configuration. */
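	/*
	 * The Nehalem GQ snoop events select a cache state via
	 * MSR_GQ_SNOOP_MESF; the values written below correspond to
	 * M=0x1, E=0x2, S=0x4 and F=0x8.
	 */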
	switch (pm->pm_event) {
	case PMC_EV_UCP_EVENT_0CH_04H_E:
	case PMC_EV_UCP_EVENT_0CH_08H_E:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x2);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_F:
	case PMC_EV_UCP_EVENT_0CH_08H_F:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x8);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_M:
	case PMC_EV_UCP_EVENT_0CH_08H_M:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x1);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_S:
	case PMC_EV_UCP_EVENT_0CH_08H_S:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x4);
		break;
	default:
		break;
	}
	wrmsr(SELECTSEL(uncore_cputype) + ri, evsel);

	cc->pc_globalctrl |= (1ULL << ri);
	wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);

	return (0);
}

static int
ucp_stop_pmc(int cpu, int ri)
{
	struct pmc *pm __diagused;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
		cpu, ri));

	PMCDBG2(MDP,STO,1, "ucp-stop cpu=%d ri=%d", cpu, ri);

	/* Stop hw. */
	wrmsr(SELECTSEL(uncore_cputype) + ri, 0);

	/* Don't need to write UC_GLOBAL_CTRL, one disable is enough. */

	return (0);
}

static int
ucp_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu%d ri%d no configured PMC to write", __LINE__,
		cpu, ri));

	PMCDBG4(MDP,WRI,1, "ucp-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
	    UCP_PMC0 + ri, v);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucp_reload_count_to_perfctr_value(v);

	/*
	 * Write the new value to the counter.  The counter will be in
	 * a stopped state when the pcd_write() entry point is called.
	 */

	wrmsr(UCP_PMC0 + ri, v);

	return (0);
}

static void
ucp_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[ucp,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "ucp-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP];

	pcd->pcd_caps	= UCP_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_UCP;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= ucp_allocate_pmc;
	pcd->pcd_config_pmc	= ucp_config_pmc;
	pcd->pcd_describe	= ucp_describe;
	pcd->pcd_get_config	= ucp_get_config;
	pcd->pcd_get_msr	= NULL;
	pcd->pcd_pcpu_fini	= uncore_pcpu_fini;
	pcd->pcd_pcpu_init	= uncore_pcpu_init;
	pcd->pcd_read_pmc	= ucp_read_pmc;
	pcd->pcd_release_pmc	= ucp_release_pmc;
	pcd->pcd_start_pmc	= ucp_start_pmc;
	pcd->pcd_stop_pmc	= ucp_stop_pmc;
	pcd->pcd_write_pmc	= ucp_write_pmc;

	md->pmd_npmc	       += npmc;
}

int
pmc_uncore_initialize(struct pmc_mdep *md, int maxcpu)
{
	uncore_cputype = md->pmd_cputype;
	uncore_pmcmask = 0;

	/*
	 * Initialize programmable counters.
	 */

	uncore_ucp_npmc  = 8;
	uncore_ucp_width = 48;
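	/*
	 * These counts and widths match Nehalem/Westmere-class uncores;
	 * they are hard-coded because the uncore has no CPUID
	 * enumeration leaf.
	 */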

	uncore_pmcmask |= ((1ULL << uncore_ucp_npmc) - 1);

	ucp_initialize(md, maxcpu, uncore_ucp_npmc, uncore_ucp_width);

	/*
	 * Initialize fixed function counters, if present.
	 */
	uncore_ucf_ri = uncore_ucp_npmc;
	uncore_ucf_npmc  = 1;
	uncore_ucf_width = 48;

	ucf_initialize(md, maxcpu, uncore_ucf_npmc, uncore_ucf_width);
	uncore_pmcmask |= ((1ULL << uncore_ucf_npmc) - 1) << SELECTOFF(uncore_cputype);

	PMCDBG2(MDP,INI,1,"uncore-init pmcmask=0x%jx ucfri=%d", uncore_pmcmask,
	    uncore_ucf_ri);

	uncore_pcpu = malloc(sizeof(*uncore_pcpu) * maxcpu, M_PMC,
	    M_ZERO | M_WAITOK);

	return (0);
}

void
pmc_uncore_finalize(struct pmc_mdep *md)
{
	PMCDBG0(MDP,INI,1, "uncore-finalize");

	free(uncore_pcpu, M_PMC);
	uncore_pcpu = NULL;
}
843