/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Fabien Thomas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel Uncore PMCs.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>

#include <machine/intr_machdep.h>
#if (__FreeBSD_version >= 1100000)
#include <x86/apicvar.h>
#else
#include <machine/apicvar.h>
#endif
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>

#define	UCF_PMC_CAPS \
	(PMC_CAP_READ | PMC_CAP_WRITE)

#define	UCP_PMC_CAPS \
    (PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
    PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)

#define	SELECTSEL(x) \
	(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
	UCP_CB0_EVSEL0 : UCP_EVSEL0)

#define	SELECTOFF(x) \
	(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
	UCF_OFFSET_SB : UCF_OFFSET)
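
/*
 * On Sandy Bridge and Haswell the uncore event select MSRs live at a
 * different base (UCP_CB0_EVSEL0) than the UCP_EVSEL0 base used on
 * earlier parts, and the fixed-function counter's enable bit sits at a
 * different offset (UCF_OFFSET_SB) within UC_GLOBAL_CTRL.  The
 * SELECTSEL() and SELECTOFF() macros hide that difference from the
 * rest of this file.
 */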

static enum pmc_cputype	uncore_cputype;

struct uncore_cpu {
	volatile uint32_t	pc_resync;
	volatile uint32_t	pc_ucfctrl;	/* Fixed function control. */
	volatile uint64_t	pc_globalctrl;	/* Global control register. */
	struct pmc_hw		pc_uncorepmcs[];
};
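
/*
 * pc_uncorepmcs[] has one pmc_hw slot per hardware counter: the
 * programmable (UCP) rows come first, followed by the fixed-function
 * (UCF) rows starting at relative index uncore_ucf_ri.
 */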

static struct uncore_cpu **uncore_pcpu;

static uint64_t uncore_pmcmask;

static int uncore_ucf_ri;		/* relative index of fixed counters */
static int uncore_ucf_width;
static int uncore_ucf_npmc;

static int uncore_ucp_width;
static int uncore_ucp_npmc;

static int
uncore_pcpu_noop(struct pmc_mdep *md, int cpu)
{
	(void) md;
	(void) cpu;
	return (0);
}

static int
uncore_pcpu_init(struct pmc_mdep *md, int cpu)
{
	struct pmc_cpu *pc;
	struct uncore_cpu *cc;
	struct pmc_hw *phw;
	int uncore_ri, n, npmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"uncore-init cpu=%d", cpu);

	uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;
	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
	npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

	cc = malloc(sizeof(struct uncore_cpu) + npmc * sizeof(struct pmc_hw),
	    M_PMC, M_WAITOK | M_ZERO);

	uncore_pcpu[cpu] = cc;
	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL && cc != NULL,
	    ("[uncore,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));

	for (n = 0, phw = cc->pc_uncorepmcs; n < npmc; n++, phw++) {
		phw->phw_state	  = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) |
		    PMC_PHW_INDEX_TO_STATE(n + uncore_ri);
		phw->phw_pmc	  = NULL;
		pc->pc_hwpmcs[n + uncore_ri]  = phw;
	}

	return (0);
}

static int
uncore_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	int uncore_ri, n, npmc;
	struct pmc_cpu *pc;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] insane cpu number (%d)", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"uncore-pcpu-fini cpu=%d", cpu);

	if ((cc = uncore_pcpu[cpu]) == NULL)
		return (0);

	uncore_pcpu[cpu] = NULL;

	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL, ("[uncore,%d] NULL per-cpu %d state", __LINE__,
		cpu));

	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
	uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;

	for (n = 0; n < npmc; n++)
		wrmsr(SELECTSEL(uncore_cputype) + n, 0);

	wrmsr(UCF_CTRL, 0);
	npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

	for (n = 0; n < npmc; n++)
		pc->pc_hwpmcs[n + uncore_ri] = NULL;

	free(cc, M_PMC);

	return (0);
}

/*
 * Fixed function counters.
 */

static pmc_value_t
ucf_perfctr_value_to_reload_count(pmc_value_t v)
{

	/* If the PMC has overflowed, return a reload count of zero. */
	if ((v & (1ULL << (uncore_ucf_width - 1))) == 0)
		return (0);
	v &= (1ULL << uncore_ucf_width) - 1;
	return (1ULL << uncore_ucf_width) - v;
}

static pmc_value_t
ucf_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << uncore_ucf_width) - rlc;
}
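
/*
 * A sampling counter is preloaded with (2^width - reload) so that it
 * overflows after exactly "reload" events; converting a raw counter
 * value back to a reload count therefore subtracts it from 2^width.
 * For example, with a 48-bit counter and a reload count of 1000, the
 * counter is written as 2^48 - 1000 and the remaining count can be
 * recovered at any time as 2^48 - (current value).
 */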

static int
ucf_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint32_t flags;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	PMCDBG2(MDP,ALL,1, "ucf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);

	if (ri < 0 || ri >= uncore_ucf_npmc)
		return (EINVAL);

	if (a->pm_class != PMC_CLASS_UCF)
		return (EINVAL);

	flags = UCF_EN;

	pm->pm_md.pm_ucf.pm_ucf_ctrl = (flags << (ri * 4));

	PMCDBG1(MDP,ALL,2, "ucf-allocate config=0x%jx",
	    (uintmax_t) pm->pm_md.pm_ucf.pm_ucf_ctrl);

	return (0);
}
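
/*
 * UCF_CTRL appears to dedicate a 4-bit control field to each fixed
 * counter, which is why the enable flags are shifted by (ri * 4)
 * above and why ucf_stop_pmc() clears UCF_MASK << (ri * 4).
 */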

static int
ucf_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "ucf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__,
	    cpu));

	uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc = pm;

	return (0);
}

static int
ucf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	struct pmc_hw *phw;
	char ucf_name[PMC_NAME_MAX];

	phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri];

	(void) snprintf(ucf_name, sizeof(ucf_name), "UCF-%d", ri);
	if ((error = copystr(ucf_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);

	pi->pm_class = PMC_CLASS_UCF;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return (0);
}

static int
ucf_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	return (0);
}

static int
ucf_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d(%d) pmc not configured", __LINE__, cpu,
		ri, ri + uncore_ucf_ri));

	tmp = rdmsr(UCF_CTR0 + ri);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ucf_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp;

	PMCDBG3(MDP,REA,1, "ucf-read cpu=%d ri=%d -> v=%jx", cpu, ri, *v);

	return (0);
}

static int
ucf_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	PMCDBG3(MDP,REL,1, "ucf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc == NULL,
	    ("[uncore,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}

static int
ucf_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct uncore_cpu *ucfc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG2(MDP,STA,1,"ucf-start cpu=%d ri=%d", cpu, ri);

	ucfc = uncore_pcpu[cpu];
	pm = ucfc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	ucfc->pc_ucfctrl |= pm->pm_md.pm_ucf.pm_ucf_ctrl;

	wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

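	/*
	 * Set this counter's enable bit in the cached UC_GLOBAL_CTRL
	 * image and write it back.  The pc_resync retry loop appears to
	 * mirror the idiom used by hwpmc_core; nothing in this file
	 * ever sets pc_resync non-zero, so in practice it runs once.
	 */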
	do {
		ucfc->pc_resync = 0;
		ucfc->pc_globalctrl |= (1ULL << (ri + SELECTOFF(uncore_cputype)));
		wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);
	} while (ucfc->pc_resync != 0);

	PMCDBG4(MDP,STA,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
	    ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
	    ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

	return (0);
}

static int
ucf_stop_pmc(int cpu, int ri)
{
	uint32_t fc;
	struct uncore_cpu *ucfc;

	PMCDBG2(MDP,STO,1,"ucf-stop cpu=%d ri=%d", cpu, ri);

	ucfc = uncore_pcpu[cpu];

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	fc = (UCF_MASK << (ri * 4));

	ucfc->pc_ucfctrl &= ~fc;

	PMCDBG1(MDP,STO,1,"ucf-stop ucfctrl=%x", ucfc->pc_ucfctrl);
	wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

	do {
		ucfc->pc_resync = 0;
		ucfc->pc_globalctrl &= ~(1ULL << (ri + SELECTOFF(uncore_cputype)));
		wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);
	} while (ucfc->pc_resync != 0);

	PMCDBG4(MDP,STO,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
	    ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
	    ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

	return (0);
}

static int
ucf_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct uncore_cpu *cc;
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucf_reload_count_to_perfctr_value(v);

	wrmsr(UCF_CTRL, 0);	/* Turn off fixed counters. */
	wrmsr(UCF_CTR0 + ri, v);
	wrmsr(UCF_CTRL, cc->pc_ucfctrl);

	PMCDBG4(MDP,WRI,1, "ucf-write cpu=%d ri=%d v=%jx ucfctrl=%jx",
	    cpu, ri, v, (uintmax_t) rdmsr(UCF_CTRL));

	return (0);
}

static void
ucf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[ucf,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "ucf-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF];

	pcd->pcd_caps	= UCF_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_UCF;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= ucf_allocate_pmc;
	pcd->pcd_config_pmc	= ucf_config_pmc;
	pcd->pcd_describe	= ucf_describe;
	pcd->pcd_get_config	= ucf_get_config;
	pcd->pcd_get_msr	= NULL;
	pcd->pcd_pcpu_fini	= uncore_pcpu_noop;
	pcd->pcd_pcpu_init	= uncore_pcpu_noop;
	pcd->pcd_read_pmc	= ucf_read_pmc;
	pcd->pcd_release_pmc	= ucf_release_pmc;
	pcd->pcd_start_pmc	= ucf_start_pmc;
	pcd->pcd_stop_pmc	= ucf_stop_pmc;
	pcd->pcd_write_pmc	= ucf_write_pmc;

	md->pmd_npmc	       += npmc;
}

/*
 * Intel programmable PMCs.
 */

/*
 * Event descriptor tables.
 *
 * For each event id, we track:
 *
 * 1. The CPUs that the event is valid for.
 *
 * 2. If the event uses a fixed UMASK, the value of the umask field.
 *    If the event doesn't use a fixed UMASK, a mask of legal bits
 *    to check against.
 */

struct ucp_event_descr {
	enum pmc_event	ucp_ev;
	unsigned char	ucp_evcode;
	unsigned char	ucp_umask;
	unsigned char	ucp_flags;
};

#define	UCP_F_I7	(1 << 0)	/* CPU: Core i7 */
#define	UCP_F_WM	(1 << 1)	/* CPU: Westmere */
#define	UCP_F_SB	(1 << 2)	/* CPU: Sandy Bridge */
#define	UCP_F_HW	(1 << 3)	/* CPU: Haswell */
#define	UCP_F_FM	(1 << 4)	/* Fixed mask */

#define	UCP_F_ALLCPUS					\
    (UCP_F_I7 | UCP_F_WM)

#define	UCP_F_CMASK		0xFF000000
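
/*
 * No event descriptor table is actually defined in this file; struct
 * ucp_event_descr and the UCP_F_* flags above appear to be vestigial,
 * with event encoding validation handled before allocation reaches
 * this driver (e.g. in libpmc).
 */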

static pmc_value_t
ucp_perfctr_value_to_reload_count(pmc_value_t v)
{
	v &= (1ULL << uncore_ucp_width) - 1;
	return (1ULL << uncore_ucp_width) - v;
}

static pmc_value_t
ucp_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << uncore_ucp_width) - rlc;
}

/*
 * Counter-specific event information for Sandy Bridge and Haswell.
 */
static int
ucp_event_sb_hw_ok_on_counter(uint8_t ev, int ri)
{
	uint32_t mask;

	switch (ev) {
	/*
	 * Events valid only on counter 0.
	 */
	case 0x80:
	case 0x83:
		mask = (1 << 0);
		break;
	default:
		mask = ~0;	/* Any row index is ok. */
	}

	return (mask & (1 << ri));
}
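
/*
 * Allocation-time checks.  On Sandy Bridge and Haswell,
 * ucp_allocate_pmc() consults the restriction table above so that an
 * event valid only on a particular counter is rejected for any other
 * row index.
 */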

static int
ucp_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint8_t ev;
	const struct pmc_md_ucp_op_pmcallocate *ucp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index value %d", __LINE__, ri));

	if (a->pm_class != PMC_CLASS_UCP)
		return (EINVAL);

	ucp = &a->pm_md.pm_ucp;
	ev = UCP_EVSEL(ucp->pm_ucp_config);
	switch (uncore_cputype) {
	case PMC_CPU_INTEL_HASWELL:
	case PMC_CPU_INTEL_SANDYBRIDGE:
		if (ucp_event_sb_hw_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
		break;
	default:
		break;
	}

	pm->pm_md.pm_ucp.pm_ucp_evsel = ucp->pm_ucp_config | UCP_EN;

	return (0);
}

static int
ucp_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "ucp-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__,
	    cpu));

	uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc = pm;

	return (0);
}

static int
ucp_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	int error;
	struct pmc_hw *phw;
	char ucp_name[PMC_NAME_MAX];

	phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri];

	(void) snprintf(ucp_name, sizeof(ucp_name), "UCP-%d", ri);
	if ((error = copystr(ucp_name, pi->pm_name, PMC_NAME_MAX,
	    NULL)) != 0)
		return (error);

	pi->pm_class = PMC_CLASS_UCP;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return (0);
}

static int
ucp_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

	return (0);
}

static int
ucp_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
		ri));

	tmp = rdmsr(UCP_PMC0 + ri);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ucp_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp;

	PMCDBG4(MDP,REA,1, "ucp-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
	    UCP_PMC0 + ri, *v);

	return (0);
}

static int
ucp_release_pmc(int cpu, int ri, struct pmc *pm)
{
	(void) pm;

	PMCDBG3(MDP,REL,1, "ucp-release cpu=%d ri=%d pm=%p", cpu, ri,
	    pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc
	    == NULL, ("[uncore,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}

static int
ucp_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	uint32_t evsel;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] starting cpu%d,ri%d with no pmc configured",
		__LINE__, cpu, ri));

	PMCDBG2(MDP,STA,1, "ucp-start cpu=%d ri=%d", cpu, ri);

	evsel = pm->pm_md.pm_ucp.pm_ucp_evsel;

	PMCDBG4(MDP,STA,2,
	    "ucp-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%x",
	    cpu, ri, SELECTSEL(uncore_cputype) + ri, evsel);

	/*
	 * Event specific configuration: the GQ snoop events are
	 * filtered on cache line state via MSR_GQ_SNOOP_MESF
	 * (M=0x1, E=0x2, S=0x4, F=0x8).
	 */
	switch (pm->pm_event) {
	case PMC_EV_UCP_EVENT_0CH_04H_E:
	case PMC_EV_UCP_EVENT_0CH_08H_E:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x2);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_F:
	case PMC_EV_UCP_EVENT_0CH_08H_F:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x8);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_M:
	case PMC_EV_UCP_EVENT_0CH_08H_M:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x1);
		break;
	case PMC_EV_UCP_EVENT_0CH_04H_S:
	case PMC_EV_UCP_EVENT_0CH_08H_S:
		wrmsr(MSR_GQ_SNOOP_MESF, 0x4);
		break;
	default:
		break;
	}
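	/*
	 * Program the event select MSR for this row, then enable the
	 * counter in the cached UC_GLOBAL_CTRL image.  The programmable
	 * counters occupy the low bits of the global control register;
	 * the fixed counter's bit sits higher, at
	 * SELECTOFF(uncore_cputype).
	 */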
	wrmsr(SELECTSEL(uncore_cputype) + ri, evsel);

	do {
		cc->pc_resync = 0;
		cc->pc_globalctrl |= (1ULL << ri);
		wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);
	} while (cc->pc_resync != 0);

	return (0);
}

static int
ucp_stop_pmc(int cpu, int ri)
{
	struct pmc *pm __diagused;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
		cpu, ri));

	PMCDBG2(MDP,STO,1, "ucp-stop cpu=%d ri=%d", cpu, ri);

	/* stop hw. */
	wrmsr(SELECTSEL(uncore_cputype) + ri, 0);

	do {
		cc->pc_resync = 0;
		cc->pc_globalctrl &= ~(1ULL << ri);
		wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);
	} while (cc->pc_resync != 0);

	return (0);
}

static int
ucp_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] cpu%d ri%d no configured PMC to write", __LINE__,
		cpu, ri));

	PMCDBG4(MDP,WRI,1, "ucp-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
	    UCP_PMC0 + ri, v);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucp_reload_count_to_perfctr_value(v);

	/*
	 * Write the new value to the counter.  The counter will be in
	 * a stopped state when the pcd_write() entry point is called.
	 */

	wrmsr(UCP_PMC0 + ri, v);

	return (0);
}

static void
ucp_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[ucp,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "ucp-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP];

	pcd->pcd_caps	= UCP_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_UCP;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= ucp_allocate_pmc;
	pcd->pcd_config_pmc	= ucp_config_pmc;
	pcd->pcd_describe	= ucp_describe;
	pcd->pcd_get_config	= ucp_get_config;
	pcd->pcd_get_msr	= NULL;
	pcd->pcd_pcpu_fini	= uncore_pcpu_fini;
	pcd->pcd_pcpu_init	= uncore_pcpu_init;
	pcd->pcd_read_pmc	= ucp_read_pmc;
	pcd->pcd_release_pmc	= ucp_release_pmc;
	pcd->pcd_start_pmc	= ucp_start_pmc;
	pcd->pcd_stop_pmc	= ucp_stop_pmc;
	pcd->pcd_write_pmc	= ucp_write_pmc;

	md->pmd_npmc	       += npmc;
}

int
pmc_uncore_initialize(struct pmc_mdep *md, int maxcpu)
{
	uncore_cputype = md->pmd_cputype;
	uncore_pmcmask = 0;

	/*
	 * Initialize programmable counters.
	 */

	uncore_ucp_npmc  = 8;
	uncore_ucp_width = 48;

	uncore_pmcmask |= ((1ULL << uncore_ucp_npmc) - 1);

	ucp_initialize(md, maxcpu, uncore_ucp_npmc, uncore_ucp_width);

	/*
	 * Initialize fixed function counters, if present.
	 */
	uncore_ucf_ri = uncore_ucp_npmc;
	uncore_ucf_npmc  = 1;
	uncore_ucf_width = 48;

	ucf_initialize(md, maxcpu, uncore_ucf_npmc, uncore_ucf_width);
	uncore_pmcmask |= ((1ULL << uncore_ucf_npmc) - 1) << SELECTOFF(uncore_cputype);

	PMCDBG2(MDP,INI,1,"uncore-init pmcmask=0x%jx ucfri=%d", uncore_pmcmask,
	    uncore_ucf_ri);

	uncore_pcpu = malloc(sizeof(*uncore_pcpu) * maxcpu, M_PMC,
	    M_ZERO | M_WAITOK);

	return (0);
}
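
/*
 * Note that the counter geometry above (eight 48-bit programmable
 * counters plus one 48-bit fixed counter) is hard-coded rather than
 * probed at runtime, so it must remain valid for every uncore variant
 * this driver claims to support.
 */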

void
pmc_uncore_finalize(struct pmc_mdep *md)
{
	(void) md;

	PMCDBG0(MDP,INI,1, "uncore-finalize");

	free(uncore_pcpu, M_PMC);
	uncore_pcpu = NULL;
}
858