/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 Fabien Thomas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel Uncore PMCs.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>

#define	UCF_PMC_CAPS \
	(PMC_CAP_READ | PMC_CAP_WRITE)

#define	UCP_PMC_CAPS \
    (PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
    PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)

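/*
 * Sandy Bridge and Haswell client uncores move the programmable event
 * select registers to the C-box MSR block and place the fixed counter
 * enable at a different bit offset in UC_GLOBAL_CTRL, so both the event
 * select base and the fixed counter offset are keyed off the CPU type.
 */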
#define	SELECTSEL(x) \
	(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
	UCP_CB0_EVSEL0 : UCP_EVSEL0)

#define	SELECTOFF(x) \
	(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
	UCF_OFFSET_SB : UCF_OFFSET)

static enum pmc_cputype	uncore_cputype;

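/*
 * Per-CPU uncore state: software shadows of the fixed-function control
 * and global control MSRs, followed by one pmc_hw descriptor per
 * hardware counter (programmable counters first, then fixed).
 */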
struct uncore_cpu {
	volatile uint32_t	pc_ucfctrl;	/* Fixed function control. */
	volatile uint64_t	pc_globalctrl;	/* Global control register. */
	struct pmc_hw		pc_uncorepmcs[];
};

static struct uncore_cpu **uncore_pcpu;

static uint64_t uncore_pmcmask;

static int uncore_ucf_ri;		/* relative index of fixed counters */
static int uncore_ucf_width;
static int uncore_ucf_npmc;

static int uncore_ucp_width;
static int uncore_ucp_npmc;

static int
uncore_pcpu_noop(struct pmc_mdep *md, int cpu)
{
	(void) md;
	(void) cpu;
	return (0);
}

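/*
 * Allocate per-CPU state and hook a pmc_hw descriptor for every uncore
 * counter (programmable and fixed) into this CPU's pmc_pcpu slot,
 * starting at the row index assigned to the UCP class.
 */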
static int
uncore_pcpu_init(struct pmc_mdep *md, int cpu)
{
	struct pmc_cpu *pc;
	struct uncore_cpu *cc;
	struct pmc_hw *phw;
	int uncore_ri, n, npmc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] insane cpu number %d", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"uncore-init cpu=%d", cpu);

	uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;
	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
	npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

	cc = malloc(sizeof(struct uncore_cpu) + npmc * sizeof(struct pmc_hw),
	    M_PMC, M_WAITOK | M_ZERO);

	uncore_pcpu[cpu] = cc;
	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL && cc != NULL,
	    ("[uncore,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));

	for (n = 0, phw = cc->pc_uncorepmcs; n < npmc; n++, phw++) {
		phw->phw_state	  = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) |
		    PMC_PHW_INDEX_TO_STATE(n + uncore_ri);
		phw->phw_pmc	  = NULL;
		pc->pc_hwpmcs[n + uncore_ri]  = phw;
	}

	return (0);
}

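/*
 * Quiesce the hardware (clear every event select register and the
 * fixed-function control register) before unhooking and freeing the
 * per-CPU state.
 */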
static int
uncore_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	int uncore_ri, n, npmc;
	struct pmc_cpu *pc;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] insane cpu number (%d)", __LINE__, cpu));

	PMCDBG1(MDP,INI,1,"uncore-pcpu-fini cpu=%d", cpu);

	if ((cc = uncore_pcpu[cpu]) == NULL)
		return (0);

	uncore_pcpu[cpu] = NULL;

	pc = pmc_pcpu[cpu];

	KASSERT(pc != NULL, ("[uncore,%d] NULL per-cpu %d state", __LINE__,
		cpu));

	npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
	uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;

	for (n = 0; n < npmc; n++)
		wrmsr(SELECTSEL(uncore_cputype) + n, 0);

	wrmsr(UCF_CTRL, 0);
	npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;

	for (n = 0; n < npmc; n++)
		pc->pc_hwpmcs[n + uncore_ri] = NULL;

	free(cc, M_PMC);

	return (0);
}

/*
 * Fixed function counters.
 */

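/*
 * A sampling-mode counter is loaded with (2^width - reload) so that it
 * overflows after "reload" events.  When converting back, a value with
 * the high bit clear means the counter has already wrapped, so the
 * remaining reload count is reported as zero.
 */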
static pmc_value_t
ucf_perfctr_value_to_reload_count(pmc_value_t v)
{

	/* If the PMC has overflowed, return a reload count of zero. */
	if ((v & (1ULL << (uncore_ucf_width - 1))) == 0)
		return (0);
	v &= (1ULL << uncore_ucf_width) - 1;
	return (1ULL << uncore_ucf_width) - v;
}

static pmc_value_t
ucf_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << uncore_ucf_width) - rlc;
}

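/*
 * UCF_CTRL carries a 4-bit control field for each fixed-function
 * counter, so the enable flags for row index 'ri' are shifted to bit
 * position (ri * 4).
 */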
static int
ucf_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint32_t flags;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	PMCDBG2(MDP,ALL,1, "ucf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);

	if (ri < 0 || ri >= uncore_ucf_npmc)
		return (EINVAL);

	if (a->pm_class != PMC_CLASS_UCF)
		return (EINVAL);

	if ((a->pm_flags & PMC_F_EV_PMU) == 0)
		return (EINVAL);

	flags = UCF_EN;

	pm->pm_md.pm_ucf.pm_ucf_ctrl = (flags << (ri * 4));

	PMCDBG1(MDP,ALL,2, "ucf-allocate config=0x%jx",
	    (uintmax_t) pm->pm_md.pm_ucf.pm_ucf_ctrl);

	return (0);
}

static int
ucf_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "ucf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(uncore_pcpu[cpu] != NULL,
	    ("[uncore,%d] null per-cpu %d", __LINE__, cpu));

	uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc = pm;

	return (0);
}

static int
ucf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	struct pmc_hw *phw;

	phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri];

	snprintf(pi->pm_name, sizeof(pi->pm_name), "UCF-%d", ri);
	pi->pm_class = PMC_CLASS_UCF;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return (0);
}

static int
ucf_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;

	return (0);
}

static int
ucf_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
{
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	tmp = rdmsr(UCF_CTR0 + ri);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ucf_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp;

	PMCDBG3(MDP,REA,1, "ucf-read cpu=%d ri=%d -> v=%jx", cpu, ri, *v);

	return (0);
}

static int
ucf_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	PMCDBG3(MDP,REL,1, "ucf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc ==
	    NULL, ("[uncore,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}

static int
ucf_start_pmc(int cpu, int ri, struct pmc *pm)
{
	struct uncore_cpu *ucfc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG2(MDP,STA,1,"ucf-start cpu=%d ri=%d", cpu, ri);

	ucfc = uncore_pcpu[cpu];
	ucfc->pc_ucfctrl |= pm->pm_md.pm_ucf.pm_ucf_ctrl;

	wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

	ucfc->pc_globalctrl |= (1ULL << (ri + SELECTOFF(uncore_cputype)));
	wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);

	PMCDBG4(MDP,STA,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
	    ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
	    ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

	return (0);
}

static int
ucf_stop_pmc(int cpu, int ri, struct pmc *pm __unused)
{
	uint32_t fc;
	struct uncore_cpu *ucfc;

	PMCDBG2(MDP,STO,1,"ucf-stop cpu=%d ri=%d", cpu, ri);

	ucfc = uncore_pcpu[cpu];

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	fc = (UCF_MASK << (ri * 4));

	ucfc->pc_ucfctrl &= ~fc;

	PMCDBG1(MDP,STO,1,"ucf-stop ucfctrl=%x", ucfc->pc_ucfctrl);
	wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);

	/* Don't need to write UC_GLOBAL_CTRL, one disable is enough. */

	PMCDBG4(MDP,STO,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
	    ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
	    ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));

	return (0);
}

static int
ucf_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
{
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucf_reload_count_to_perfctr_value(v);

	wrmsr(UCF_CTRL, 0);	/* Turn off fixed counters. */
	wrmsr(UCF_CTR0 + ri, v);
	wrmsr(UCF_CTRL, cc->pc_ucfctrl);

	PMCDBG4(MDP,WRI,1, "ucf-write cpu=%d ri=%d v=%jx ucfctrl=%jx",
	    cpu, ri, v, (uintmax_t) rdmsr(UCF_CTRL));

	return (0);
}

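/*
 * Register the fixed-function counter class.  Row indices for this
 * class follow the programmable counters, so this must run after
 * ucp_initialize().  Per-CPU setup and teardown are owned by the UCP
 * class, hence the no-op hooks here.
 */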
static void
ucf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[ucf,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "ucf-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF];

	pcd->pcd_caps	= UCF_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_UCF;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= ucf_allocate_pmc;
	pcd->pcd_config_pmc	= ucf_config_pmc;
	pcd->pcd_describe	= ucf_describe;
	pcd->pcd_get_config	= ucf_get_config;
	pcd->pcd_get_msr	= NULL;
	pcd->pcd_pcpu_fini	= uncore_pcpu_noop;
	pcd->pcd_pcpu_init	= uncore_pcpu_noop;
	pcd->pcd_read_pmc	= ucf_read_pmc;
	pcd->pcd_release_pmc	= ucf_release_pmc;
	pcd->pcd_start_pmc	= ucf_start_pmc;
	pcd->pcd_stop_pmc	= ucf_stop_pmc;
	pcd->pcd_write_pmc	= ucf_write_pmc;

	md->pmd_npmc	       += npmc;
}

/*
 * Intel programmable PMCs.
 */

/*
 * Event descriptor tables.
 *
 * For each event id, we track:
 *
 * 1. The CPUs that the event is valid for.
 *
 * 2. If the event uses a fixed UMASK, the value of the umask field.
 *    If the event doesn't use a fixed UMASK, a mask of legal bits
 *    to check against.
 */

struct ucp_event_descr {
	enum pmc_event	ucp_ev;
	unsigned char	ucp_evcode;
	unsigned char	ucp_umask;
	unsigned char	ucp_flags;
};

#define	UCP_F_I7	(1 << 0)	/* CPU: Core i7 */
#define	UCP_F_WM	(1 << 1)	/* CPU: Westmere */
#define	UCP_F_SB	(1 << 2)	/* CPU: Sandy Bridge */
#define	UCP_F_HW	(1 << 3)	/* CPU: Haswell */
#define	UCP_F_FM	(1 << 4)	/* Fixed mask */

#define	UCP_F_ALLCPUS					\
    (UCP_F_I7 | UCP_F_WM)

#define	UCP_F_CMASK		0xFF000000

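/*
 * Reload count conversions for the programmable counters: the same
 * arithmetic as the fixed-function variants above, minus the overflow
 * check, using the programmable counter width.
 */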
static pmc_value_t
ucp_perfctr_value_to_reload_count(pmc_value_t v)
{
	v &= (1ULL << uncore_ucp_width) - 1;
	return (1ULL << uncore_ucp_width) - v;
}

static pmc_value_t
ucp_reload_count_to_perfctr_value(pmc_value_t rlc)
{
	return (1ULL << uncore_ucp_width) - rlc;
}

/*
 * Counter specific event information for Sandy Bridge and Haswell.
 */
static int
ucp_event_sb_hw_ok_on_counter(uint8_t ev, int ri)
{
	uint32_t mask;

	switch (ev) {
	/*
	 * Events valid only on counter 0.
	 */
	case 0x80:
	case 0x83:
		mask = (1 << 0);
		break;

	default:
		mask = ~0;	/* Any row index is ok. */
	}

	return (mask & (1 << ri));
}

static int
ucp_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	uint8_t ev;
	const struct pmc_md_ucp_op_pmcallocate *ucp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index value %d", __LINE__, ri));

	if (a->pm_class != PMC_CLASS_UCP)
		return (EINVAL);

	if ((a->pm_flags & PMC_F_EV_PMU) == 0)
		return (EINVAL);

	ucp = &a->pm_md.pm_ucp;
	ev = UCP_EVSEL(ucp->pm_ucp_config);
	switch (uncore_cputype) {
	case PMC_CPU_INTEL_HASWELL:
	case PMC_CPU_INTEL_SANDYBRIDGE:
		if (ucp_event_sb_hw_ok_on_counter(ev, ri) == 0)
			return (EINVAL);
		break;
	default:
		break;
	}

	pm->pm_md.pm_ucp.pm_ucp_evsel = ucp->pm_ucp_config | UCP_EN;

	return (0);
}

static int
ucp_config_pmc(int cpu, int ri, struct pmc *pm)
{
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU %d", __LINE__, cpu));

	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	PMCDBG3(MDP,CFG,1, "ucp-config cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(uncore_pcpu[cpu] != NULL,
	    ("[uncore,%d] null per-cpu %d", __LINE__, cpu));

	uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc = pm;

	return (0);
}

static int
ucp_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
	struct pmc_hw *phw;

	phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri];

	snprintf(pi->pm_name, sizeof(pi->pm_name), "UCP-%d", ri);
	pi->pm_class = PMC_CLASS_UCP;

	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
		pi->pm_enabled = TRUE;
		*ppmc          = phw->phw_pmc;
	} else {
		pi->pm_enabled = FALSE;
		*ppmc          = NULL;
	}

	return (0);
}

static int
ucp_get_config(int cpu, int ri, struct pmc **ppm)
{
	*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;

	return (0);
}

static int
ucp_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
{
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	tmp = rdmsr(UCP_PMC0 + ri);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = ucp_perfctr_value_to_reload_count(tmp);
	else
		*v = tmp;

	PMCDBG4(MDP,REA,1, "ucp-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
	    UCP_PMC0 + ri, *v);

	return (0);
}

static int
ucp_release_pmc(int cpu, int ri, struct pmc *pm)
{
	(void) pm;

	PMCDBG3(MDP,REL,1, "ucp-release cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc == NULL,
	    ("[uncore,%d] PHW pmc non-NULL", __LINE__));

	return (0);
}

static int
ucp_start_pmc(int cpu, int ri, struct pmc *pm)
{
	uint64_t evsel;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];

	PMCDBG2(MDP,STA,1, "ucp-start cpu=%d ri=%d", cpu, ri);

	evsel = pm->pm_md.pm_ucp.pm_ucp_evsel;

	PMCDBG4(MDP,STA,2,
	    "ucp-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%jx",
	    cpu, ri, SELECTSEL(uncore_cputype) + ri, (uintmax_t) evsel);

	wrmsr(SELECTSEL(uncore_cputype) + ri, evsel);

	cc->pc_globalctrl |= (1ULL << ri);
	wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);

	return (0);
}

static int
ucp_stop_pmc(int cpu, int ri, struct pmc *pm __unused)
{

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row index %d", __LINE__, ri));

	PMCDBG2(MDP,STO,1, "ucp-stop cpu=%d ri=%d", cpu, ri);

	/* Stop the hardware counter. */
	wrmsr(SELECTSEL(uncore_cputype) + ri, 0);

	/* Don't need to write UC_GLOBAL_CTRL, one disable is enough. */

	return (0);
}

static int
ucp_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
{

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row index %d", __LINE__, ri));

	PMCDBG4(MDP,WRI,1, "ucp-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
	    UCP_PMC0 + ri, v);

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ucp_reload_count_to_perfctr_value(v);

	/*
	 * Write the new value to the counter.  The counter will be in
	 * a stopped state when the pcd_write() entry point is called.
	 */

	wrmsr(UCP_PMC0 + ri, v);

	return (0);
}

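/*
 * Register the programmable counter class.  This class owns the
 * per-CPU init and fini hooks for all uncore counters.
 */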
static void
ucp_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
	struct pmc_classdep *pcd;

	KASSERT(md != NULL, ("[ucp,%d] md is NULL", __LINE__));

	PMCDBG0(MDP,INI,1, "ucp-initialize");

	pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP];

	pcd->pcd_caps	= UCP_PMC_CAPS;
	pcd->pcd_class	= PMC_CLASS_UCP;
	pcd->pcd_num	= npmc;
	pcd->pcd_ri	= md->pmd_npmc;
	pcd->pcd_width	= pmcwidth;

	pcd->pcd_allocate_pmc	= ucp_allocate_pmc;
	pcd->pcd_config_pmc	= ucp_config_pmc;
	pcd->pcd_describe	= ucp_describe;
	pcd->pcd_get_config	= ucp_get_config;
	pcd->pcd_get_msr	= NULL;
	pcd->pcd_pcpu_fini	= uncore_pcpu_fini;
	pcd->pcd_pcpu_init	= uncore_pcpu_init;
	pcd->pcd_read_pmc	= ucp_read_pmc;
	pcd->pcd_release_pmc	= ucp_release_pmc;
	pcd->pcd_start_pmc	= ucp_start_pmc;
	pcd->pcd_stop_pmc	= ucp_stop_pmc;
	pcd->pcd_write_pmc	= ucp_write_pmc;

	md->pmd_npmc	       += npmc;
}

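/*
 * Register both uncore classes with the machine dependent layer.  The
 * counter counts and widths are hard-coded here for the supported CPU
 * types rather than probed from the hardware.
 */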
int
pmc_uncore_initialize(struct pmc_mdep *md, int maxcpu)
{
	uncore_cputype = md->pmd_cputype;
	uncore_pmcmask = 0;

	/*
	 * Initialize programmable counters.
	 */

	uncore_ucp_npmc  = 8;
	uncore_ucp_width = 48;

	uncore_pmcmask |= ((1ULL << uncore_ucp_npmc) - 1);

	ucp_initialize(md, maxcpu, uncore_ucp_npmc, uncore_ucp_width);

	/*
	 * Initialize fixed function counters, if present.
	 */
	uncore_ucf_ri = uncore_ucp_npmc;
	uncore_ucf_npmc  = 1;
	uncore_ucf_width = 48;

	ucf_initialize(md, maxcpu, uncore_ucf_npmc, uncore_ucf_width);
	uncore_pmcmask |= ((1ULL << uncore_ucf_npmc) - 1) <<
	    SELECTOFF(uncore_cputype);

	PMCDBG2(MDP,INI,1,"uncore-init pmcmask=0x%jx ucfri=%d", uncore_pmcmask,
	    uncore_ucf_ri);

	uncore_pcpu = malloc(sizeof(*uncore_pcpu) * maxcpu, M_PMC,
	    M_ZERO | M_WAITOK);

	return (0);
}

void
pmc_uncore_finalize(struct pmc_mdep *md)
{
	PMCDBG0(MDP,INI,1, "uncore-finalize");

	for (int i = 0; i < pmc_cpu_max(); i++)
		KASSERT(uncore_pcpu[i] == NULL,
		    ("[uncore,%d] non-null pcpu cpu %d", __LINE__, i));

	free(uncore_pcpu, M_PMC);
	uncore_pcpu = NULL;
}
767