xref: /freebsd/sys/dev/hwpmc/hwpmc_armv7.c (revision 608da65de9552d5678c1000776ed69da04a45983)
1 /*-
2  * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * This software was developed by SRI International and the University of
6  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7  * ("CTSRD"), as part of the DARPA CRASH research programme.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/pmc.h>
35 #include <sys/pmckern.h>
36 
37 #include <machine/pmc_mdep.h>
38 #include <machine/cpu.h>
39 
40 static int armv7_npmcs;
41 
42 struct armv7_event_code_map {
43 	enum pmc_event	pe_ev;
44 	uint8_t		pe_code;
45 };
46 
47 #define	PMC_EV_CPU_CYCLES	0xFF
48 
49 /*
50  * Per-processor information.
51  */
52 struct armv7_cpu {
53 	struct pmc_hw   *pc_armv7pmcs;
54 };
55 
56 static struct armv7_cpu **armv7_pcpu;
57 
58 /*
59  * Interrupt Enable Set Register
60  */
61 static __inline void
62 armv7_interrupt_enable(uint32_t pmc)
63 {
64 	uint32_t reg;
65 
66 	reg = (1 << pmc);
67 	cp15_pminten_set(reg);
68 }
69 
/*
 * Interrupt Enable Clear Register (PMINTENCLR)
 */
73 static __inline void
74 armv7_interrupt_disable(uint32_t pmc)
75 {
76 	uint32_t reg;
77 
78 	reg = (1 << pmc);
79 	cp15_pminten_clr(reg);
80 }
81 
82 /*
83  * Counter Set Enable Register
84  */
85 static __inline void
86 armv7_counter_enable(unsigned int pmc)
87 {
88 	uint32_t reg;
89 
90 	reg = (1 << pmc);
91 	cp15_pmcnten_set(reg);
92 }
93 
94 /*
95  * Counter Clear Enable Register
96  */
97 static __inline void
98 armv7_counter_disable(unsigned int pmc)
99 {
100 	uint32_t reg;
101 
102 	reg = (1 << pmc);
103 	cp15_pmcnten_clr(reg);
104 }
105 
106 /*
107  * Performance Count Register N
108  */
109 static uint32_t
110 armv7_pmcn_read(unsigned int pmc, uint32_t evsel)
111 {
112 
113 	if (evsel == PMC_EV_CPU_CYCLES) {
114 		return ((uint32_t)cp15_pmccntr_get());
115 	}
116 
117 	KASSERT(pmc < armv7_npmcs, ("%s: illegal PMC number %d", __func__, pmc));
118 
119 	cp15_pmselr_set(pmc);
120 	return (cp15_pmxevcntr_get());
121 }
122 
/*
 * Write @reg into event counter @pmc via the PMSELR/PMXEVCNTR
 * indirection and return the value written.  The cycle counter is not
 * handled here; callers write PMCCNTR directly.
 */
static uint32_t
armv7_pmcn_write(unsigned int pmc, uint32_t reg)
{

	KASSERT(pmc < armv7_npmcs, ("%s: illegal PMC number %d", __func__, pmc));

	/* Select the counter first, then load the new value. */
	cp15_pmselr_set(pmc);
	cp15_pmxevcntr_set(reg);

	return (reg);
}
134 
135 static int
136 armv7_allocate_pmc(int cpu, int ri, struct pmc *pm,
137   const struct pmc_op_pmcallocate *a)
138 {
139 	enum pmc_event pe;
140 	uint32_t config;
141 
142 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
143 	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
144 	KASSERT(ri >= 0 && ri < armv7_npmcs,
145 	    ("[armv7,%d] illegal row index %d", __LINE__, ri));
146 
147 	if (a->pm_class != PMC_CLASS_ARMV7)
148 		return (EINVAL);
149 	pe = a->pm_ev;
150 
151 	config = (pe & EVENT_ID_MASK);
152 	pm->pm_md.pm_armv7.pm_armv7_evsel = config;
153 
154 	PMCDBG2(MDP, ALL, 2, "armv7-allocate ri=%d -> config=0x%x", ri, config);
155 
156 	return 0;
157 }
158 
159 
/*
 * Read the 64-bit software-extended value of row @ri into *v.  The
 * hardware counters are only 32 bits wide; the upper half lives in the
 * per-CPU pps_overflowcnt, which is kept current here and by the
 * overflow interrupt handler.
 */
static int
armv7_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
{
	pmc_value_t tmp;
	register_t s;
	u_int reg;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row index %d", __LINE__, ri));

	/* Block interrupts so the overflow handler cannot race us. */
	s = intr_disable();
	tmp = armv7_pmcn_read(ri, pm->pm_md.pm_armv7.pm_armv7_evsel);

	/* Check if counter has overflowed */
	if (pm->pm_md.pm_armv7.pm_armv7_evsel == PMC_EV_CPU_CYCLES)
		reg = (1u << 31);	/* cycle counter uses bit 31 of PMOVSR */
	else
		reg = (1u << ri);

	if ((cp15_pmovsr_get() & reg) != 0) {
		/* Clear Overflow Flag */
		cp15_pmovsr_set(reg);
		pm->pm_pcpu_state[cpu].pps_overflowcnt++;

		/* Reread counter in case we raced. */
		tmp = armv7_pmcn_read(ri, pm->pm_md.pm_armv7.pm_armv7_evsel);
	}
	/* Splice the software upper half onto the 32-bit hardware value. */
	tmp += 0x100000000llu * pm->pm_pcpu_state[cpu].pps_overflowcnt;
	intr_restore(s);

	PMCDBG2(MDP, REA, 2, "armv7-read id=%d -> %jd", ri, tmp);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
		/*
		 * Clamp value to 0 if the counter just overflowed,
		 * otherwise the returned reload count would wrap to a
		 * huge value.
		 */
		if ((tmp & (1ull << 63)) == 0)
			tmp = 0;
		else
			tmp = ARMV7_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
	}
	*v = tmp;

	return 0;
}
208 
/*
 * Load row @ri with the 64-bit value @v.  The low 32 bits go into the
 * hardware counter and the high 32 bits into the per-CPU software
 * overflow count, mirroring how armv7_read_pmc() reconstructs values.
 */
static int
armv7_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
{

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row-index %d", __LINE__, ri));

	/*
	 * In sampling mode the caller hands us a reload count; convert
	 * it to the raw counter value the hardware must start from.
	 */
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ARMV7_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

	PMCDBG3(MDP, WRI, 1, "armv7-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	/* Stash the upper half in software; hardware keeps the lower half. */
	pm->pm_pcpu_state[cpu].pps_overflowcnt = v >> 32;
	/* The cycle counter has its own dedicated register. */
	if (pm->pm_md.pm_armv7.pm_armv7_evsel == PMC_EV_CPU_CYCLES)
		cp15_pmccntr_set(v);
	else
		armv7_pmcn_write(ri, v);

	return 0;
}
231 
/*
 * Attach PMC @pm to row @ri of CPU @cpu, or detach the current one when
 * @pm is NULL.  Only software bookkeeping changes; no hardware access.
 */
static int
armv7_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	PMCDBG3(MDP, CFG, 1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row-index %d", __LINE__, ri));

	phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];

	/* Either configure an empty row or deconfigure an occupied one. */
	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[armv7,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
	    __LINE__, pm, phw->phw_pmc));

	phw->phw_pmc = pm;

	return 0;
}
254 
/*
 * Program and start row @ri.  For ordinary events the selector is
 * written into PMXEVTYPER; for the cycle-counter pseudo-event (0xFF)
 * the row index is aliased to 31, the cycle counter's control bit.
 */
static int
armv7_start_pmc(int cpu, int ri, struct pmc *pm)
{
	uint32_t config;

	config = pm->pm_md.pm_armv7.pm_armv7_evsel;

	/*
	 * Configure the event selection.
	 */
	if (config != PMC_EV_CPU_CYCLES) {
		cp15_pmselr_set(ri);
		cp15_pmxevtyper_set(config);
	} else
		ri = 31;	/* cycle counter: enable/interrupt bit 31 */

	/*
	 * Enable the PMC.
	 */
	armv7_interrupt_enable(ri);
	armv7_counter_enable(ri);

	return 0;
}
279 
280 static int
281 armv7_stop_pmc(int cpu, int ri, struct pmc *pm)
282 {
283 	uint32_t config;
284 
285 	config = pm->pm_md.pm_armv7.pm_armv7_evsel;
286 	if (config == PMC_EV_CPU_CYCLES)
287 		ri = 31;
288 
289 	/*
290 	 * Disable the PMCs.
291 	 */
292 	armv7_counter_disable(ri);
293 	armv7_interrupt_disable(ri);
294 
295 	return 0;
296 }
297 
/*
 * Release row @ri.  Nothing is torn down here; the row must already
 * have been deconfigured (phw_pmc == NULL), which the KASSERT checks.
 */
static int
armv7_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	struct pmc_hw *phw __diagused;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row-index %d", __LINE__, ri));

	phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
	KASSERT(phw->phw_pmc == NULL,
	    ("[armv7,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

	return 0;
}
314 
/*
 * PMU overflow interrupt handler.  Scans all rows, acknowledges each
 * overflowed counter and, for sampling-mode PMCs, posts a sample and
 * reloads the counter.  Returns 1 if any PMC caused the interrupt.
 */
static int
armv7_intr(struct trapframe *tf)
{
	int retval, ri;
	struct pmc *pm;
	int error;
	int reg, cpu;

	cpu = curcpu;
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] CPU %d out of range", __LINE__, cpu));

	retval = 0;

	for (ri = 0; ri < armv7_npmcs; ri++) {
		pm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;
		if (pm == NULL)
			continue;

		/* Check if counter has overflowed */
		if (pm->pm_md.pm_armv7.pm_armv7_evsel == PMC_EV_CPU_CYCLES)
			reg = (1u << 31);	/* cycle counter: PMOVSR bit 31 */
		else
			reg = (1u << ri);

		if ((cp15_pmovsr_get() & reg) == 0) {
			continue;
		}

		/* Clear Overflow Flag */
		cp15_pmovsr_set(reg);

		retval = 1; /* Found an interrupting PMC. */

		/* Account for the 32-bit hardware counter wrapping. */
		pm->pm_pcpu_state[cpu].pps_overflowcnt += 1;

		/* Counting-mode PMCs only need the overflow accounting. */
		if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		/* Post the sample; stop the PMC if the sample was dropped. */
		error = pmc_process_interrupt(PMC_HR, pm, tf);
		if (error)
			armv7_stop_pmc(cpu, ri, pm);

		/* Reload sampling count */
		armv7_write_pmc(cpu, ri, pm, pm->pm_sc.pm_reloadcount);
	}

	return (retval);
}
367 
368 static int
369 armv7_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
370 {
371 	struct pmc_hw *phw;
372 
373 	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
374 	    ("[armv7,%d], illegal CPU %d", __LINE__, cpu));
375 	KASSERT(ri >= 0 && ri < armv7_npmcs,
376 	    ("[armv7,%d] row-index %d out of range", __LINE__, ri));
377 
378 	phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
379 
380 	snprintf(pi->pm_name, sizeof(pi->pm_name), "ARMV7-%d", ri);
381 	pi->pm_class = PMC_CLASS_ARMV7;
382 
383 	if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
384 		pi->pm_enabled = TRUE;
385 		*ppmc = phw->phw_pmc;
386 	} else {
387 		pi->pm_enabled = FALSE;
388 		*ppmc = NULL;
389 	}
390 
391 	return (0);
392 }
393 
394 static int
395 armv7_get_config(int cpu, int ri, struct pmc **ppm)
396 {
397 
398 	*ppm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;
399 
400 	return 0;
401 }
402 
/*
 * Per-CPU initialization: allocate the per-CPU state and hardware
 * descriptors, wire them into the MI pmc_pcpu table, put the PMU
 * registers into a known (all-disabled, acknowledged) state and
 * finally switch the unit on.
 */
static int
armv7_pcpu_init(struct pmc_mdep *md, int cpu)
{
	struct armv7_cpu *pac;
	struct pmc_hw  *phw;
	struct pmc_cpu *pc;
	uint32_t pmnc;
	int first_ri;
	int i;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] wrong cpu number %d", __LINE__, cpu));
	PMCDBG0(MDP, INI, 1, "armv7-pcpu-init");

	armv7_pcpu[cpu] = pac = malloc(sizeof(struct armv7_cpu), M_PMC,
	    M_WAITOK|M_ZERO);

	pac->pc_armv7pmcs = malloc(sizeof(struct pmc_hw) * armv7_npmcs,
	    M_PMC, M_WAITOK|M_ZERO);
	pc = pmc_pcpu[cpu];
	/* Our rows start at this class's first row index in the MI table. */
	first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV7].pcd_ri;
	KASSERT(pc != NULL, ("[armv7,%d] NULL per-cpu pointer", __LINE__));

	for (i = 0, phw = pac->pc_armv7pmcs; i < armv7_npmcs; i++, phw++) {
		phw->phw_state    = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
		phw->phw_pmc      = NULL;
		pc->pc_hwpmcs[i + first_ri] = phw;
	}

	/* Disable all counters and interrupts, clear pending overflows. */
	pmnc = 0xffffffff;
	cp15_pmcnten_clr(pmnc);
	cp15_pminten_clr(pmnc);
	cp15_pmovsr_set(pmnc);

	/* Enable unit */
	pmnc = cp15_pmcr_get();
	pmnc |= ARMV7_PMNC_ENABLE;
	cp15_pmcr_set(pmnc);

	return 0;
}
445 
446 static int
447 armv7_pcpu_fini(struct pmc_mdep *md, int cpu)
448 {
449 	uint32_t pmnc;
450 
451 	PMCDBG0(MDP, INI, 1, "armv7-pcpu-fini");
452 
453 	pmnc = cp15_pmcr_get();
454 	pmnc &= ~ARMV7_PMNC_ENABLE;
455 	cp15_pmcr_set(pmnc);
456 
457 	pmnc = 0xffffffff;
458 	cp15_pmcnten_clr(pmnc);
459 	cp15_pminten_clr(pmnc);
460 	cp15_pmovsr_set(pmnc);
461 
462 	free(armv7_pcpu[cpu]->pc_armv7pmcs, M_PMC);
463 	free(armv7_pcpu[cpu], M_PMC);
464 	armv7_pcpu[cpu] = NULL;
465 
466 	return 0;
467 }
468 
/*
 * Machine-dependent initialization: probe the PMU (counter count and
 * implementer ID from PMCR), allocate the per-CPU pointer array and the
 * MDEP structure, and fill in the ARMV7 class descriptor and method
 * table used by the MI hwpmc code.
 */
struct pmc_mdep *
pmc_armv7_initialize(void)
{
	struct pmc_mdep *pmc_mdep;
	struct pmc_classdep *pcd;
	int idcode;
	int reg;

	/* PMCR encodes the number of event counters and the ID code. */
	reg = cp15_pmcr_get();
	armv7_npmcs = (reg >> ARMV7_PMNC_N_SHIFT) & \
				ARMV7_PMNC_N_MASK;
	idcode = (reg & ARMV7_IDCODE_MASK) >> ARMV7_IDCODE_SHIFT;

	PMCDBG1(MDP, INI, 1, "armv7-init npmcs=%d", armv7_npmcs);

	/*
	 * Allocate space for pointers to PMC HW descriptors and for
	 * the MDEP structure used by MI code.
	 */
	armv7_pcpu = malloc(sizeof(struct armv7_cpu *) * pmc_cpu_max(),
		M_PMC, M_WAITOK | M_ZERO);

	/* Just one class */
	pmc_mdep = pmc_mdep_alloc(1);

	switch (idcode) {
	case ARMV7_IDCODE_CORTEX_A9:
		pmc_mdep->pmd_cputype = PMC_CPU_ARMV7_CORTEX_A9;
		break;
	default:
	case ARMV7_IDCODE_CORTEX_A8:
		/*
		 * On A8 we implemented common events only,
		 * so use it for the rest of machines.
		 */
		pmc_mdep->pmd_cputype = PMC_CPU_ARMV7_CORTEX_A8;
		break;
	}

	pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV7];
	pcd->pcd_caps  = ARMV7_PMC_CAPS;
	pcd->pcd_class = PMC_CLASS_ARMV7;
	pcd->pcd_num   = armv7_npmcs;
	pcd->pcd_ri    = pmc_mdep->pmd_npmc;	/* first row index of this class */
	pcd->pcd_width = 32;	/* hardware counters are 32 bits wide */

	pcd->pcd_allocate_pmc   = armv7_allocate_pmc;
	pcd->pcd_config_pmc     = armv7_config_pmc;
	pcd->pcd_pcpu_fini      = armv7_pcpu_fini;
	pcd->pcd_pcpu_init      = armv7_pcpu_init;
	pcd->pcd_describe       = armv7_describe;
	pcd->pcd_get_config	= armv7_get_config;
	pcd->pcd_read_pmc       = armv7_read_pmc;
	pcd->pcd_release_pmc    = armv7_release_pmc;
	pcd->pcd_start_pmc      = armv7_start_pmc;
	pcd->pcd_stop_pmc       = armv7_stop_pmc;
	pcd->pcd_write_pmc      = armv7_write_pmc;

	pmc_mdep->pmd_intr = armv7_intr;
	pmc_mdep->pmd_npmc += armv7_npmcs;

	return (pmc_mdep);
}
532 
533 void
534 pmc_armv7_finalize(struct pmc_mdep *md)
535 {
536 	PMCDBG0(MDP, INI, 1, "armv7-finalize");
537 
538 	for (int i = 0; i < pmc_cpu_max(); i++)
539 		KASSERT(armv7_pcpu[i] == NULL,
540 		    ("[armv7,%d] non-null pcpu cpu %d", __LINE__, i));
541 
542 	free(armv7_pcpu, M_PMC);
543 }
544