1 /*-
2 * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
3 * All rights reserved.
4 *
5 * This software was developed by SRI International and the University of
6 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7 * ("CTSRD"), as part of the DARPA CRASH research programme.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/pmc.h>
34 #include <sys/pmckern.h>
35
36 #include <machine/pmc_mdep.h>
37 #include <machine/cpu.h>
38
/* Number of event counters; read from PMCR in pmc_armv7_initialize(). */
static int armv7_npmcs;

/* Maps a pmc event to its hardware event-selector encoding. */
struct armv7_event_code_map {
	enum pmc_event	pe_ev;
	uint8_t		pe_code;
};

/*
 * Pseudo event-selector value used to mark the dedicated cycle
 * counter (addressed through counter index/bit 31, not PMXEVCNTR).
 */
#define	PMC_EV_CPU_CYCLES	0xFF

/*
 * Per-processor information.
 */
struct armv7_cpu {
	struct pmc_hw   *pc_armv7pmcs;	/* one pmc_hw per counter row */
};

/* Per-CPU state, indexed by CPU number; allocated in pmc_armv7_initialize(). */
static struct armv7_cpu **armv7_pcpu;
56
57 /*
58 * Interrupt Enable Set Register
59 */
60 static __inline void
armv7_interrupt_enable(uint32_t pmc)61 armv7_interrupt_enable(uint32_t pmc)
62 {
63 uint32_t reg;
64
65 reg = (1 << pmc);
66 cp15_pminten_set(reg);
67 }
68
69 /*
70 * Interrupt Clear Set Register
71 */
72 static __inline void
armv7_interrupt_disable(uint32_t pmc)73 armv7_interrupt_disable(uint32_t pmc)
74 {
75 uint32_t reg;
76
77 reg = (1 << pmc);
78 cp15_pminten_clr(reg);
79 }
80
81 /*
82 * Counter Set Enable Register
83 */
84 static __inline void
armv7_counter_enable(unsigned int pmc)85 armv7_counter_enable(unsigned int pmc)
86 {
87 uint32_t reg;
88
89 reg = (1 << pmc);
90 cp15_pmcnten_set(reg);
91 }
92
93 /*
94 * Counter Clear Enable Register
95 */
96 static __inline void
armv7_counter_disable(unsigned int pmc)97 armv7_counter_disable(unsigned int pmc)
98 {
99 uint32_t reg;
100
101 reg = (1 << pmc);
102 cp15_pmcnten_clr(reg);
103 }
104
105 /*
106 * Performance Count Register N
107 */
108 static uint32_t
armv7_pmcn_read(unsigned int pmc,uint32_t evsel)109 armv7_pmcn_read(unsigned int pmc, uint32_t evsel)
110 {
111
112 if (evsel == PMC_EV_CPU_CYCLES) {
113 return ((uint32_t)cp15_pmccntr_get());
114 }
115
116 KASSERT(pmc < armv7_npmcs, ("%s: illegal PMC number %d", __func__, pmc));
117
118 cp15_pmselr_set(pmc);
119 return (cp15_pmxevcntr_get());
120 }
121
/*
 * Write 'reg' to event counter 'pmc' via PMSELR/PMXEVCNTR.  The cycle
 * counter is not handled here; callers write PMCCNTR directly.
 * Returns the value written.
 */
static uint32_t
armv7_pmcn_write(unsigned int pmc, uint32_t reg)
{

	KASSERT(pmc < armv7_npmcs, ("%s: illegal PMC number %d", __func__, pmc));

	cp15_pmselr_set(pmc);
	cp15_pmxevcntr_set(reg);

	return (reg);
}
133
/*
 * Allocate a PMC for row 'ri': check that the request is for our
 * class and record the hardware event selection in the per-pmc
 * MD state for later programming by armv7_start_pmc().
 */
static int
armv7_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	enum pmc_event pe;
	uint32_t config;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row index %d", __LINE__, ri));

	/* Only ARMV7-class events can be programmed on these counters. */
	if (a->pm_class != PMC_CLASS_ARMV7)
		return (EINVAL);
	pe = a->pm_ev;

	/* Keep only the hardware event number from the event id. */
	config = (pe & EVENT_ID_MASK);
	pm->pm_md.pm_armv7.pm_armv7_evsel = config;

	PMCDBG2(MDP, ALL, 2, "armv7-allocate ri=%d -> config=0x%x", ri, config);

	return 0;
}
157
158
/*
 * Read the 64-bit software-extended value of the PMC in row 'ri'.
 * The 32-bit hardware count is combined with the per-CPU overflow
 * count maintained here and in armv7_intr().  For sampling-mode
 * PMCs the value is converted back to a reload count.
 */
static int
armv7_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
{
	pmc_value_t tmp;
	register_t s;
	u_int reg;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row index %d", __LINE__, ri));

	/*
	 * Disable interrupts while sampling the counter and overflow
	 * flag so the PMC interrupt handler (which also bumps
	 * pps_overflowcnt) cannot run in between.
	 */
	s = intr_disable();
	tmp = armv7_pmcn_read(ri, pm->pm_md.pm_armv7.pm_armv7_evsel);

	/* Check if counter has overflowed */
	if (pm->pm_md.pm_armv7.pm_armv7_evsel == PMC_EV_CPU_CYCLES)
		reg = (1u << 31);	/* cycle counter overflow bit */
	else
		reg = (1u << ri);

	if ((cp15_pmovsr_get() & reg) != 0) {
		/* Clear Overflow Flag */
		cp15_pmovsr_set(reg);
		pm->pm_pcpu_state[cpu].pps_overflowcnt++;

		/* Reread counter in case we raced. */
		tmp = armv7_pmcn_read(ri, pm->pm_md.pm_armv7.pm_armv7_evsel);
	}
	/* Extend to 64 bits: each overflow accounts for 2^32 counts. */
	tmp += 0x100000000llu * pm->pm_pcpu_state[cpu].pps_overflowcnt;
	intr_restore(s);

	PMCDBG2(MDP, REA, 2, "armv7-read id=%d -> %jd", ri, tmp);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
		/*
		 * Clamp value to 0 if the counter just overflowed,
		 * otherwise the returned reload count would wrap to a
		 * huge value.
		 */
		if ((tmp & (1ull << 63)) == 0)
			tmp = 0;
		else
			tmp = ARMV7_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
	}
	*v = tmp;

	return 0;
}
207
/*
 * Write 64-bit value 'v' to the PMC in row 'ri'.  The upper 32 bits
 * are stored in the software overflow count; the lower 32 bits go to
 * the hardware counter (truncated through the uint32_t register
 * interfaces).  Sampling-mode values are first converted to the
 * counter reload form.
 */
static int
armv7_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
{

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row-index %d", __LINE__, ri));

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = ARMV7_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

	PMCDBG3(MDP, WRI, 1, "armv7-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	pm->pm_pcpu_state[cpu].pps_overflowcnt = v >> 32;
	/* The cycle counter has its own register; others go via PMXEVCNTR. */
	if (pm->pm_md.pm_armv7.pm_armv7_evsel == PMC_EV_CPU_CYCLES)
		cp15_pmccntr_set(v);
	else
		armv7_pmcn_write(ri, v);

	return 0;
}
230
/*
 * Associate (pm != NULL) or disassociate (pm == NULL) a pmc with the
 * hardware descriptor for row 'ri' on 'cpu'.  A row must be
 * unconfigured before a new pmc can be attached to it.
 */
static int
armv7_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	PMCDBG3(MDP, CFG, 1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row-index %d", __LINE__, ri));

	phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[armv7,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
	    __LINE__, pm, phw->phw_pmc));

	phw->phw_pmc = pm;

	return 0;
}
253
254 static int
armv7_start_pmc(int cpu,int ri,struct pmc * pm)255 armv7_start_pmc(int cpu, int ri, struct pmc *pm)
256 {
257 uint32_t config;
258
259 config = pm->pm_md.pm_armv7.pm_armv7_evsel;
260
261 /*
262 * Configure the event selection.
263 */
264 if (config != PMC_EV_CPU_CYCLES) {
265 cp15_pmselr_set(ri);
266 cp15_pmxevtyper_set(config);
267 } else
268 ri = 31;
269
270 /*
271 * Enable the PMC.
272 */
273 armv7_interrupt_enable(ri);
274 armv7_counter_enable(ri);
275
276 return 0;
277 }
278
279 static int
armv7_stop_pmc(int cpu,int ri,struct pmc * pm)280 armv7_stop_pmc(int cpu, int ri, struct pmc *pm)
281 {
282 uint32_t config;
283
284 config = pm->pm_md.pm_armv7.pm_armv7_evsel;
285 if (config == PMC_EV_CPU_CYCLES)
286 ri = 31;
287
288 /*
289 * Disable the PMCs.
290 */
291 armv7_counter_disable(ri);
292 armv7_interrupt_disable(ri);
293
294 return 0;
295 }
296
/*
 * Release the PMC row 'ri'.  The row must already have been
 * deconfigured (phw_pmc == NULL); no hardware action is needed.
 */
static int
armv7_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	struct pmc_hw *phw __diagused;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < armv7_npmcs,
	    ("[armv7,%d] illegal row-index %d", __LINE__, ri));

	phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
	KASSERT(phw->phw_pmc == NULL,
	    ("[armv7,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

	return 0;
}
313
/*
 * PMC overflow interrupt handler.  Scan every row on this CPU for a
 * pending overflow, clear the flag and account for the wrap; for
 * running sampling-mode PMCs additionally post a sample and reload
 * the counter.  Returns 1 if at least one interrupting PMC was found.
 */
static int
armv7_intr(struct trapframe *tf)
{
	int retval, ri;
	struct pmc *pm;
	int error;
	int reg, cpu;

	cpu = curcpu;
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] CPU %d out of range", __LINE__, cpu));

	retval = 0;

	for (ri = 0; ri < armv7_npmcs; ri++) {
		pm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;
		if (pm == NULL)
			continue;

		/* Check if counter has overflowed */
		if (pm->pm_md.pm_armv7.pm_armv7_evsel == PMC_EV_CPU_CYCLES)
			reg = (1u << 31);	/* cycle counter overflow bit */
		else
			reg = (1u << ri);

		if ((cp15_pmovsr_get() & reg) == 0) {
			continue;
		}

		/* Clear Overflow Flag */
		cp15_pmovsr_set(reg);

		retval = 1; /* Found an interrupting PMC. */

		/* Extend the 32-bit hardware counter in software. */
		pm->pm_pcpu_state[cpu].pps_overflowcnt += 1;

		/* Counting-mode PMCs only need the overflow accounting. */
		if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		/* Post a sample; stop the PMC if the core asks us to. */
		error = pmc_process_interrupt(PMC_HR, pm, tf);
		if (error)
			armv7_stop_pmc(cpu, ri, pm);

		/* Reload sampling count */
		armv7_write_pmc(cpu, ri, pm, pm->pm_sc.pm_reloadcount);
	}

	return (retval);
}
366
367 static int
armv7_describe(int cpu,int ri,struct pmc_info * pi,struct pmc ** ppmc)368 armv7_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
369 {
370 struct pmc_hw *phw;
371
372 KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
373 ("[armv7,%d], illegal CPU %d", __LINE__, cpu));
374 KASSERT(ri >= 0 && ri < armv7_npmcs,
375 ("[armv7,%d] row-index %d out of range", __LINE__, ri));
376
377 phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
378
379 snprintf(pi->pm_name, sizeof(pi->pm_name), "ARMV7-%d", ri);
380 pi->pm_class = PMC_CLASS_ARMV7;
381
382 if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
383 pi->pm_enabled = TRUE;
384 *ppmc = phw->phw_pmc;
385 } else {
386 pi->pm_enabled = FALSE;
387 *ppmc = NULL;
388 }
389
390 return (0);
391 }
392
393 static int
armv7_get_config(int cpu,int ri,struct pmc ** ppm)394 armv7_get_config(int cpu, int ri, struct pmc **ppm)
395 {
396
397 *ppm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;
398
399 return 0;
400 }
401
/*
 * Per-CPU initialization: allocate the per-CPU state and hardware
 * descriptors, wire the descriptors into the MI pmc_pcpu table at
 * this class's first row index, reset the PMU to a clean state and
 * enable it.
 */
static int
armv7_pcpu_init(struct pmc_mdep *md, int cpu)
{
	struct armv7_cpu *pac;
	struct pmc_hw *phw;
	struct pmc_cpu *pc;
	uint32_t pmnc;
	int first_ri;
	int i;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[armv7,%d] wrong cpu number %d", __LINE__, cpu));
	PMCDBG0(MDP, INI, 1, "armv7-pcpu-init");

	armv7_pcpu[cpu] = pac = malloc(sizeof(struct armv7_cpu), M_PMC,
	    M_WAITOK|M_ZERO);

	pac->pc_armv7pmcs = malloc(sizeof(struct pmc_hw) * armv7_npmcs,
	    M_PMC, M_WAITOK|M_ZERO);
	pc = pmc_pcpu[cpu];
	first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV7].pcd_ri;
	KASSERT(pc != NULL, ("[armv7,%d] NULL per-cpu pointer", __LINE__));

	/* Initialize each row and publish it in the MI per-cpu table. */
	for (i = 0, phw = pac->pc_armv7pmcs; i < armv7_npmcs; i++, phw++) {
		phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
		phw->phw_pmc = NULL;
		pc->pc_hwpmcs[i + first_ri] = phw;
	}

	/* Disable all counters/interrupts and clear stale overflow flags. */
	pmnc = 0xffffffff;
	cp15_pmcnten_clr(pmnc);
	cp15_pminten_clr(pmnc);
	cp15_pmovsr_set(pmnc);

	/* Enable unit */
	pmnc = cp15_pmcr_get();
	pmnc |= ARMV7_PMNC_ENABLE;
	cp15_pmcr_set(pmnc);

	return 0;
}
444
/*
 * Per-CPU teardown: disable the PMU, quiesce all counters and
 * interrupts, and free the per-CPU state allocated in
 * armv7_pcpu_init().
 */
static int
armv7_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	uint32_t pmnc;

	PMCDBG0(MDP, INI, 1, "armv7-pcpu-fini");

	/* Disable the PMU. */
	pmnc = cp15_pmcr_get();
	pmnc &= ~ARMV7_PMNC_ENABLE;
	cp15_pmcr_set(pmnc);

	/* Disable all counters/interrupts and clear overflow flags. */
	pmnc = 0xffffffff;
	cp15_pmcnten_clr(pmnc);
	cp15_pminten_clr(pmnc);
	cp15_pmovsr_set(pmnc);

	free(armv7_pcpu[cpu]->pc_armv7pmcs, M_PMC);
	free(armv7_pcpu[cpu], M_PMC);
	armv7_pcpu[cpu] = NULL;

	return 0;
}
467
/*
 * Machine-dependent initialization: probe the PMU via PMCR for the
 * number of counters and the implementer id code, allocate the
 * per-CPU pointer array and the MDEP descriptor, and fill in the
 * single ARMV7 class with our method table.  Returns the MDEP for
 * the MI hwpmc code.
 */
struct pmc_mdep *
pmc_armv7_initialize(void)
{
	struct pmc_mdep *pmc_mdep;
	struct pmc_classdep *pcd;
	int idcode;
	int reg;

	/* PMCR.N is the number of event counters implemented. */
	reg = cp15_pmcr_get();
	armv7_npmcs = (reg >> ARMV7_PMNC_N_SHIFT) & \
	    ARMV7_PMNC_N_MASK;
	idcode = (reg & ARMV7_IDCODE_MASK) >> ARMV7_IDCODE_SHIFT;

	PMCDBG1(MDP, INI, 1, "armv7-init npmcs=%d", armv7_npmcs);

	/*
	 * Allocate space for pointers to PMC HW descriptors and for
	 * the MDEP structure used by MI code.
	 */
	armv7_pcpu = malloc(sizeof(struct armv7_cpu *) * pmc_cpu_max(),
	    M_PMC, M_WAITOK | M_ZERO);

	/* Just one class */
	pmc_mdep = pmc_mdep_alloc(1);

	switch (idcode) {
	case ARMV7_IDCODE_CORTEX_A9:
		pmc_mdep->pmd_cputype = PMC_CPU_ARMV7_CORTEX_A9;
		break;
	default:
	case ARMV7_IDCODE_CORTEX_A8:
		/*
		 * On A8 we implemented common events only,
		 * so use it for the rest of machines.
		 */
		pmc_mdep->pmd_cputype = PMC_CPU_ARMV7_CORTEX_A8;
		break;
	}

	/* Describe the ARMV7 class and install the method table. */
	pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV7];
	pcd->pcd_caps  = ARMV7_PMC_CAPS;
	pcd->pcd_class = PMC_CLASS_ARMV7;
	pcd->pcd_num   = armv7_npmcs;
	pcd->pcd_ri    = pmc_mdep->pmd_npmc;
	pcd->pcd_width = 32;

	pcd->pcd_allocate_pmc   = armv7_allocate_pmc;
	pcd->pcd_config_pmc     = armv7_config_pmc;
	pcd->pcd_pcpu_fini      = armv7_pcpu_fini;
	pcd->pcd_pcpu_init      = armv7_pcpu_init;
	pcd->pcd_describe       = armv7_describe;
	pcd->pcd_get_config     = armv7_get_config;
	pcd->pcd_read_pmc       = armv7_read_pmc;
	pcd->pcd_release_pmc    = armv7_release_pmc;
	pcd->pcd_start_pmc      = armv7_start_pmc;
	pcd->pcd_stop_pmc       = armv7_stop_pmc;
	pcd->pcd_write_pmc      = armv7_write_pmc;

	pmc_mdep->pmd_intr = armv7_intr;
	pmc_mdep->pmd_npmc += armv7_npmcs;

	return (pmc_mdep);
}
531
532 void
pmc_armv7_finalize(struct pmc_mdep * md)533 pmc_armv7_finalize(struct pmc_mdep *md)
534 {
535 PMCDBG0(MDP, INI, 1, "armv7-finalize");
536
537 for (int i = 0; i < pmc_cpu_max(); i++)
538 KASSERT(armv7_pcpu[i] == NULL,
539 ("[armv7,%d] non-null pcpu cpu %d", __LINE__, i));
540
541 free(armv7_pcpu, M_PMC);
542 }
543