xref: /freebsd/sys/dev/hwpmc/hwpmc_e500.c (revision d6eb98610fa65663bf0df4574b7cb2c5c4ffda71)
/*-
 * Copyright (c) 2015 Justin Hibbits
 * Copyright (c) 2005, Joseph Koshy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>

#include <machine/pmc_mdep.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include "hwpmc_powerpc.h"

#define	POWERPC_PMC_CAPS	(PMC_CAP_INTERRUPT | PMC_CAP_USER |	\
				 PMC_CAP_SYSTEM | PMC_CAP_EDGE |	\
				 PMC_CAP_THRESHOLD | PMC_CAP_READ |	\
				 PMC_CAP_WRITE | PMC_CAP_INVERT |	\
				 PMC_CAP_QUALIFIER)

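/*
 * A sampling PMC is preloaded (via POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE())
 * with a value whose most significant bit is clear and which becomes set
 * once the requested number of events has been counted, so testing bit 31
 * is sufficient to detect an overflow.
 */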
#define E500_PMC_HAS_OVERFLOWED(x) (e500_pmcn_read(x) & (0x1 << 31))

struct e500_event_code_map {
	enum pmc_event	pe_ev;		/* enum value */
	uint8_t		pe_counter_mask; /* allowed counters, bit mask */
	uint8_t		pe_code;	/* numeric code */
	uint8_t		pe_cpu;		/* e500 core (v1,v2,mc), mask */
};

#define E500_MAX_PMCS	4
#define PMC_PPC_MASK0	(1 << 0)
#define PMC_PPC_MASK1	(1 << 1)
#define PMC_PPC_MASK2	(1 << 2)
#define PMC_PPC_MASK3	(1 << 3)
#define PMC_PPC_MASK_ALL	0x0f
#define PMC_PPC_E500V1		1
#define PMC_PPC_E500V2		2
#define PMC_PPC_E500MC		4
#define PMC_PPC_E500_ANY	7
#define PMC_E500_EVENT(id, mask, number, core) \
	[PMC_EV_E500_##id - PMC_EV_E500_FIRST] = \
	    { .pe_ev = PMC_EV_E500_##id, .pe_counter_mask = mask, \
	      .pe_code = number, .pe_cpu = core }
#define PMC_E500MC_ONLY(id, number) \
	PMC_E500_EVENT(id, PMC_PPC_MASK_ALL, number, PMC_PPC_E500MC)
#define PMC_E500_COMMON(id, number) \
	PMC_E500_EVENT(id, PMC_PPC_MASK_ALL, number, PMC_PPC_E500_ANY)

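/*
 * Table mapping hwpmc(4) event identifiers to the e500 event-select codes
 * programmed into PMLCa[EVENT], together with the counters and core
 * revisions (e500v1, e500v2, e500mc) on which each event is valid.
 */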
static struct e500_event_code_map e500_event_codes[] = {
	PMC_E500_COMMON(CYCLES, 1),
	PMC_E500_COMMON(INSTR_COMPLETED, 2),
	PMC_E500_COMMON(UOPS_COMPLETED, 3),
	PMC_E500_COMMON(INSTR_FETCHED, 4),
	PMC_E500_COMMON(UOPS_DECODED, 5),
	PMC_E500_COMMON(PM_EVENT_TRANSITIONS, 6),
	PMC_E500_COMMON(PM_EVENT_CYCLES, 7),
	PMC_E500_COMMON(BRANCH_INSTRS_COMPLETED, 8),
	PMC_E500_COMMON(LOAD_UOPS_COMPLETED, 9),
	PMC_E500_COMMON(STORE_UOPS_COMPLETED, 10),
	PMC_E500_COMMON(CQ_REDIRECTS, 11),
	PMC_E500_COMMON(BRANCHES_FINISHED, 12),
	PMC_E500_COMMON(TAKEN_BRANCHES_FINISHED, 13),
	PMC_E500_COMMON(FINISHED_UNCOND_BRANCHES_MISS_BTB, 14),
	PMC_E500_COMMON(BRANCH_MISPRED, 15),
	PMC_E500_COMMON(BTB_BRANCH_MISPRED_FROM_DIRECTION, 16),
	PMC_E500_COMMON(BTB_HITS_PSEUDO_HITS, 17),
	PMC_E500_COMMON(CYCLES_DECODE_STALLED, 18),
	PMC_E500_COMMON(CYCLES_ISSUE_STALLED, 19),
	PMC_E500_COMMON(CYCLES_BRANCH_ISSUE_STALLED, 20),
	PMC_E500_COMMON(CYCLES_SU1_SCHED_STALLED, 21),
	PMC_E500_COMMON(CYCLES_SU2_SCHED_STALLED, 22),
	PMC_E500_COMMON(CYCLES_MU_SCHED_STALLED, 23),
	PMC_E500_COMMON(CYCLES_LRU_SCHED_STALLED, 24),
	PMC_E500_COMMON(CYCLES_BU_SCHED_STALLED, 25),
	PMC_E500_COMMON(TOTAL_TRANSLATED, 26),
	PMC_E500_COMMON(LOADS_TRANSLATED, 27),
	PMC_E500_COMMON(STORES_TRANSLATED, 28),
	PMC_E500_COMMON(TOUCHES_TRANSLATED, 29),
	PMC_E500_COMMON(CACHEOPS_TRANSLATED, 30),
	PMC_E500_COMMON(CACHE_INHIBITED_ACCESS_TRANSLATED, 31),
	PMC_E500_COMMON(GUARDED_LOADS_TRANSLATED, 32),
	PMC_E500_COMMON(WRITE_THROUGH_STORES_TRANSLATED, 33),
	PMC_E500_COMMON(MISALIGNED_LOAD_STORE_ACCESS_TRANSLATED, 34),
	PMC_E500_COMMON(TOTAL_ALLOCATED_TO_DLFB, 35),
	PMC_E500_COMMON(LOADS_TRANSLATED_ALLOCATED_TO_DLFB, 36),
	PMC_E500_COMMON(STORES_COMPLETED_ALLOCATED_TO_DLFB, 37),
	PMC_E500_COMMON(TOUCHES_TRANSLATED_ALLOCATED_TO_DLFB, 38),
	PMC_E500_COMMON(STORES_COMPLETED, 39),
	PMC_E500_COMMON(DATA_L1_CACHE_LOCKS, 40),
	PMC_E500_COMMON(DATA_L1_CACHE_RELOADS, 41),
	PMC_E500_COMMON(DATA_L1_CACHE_CASTOUTS, 42),
	PMC_E500_COMMON(LOAD_MISS_DLFB_FULL, 43),
	PMC_E500_COMMON(LOAD_MISS_LDQ_FULL, 44),
	PMC_E500_COMMON(LOAD_GUARDED_MISS, 45),
	PMC_E500_COMMON(STORE_TRANSLATE_WHEN_QUEUE_FULL, 46),
	PMC_E500_COMMON(ADDRESS_COLLISION, 47),
	PMC_E500_COMMON(DATA_MMU_MISS, 48),
	PMC_E500_COMMON(DATA_MMU_BUSY, 49),
	PMC_E500_COMMON(PART2_MISALIGNED_CACHE_ACCESS, 50),
	PMC_E500_COMMON(LOAD_MISS_DLFB_FULL_CYCLES, 51),
	PMC_E500_COMMON(LOAD_MISS_LDQ_FULL_CYCLES, 52),
	PMC_E500_COMMON(LOAD_GUARDED_MISS_CYCLES, 53),
	PMC_E500_COMMON(STORE_TRANSLATE_WHEN_QUEUE_FULL_CYCLES, 54),
	PMC_E500_COMMON(ADDRESS_COLLISION_CYCLES, 55),
	PMC_E500_COMMON(DATA_MMU_MISS_CYCLES, 56),
	PMC_E500_COMMON(DATA_MMU_BUSY_CYCLES, 57),
	PMC_E500_COMMON(PART2_MISALIGNED_CACHE_ACCESS_CYCLES, 58),
	PMC_E500_COMMON(INSTR_L1_CACHE_LOCKS, 59),
	PMC_E500_COMMON(INSTR_L1_CACHE_RELOADS, 60),
	PMC_E500_COMMON(INSTR_L1_CACHE_FETCHES, 61),
	PMC_E500_COMMON(INSTR_MMU_TLB4K_RELOADS, 62),
	PMC_E500_COMMON(INSTR_MMU_VSP_RELOADS, 63),
	PMC_E500_COMMON(DATA_MMU_TLB4K_RELOADS, 64),
	PMC_E500_COMMON(DATA_MMU_VSP_RELOADS, 65),
	PMC_E500_COMMON(L2MMU_MISSES, 66),
	PMC_E500_COMMON(BIU_MASTER_REQUESTS, 67),
	PMC_E500_COMMON(BIU_MASTER_INSTR_SIDE_REQUESTS, 68),
	PMC_E500_COMMON(BIU_MASTER_DATA_SIDE_REQUESTS, 69),
	PMC_E500_COMMON(BIU_MASTER_DATA_SIDE_CASTOUT_REQUESTS, 70),
	PMC_E500_COMMON(BIU_MASTER_RETRIES, 71),
	PMC_E500_COMMON(SNOOP_REQUESTS, 72),
	PMC_E500_COMMON(SNOOP_HITS, 73),
	PMC_E500_COMMON(SNOOP_PUSHES, 74),
	PMC_E500_COMMON(SNOOP_RETRIES, 75),
	PMC_E500_EVENT(DLFB_LOAD_MISS_CYCLES, PMC_PPC_MASK0|PMC_PPC_MASK1,
	    76, PMC_PPC_E500_ANY),
	PMC_E500_EVENT(ILFB_FETCH_MISS_CYCLES, PMC_PPC_MASK0|PMC_PPC_MASK1,
	    77, PMC_PPC_E500_ANY),
	PMC_E500_EVENT(EXT_INPU_INTR_LATENCY_CYCLES, PMC_PPC_MASK0|PMC_PPC_MASK1,
	    78, PMC_PPC_E500_ANY),
	PMC_E500_EVENT(CRIT_INPUT_INTR_LATENCY_CYCLES, PMC_PPC_MASK0|PMC_PPC_MASK1,
	    79, PMC_PPC_E500_ANY),
	PMC_E500_EVENT(EXT_INPUT_INTR_PENDING_LATENCY_CYCLES,
	    PMC_PPC_MASK0|PMC_PPC_MASK1, 80, PMC_PPC_E500_ANY),
	PMC_E500_EVENT(CRIT_INPUT_INTR_PENDING_LATENCY_CYCLES,
	    PMC_PPC_MASK0|PMC_PPC_MASK1, 81, PMC_PPC_E500_ANY),
	PMC_E500_COMMON(PMC0_OVERFLOW, 82),
	PMC_E500_COMMON(PMC1_OVERFLOW, 83),
	PMC_E500_COMMON(PMC2_OVERFLOW, 84),
	PMC_E500_COMMON(PMC3_OVERFLOW, 85),
	PMC_E500_COMMON(INTERRUPTS_TAKEN, 86),
	PMC_E500_COMMON(EXT_INPUT_INTR_TAKEN, 87),
	PMC_E500_COMMON(CRIT_INPUT_INTR_TAKEN, 88),
	PMC_E500_COMMON(SYSCALL_TRAP_INTR, 89),
	PMC_E500_EVENT(TLB_BIT_TRANSITIONS, PMC_PPC_MASK_ALL, 90,
	    PMC_PPC_E500V2 | PMC_PPC_E500MC),
	PMC_E500MC_ONLY(L2_LINEFILL_BUFFER, 91),
	PMC_E500MC_ONLY(LV2_VS, 92),
	PMC_E500MC_ONLY(CASTOUTS_RELEASED, 93),
	PMC_E500MC_ONLY(INTV_ALLOCATIONS, 94),
	PMC_E500MC_ONLY(DLFB_RETRIES_TO_MBAR, 95),
	PMC_E500MC_ONLY(STORE_RETRIES, 96),
	PMC_E500MC_ONLY(STASH_L1_HITS, 97),
	PMC_E500MC_ONLY(STASH_L2_HITS, 98),
	PMC_E500MC_ONLY(STASH_BUSY_1, 99),
	PMC_E500MC_ONLY(STASH_BUSY_2, 100),
	PMC_E500MC_ONLY(STASH_BUSY_3, 101),
	PMC_E500MC_ONLY(STASH_HITS, 102),
	PMC_E500MC_ONLY(STASH_HIT_DLFB, 103),
	PMC_E500MC_ONLY(STASH_REQUESTS, 106),
	PMC_E500MC_ONLY(STASH_REQUESTS_L1, 107),
	PMC_E500MC_ONLY(STASH_REQUESTS_L2, 108),
	PMC_E500MC_ONLY(STALLS_NO_CAQ_OR_COB, 109),
	PMC_E500MC_ONLY(L2_CACHE_ACCESSES, 110),
	PMC_E500MC_ONLY(L2_HIT_CACHE_ACCESSES, 111),
	PMC_E500MC_ONLY(L2_CACHE_DATA_ACCESSES, 112),
	PMC_E500MC_ONLY(L2_CACHE_DATA_HITS, 113),
	PMC_E500MC_ONLY(L2_CACHE_INSTR_ACCESSES, 114),
	PMC_E500MC_ONLY(L2_CACHE_INSTR_HITS, 115),
	PMC_E500MC_ONLY(L2_CACHE_ALLOCATIONS, 116),
	PMC_E500MC_ONLY(L2_CACHE_DATA_ALLOCATIONS, 117),
	PMC_E500MC_ONLY(L2_CACHE_DIRTY_DATA_ALLOCATIONS, 118),
	PMC_E500MC_ONLY(L2_CACHE_INSTR_ALLOCATIONS, 119),
	PMC_E500MC_ONLY(L2_CACHE_UPDATES, 120),
	PMC_E500MC_ONLY(L2_CACHE_CLEAN_UPDATES, 121),
	PMC_E500MC_ONLY(L2_CACHE_DIRTY_UPDATES, 122),
	PMC_E500MC_ONLY(L2_CACHE_CLEAN_REDUNDANT_UPDATES, 123),
	PMC_E500MC_ONLY(L2_CACHE_DIRTY_REDUNDANT_UPDATES, 124),
	PMC_E500MC_ONLY(L2_CACHE_LOCKS, 125),
	PMC_E500MC_ONLY(L2_CACHE_CASTOUTS, 126),
	PMC_E500MC_ONLY(L2_CACHE_DATA_DIRTY_HITS, 127),
	PMC_E500MC_ONLY(INSTR_LFB_WENT_HIGH_PRIORITY, 128),
	PMC_E500MC_ONLY(SNOOP_THROTTLING_TURNED_ON, 129),
	PMC_E500MC_ONLY(L2_CLEAN_LINE_INVALIDATIONS, 130),
	PMC_E500MC_ONLY(L2_INCOHERENT_LINE_INVALIDATIONS, 131),
	PMC_E500MC_ONLY(L2_COHERENT_LINE_INVALIDATIONS, 132),
	PMC_E500MC_ONLY(COHERENT_LOOKUP_MISS_DUE_TO_VALID_BUT_INCOHERENT_MATCHES, 133),
	PMC_E500MC_ONLY(IAC1S_DETECTED, 140),
	PMC_E500MC_ONLY(IAC2S_DETECTED, 141),
	PMC_E500MC_ONLY(DAC1S_DTECTED, 144),
	PMC_E500MC_ONLY(DAC2S_DTECTED, 145),
	PMC_E500MC_ONLY(DVT0_DETECTED, 148),
	PMC_E500MC_ONLY(DVT1_DETECTED, 149),
	PMC_E500MC_ONLY(DVT2_DETECTED, 150),
	PMC_E500MC_ONLY(DVT3_DETECTED, 151),
	PMC_E500MC_ONLY(DVT4_DETECTED, 152),
	PMC_E500MC_ONLY(DVT5_DETECTED, 153),
	PMC_E500MC_ONLY(DVT6_DETECTED, 154),
	PMC_E500MC_ONLY(DVT7_DETECTED, 155),
	PMC_E500MC_ONLY(CYCLES_COMPLETION_STALLED_NEXUS_FIFO_FULL, 156),
	PMC_E500MC_ONLY(FPU_DOUBLE_PUMP, 160),
	PMC_E500MC_ONLY(FPU_FINISH, 161),
	PMC_E500MC_ONLY(FPU_DIVIDE_CYCLES, 162),
	PMC_E500MC_ONLY(FPU_DENORM_INPUT_CYCLES, 163),
	PMC_E500MC_ONLY(FPU_RESULT_STALL_CYCLES, 164),
	PMC_E500MC_ONLY(FPU_FPSCR_FULL_STALL, 165),
	PMC_E500MC_ONLY(FPU_PIPE_SYNC_STALLS, 166),
	PMC_E500MC_ONLY(FPU_INPUT_DATA_STALLS, 167),
	PMC_E500MC_ONLY(DECORATED_LOADS, 176),
	PMC_E500MC_ONLY(DECORATED_STORES, 177),
	PMC_E500MC_ONLY(LOAD_RETRIES, 178),
	PMC_E500MC_ONLY(STWCX_SUCCESSES, 179),
	PMC_E500MC_ONLY(STWCX_FAILURES, 180),
};

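/*
 * Read the raw value of performance monitor counter register PMCn.
 */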
static pmc_value_t
e500_pmcn_read(unsigned int pmc)
{
	switch (pmc) {
	case 0:
		return (mfpmr(PMR_PMC0));
	case 1:
		return (mfpmr(PMR_PMC1));
	case 2:
		return (mfpmr(PMR_PMC2));
	case 3:
		return (mfpmr(PMR_PMC3));
	default:
		panic("Invalid PMC number: %d\n", pmc);
	}
}

static void
e500_pmcn_write(unsigned int pmc, uint32_t val)
{
	switch (pmc) {
	case 0:
		mtpmr(PMR_PMC0, val);
		break;
	case 1:
		mtpmr(PMR_PMC1, val);
		break;
	case 2:
		mtpmr(PMR_PMC2, val);
		break;
	case 3:
		mtpmr(PMR_PMC3, val);
		break;
	default:
		panic("Invalid PMC number: %d\n", pmc);
	}
}

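/*
 * Read PMC 'ri' on CPU 'cpu'.  For a sampling PMC the hardware value is
 * converted back into the number of events remaining before the counter
 * overflows; for a counting PMC the raw value is returned.
 */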
static int
e500_read_pmc(int cpu, int ri, pmc_value_t *v)
{
	struct pmc *pm;
	pmc_value_t tmp;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < E500_MAX_PMCS,
	    ("[powerpc,%d] illegal row index %d", __LINE__, ri));

	pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
	KASSERT(pm != NULL,
	    ("[powerpc,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
	    ri));

	tmp = e500_pmcn_read(ri);
	PMCDBG2(MDP,REA,2,"ppc-read id=%d -> %jd", ri, tmp);
	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		*v = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
	else
		*v = tmp;

	return (0);
}

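/*
 * Write PMC 'ri' on CPU 'cpu'.  A sampling PMC is given its reload count,
 * which is converted into the hardware preload value before being written.
 */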
static int
e500_write_pmc(int cpu, int ri, pmc_value_t v)
{
	struct pmc *pm;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < E500_MAX_PMCS,
	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));

	pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		v = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v);

	PMCDBG3(MDP,WRI,1,"powerpc-write cpu=%d ri=%d v=%jx", cpu, ri, v);

	e500_pmcn_write(ri, v);

	return (0);
}

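/*
 * Attach a PMC descriptor to hardware row 'ri', or detach the current one
 * when 'pm' is NULL.
 */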
static int
e500_config_pmc(int cpu, int ri, struct pmc *pm)
{
	struct pmc_hw *phw;

	PMCDBG3(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < E500_MAX_PMCS,
	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));

	phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];

	KASSERT(pm == NULL || phw->phw_pmc == NULL,
	    ("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
	    __LINE__, pm, phw->phw_pmc));

	phw->phw_pmc = pm;

	return (0);
}

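/*
 * Start PMC 'ri' by writing its precomputed event selection into the
 * corresponding PMLCa register.  Sampling PMCs additionally enable the
 * overflow condition (PMLCax_CE) so that an interrupt is raised.
 */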
static int
e500_start_pmc(int cpu, int ri)
{
	uint32_t config;
	struct pmc *pm;
	struct pmc_hw *phw;

	phw    = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
	pm     = phw->phw_pmc;
	config = pm->pm_md.pm_powerpc.pm_powerpc_evsel;

	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
		config |= PMLCax_CE;

	/* Enable the PMC. */
	switch (ri) {
	case 0:
		mtpmr(PMR_PMLCa0, config);
		break;
	case 1:
		mtpmr(PMR_PMLCa1, config);
		break;
	case 2:
		mtpmr(PMR_PMLCa2, config);
		break;
	case 3:
		mtpmr(PMR_PMLCa3, config);
		break;
	default:
		break;
	}

	return (0);
}

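/*
 * Stop PMC 'ri' by setting the freeze-counter bit (PMLCax_FC) in its PMLCa
 * register; the rest of the configuration is left intact.
 */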
static int
e500_stop_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct pmc_hw *phw;
	register_t pmc_pmlc;

	phw    = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
	pm     = phw->phw_pmc;

	/*
	 * Disable the PMC.
	 */
	switch (ri) {
	case 0:
		pmc_pmlc = mfpmr(PMR_PMLCa0);
		pmc_pmlc |= PMLCax_FC;
		mtpmr(PMR_PMLCa0, pmc_pmlc);
		break;
	case 1:
		pmc_pmlc = mfpmr(PMR_PMLCa1);
		pmc_pmlc |= PMLCax_FC;
		mtpmr(PMR_PMLCa1, pmc_pmlc);
		break;
	case 2:
		pmc_pmlc = mfpmr(PMR_PMLCa2);
		pmc_pmlc |= PMLCax_FC;
		mtpmr(PMR_PMLCa2, pmc_pmlc);
		break;
	case 3:
		pmc_pmlc = mfpmr(PMR_PMLCa3);
		pmc_pmlc |= PMLCax_FC;
		mtpmr(PMR_PMLCa3, pmc_pmlc);
		break;
	default:
		break;
	}
	return (0);
}

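/*
 * Per-CPU initialization: freeze all counters, allocate the per-CPU PMC
 * state, hook each hardware row into the shared pmc_cpu structure, park
 * every counter in the stopped state and finally release the global freeze.
 */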
static int
e500_pcpu_init(struct pmc_mdep *md, int cpu)
{
	int first_ri, i;
	struct pmc_cpu *pc;
	struct powerpc_cpu *pac;
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] wrong cpu number %d", __LINE__, cpu));
	PMCDBG1(MDP,INI,1,"powerpc-init cpu=%d", cpu);

	/* Freeze all counters. */
	mtpmr(PMR_PMGC0, PMGC_FAC | PMGC_PMIE | PMGC_FCECE);

	powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC,
	    M_WAITOK|M_ZERO);
	pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * E500_MAX_PMCS,
	    M_PMC, M_WAITOK|M_ZERO);
	pac->pc_class = PMC_CLASS_E500;
	pc = pmc_pcpu[cpu];
	first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC].pcd_ri;
	KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__));

	for (i = 0, phw = pac->pc_ppcpmcs; i < E500_MAX_PMCS; i++, phw++) {
		phw->phw_state    = PMC_PHW_FLAG_IS_ENABLED |
		    PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
		phw->phw_pmc      = NULL;
		pc->pc_hwpmcs[i + first_ri] = phw;

		/* Initialize the PMC to stopped. */
		e500_stop_pmc(cpu, i);
	}

	/* Unfreeze the counters; keep overflow interrupts enabled. */
	mtpmr(PMR_PMGC0, PMGC_PMIE | PMGC_FCECE);

	return (0);
}

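/*
 * Per-CPU teardown: freeze all counters, clear the performance monitor mark
 * bit in the MSR and release the per-CPU state.
 */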
static int
e500_pcpu_fini(struct pmc_mdep *md, int cpu)
{
	uint32_t pmgc0 = mfpmr(PMR_PMGC0);

	pmgc0 |= PMGC_FAC;
	mtpmr(PMR_PMGC0, pmgc0);
	mtmsr(mfmsr() & ~PSL_PMM);

	free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC);
	free(powerpc_pcpu[cpu], M_PMC);

	return (0);
}

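/*
 * Allocate PMC 'ri' for the requested event: look the event up in
 * e500_event_codes[], check that the running core (identified through the
 * PVR) and this particular counter support it, and construct the PMLCa
 * event selector that e500_start_pmc() will later program.  The freeze
 * bits for user and supervisor state are cleared according to the
 * capabilities requested by the caller.
 */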
static int
e500_allocate_pmc(int cpu, int ri, struct pmc *pm,
    const struct pmc_op_pmcallocate *a)
{
	enum pmc_event pe;
	uint32_t caps, config, counter;
	struct e500_event_code_map *ev;
	uint16_t vers;
	uint8_t pe_cpu_mask;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < E500_MAX_PMCS,
	    ("[powerpc,%d] illegal row index %d", __LINE__, ri));

	caps = a->pm_caps;

	pe = a->pm_ev;
	config = PMLCax_FCS | PMLCax_FCU | PMLCax_FCM1;

	if (pe < PMC_EV_E500_FIRST || pe > PMC_EV_E500_LAST)
		return (EINVAL);

	ev = &e500_event_codes[pe - PMC_EV_E500_FIRST];
	if (ev->pe_code == 0)
		return (EINVAL);

	vers = mfpvr() >> 16;
	switch (vers) {
	case FSL_E500v1:
		pe_cpu_mask = ev->pe_cpu & PMC_PPC_E500V1;
		break;
	case FSL_E500v2:
		pe_cpu_mask = ev->pe_cpu & PMC_PPC_E500V2;
		break;
	case FSL_E500mc:
	case FSL_E5500:
		pe_cpu_mask = ev->pe_cpu & PMC_PPC_E500MC;
		break;
	default:
		pe_cpu_mask = 0;
		break;
	}
	if (pe_cpu_mask == 0)
		return (EINVAL);

	config |= PMLCax_EVENT(ev->pe_code);
	counter = ev->pe_counter_mask;
	if ((counter & (1 << ri)) == 0)
		return (EINVAL);

	if (caps & PMC_CAP_SYSTEM)
		config &= ~PMLCax_FCS;
	if (caps & PMC_CAP_USER)
		config &= ~PMLCax_FCU;
	if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
		config &= ~(PMLCax_FCS|PMLCax_FCU);

	pm->pm_md.pm_powerpc.pm_powerpc_evsel = config;

	PMCDBG2(MDP,ALL,2,"powerpc-allocate ri=%d -> config=0x%x", ri, config);

	return (0);
}

static int
e500_release_pmc(int cpu, int ri, struct pmc *pmc)
{
	struct pmc_hw *phw;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < E500_MAX_PMCS,
	    ("[powerpc,%d] illegal row-index %d", __LINE__, ri));

	phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
	KASSERT(phw->phw_pmc == NULL,
	    ("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));

	return (0);
}

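/*
 * Performance monitor interrupt handler.  Scan all hardware rows for a
 * running, sampling PMC whose counter has overflowed, hand each one to
 * pmc_process_interrupt() (stopping the PMC if that fails) and reload its
 * counter.  If at least one interrupting PMC was found, performance monitor
 * exceptions are re-enabled before returning.
 */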
static int
e500_intr(struct trapframe *tf)
{
	int i, error, retval, cpu;
	uint32_t config;
	struct pmc *pm;
	struct powerpc_cpu *pac;

	cpu = curcpu;
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] out of range CPU %d", __LINE__, cpu));

	PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
	    TRAPF_USERMODE(tf));

	retval = 0;

	pac = powerpc_pcpu[cpu];

	config = mfpmr(PMR_PMGC0) & ~PMGC_FAC;

	/*
	 * Look for all PMCs that have interrupted:
	 * - look for a running, sampling PMC which has overflowed
	 *   and which has a valid 'struct pmc' association.
	 *
	 * If found, we call a helper to process the interrupt.
	 */

	for (i = 0; i < E500_MAX_PMCS; i++) {
		if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
			continue;
		}

		if (!E500_PMC_HAS_OVERFLOWED(i))
			continue;

		retval = 1;	/* Found an interrupting PMC. */

		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		/* Stop the counter if logging fails. */
		error = pmc_process_interrupt(PMC_HR, pm, tf);
		if (error != 0)
			e500_stop_pmc(cpu, i);

		/* Reload the count. */
		e500_write_pmc(cpu, i, pm->pm_sc.pm_reloadcount);
	}

	if (retval)
		counter_u64_add(pmc_stats.pm_intr_processed, 1);
	else
		counter_u64_add(pmc_stats.pm_intr_ignored, 1);

	/* Re-enable PERF exceptions. */
	if (retval)
		mtpmr(PMR_PMGC0, config | PMGC_PMIE);

	return (retval);
}

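/*
 * Fill in the e500 class descriptor and machine-dependent hooks for the
 * PowerPC hwpmc(4) back-end.
 */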
int
pmc_e500_initialize(struct pmc_mdep *pmc_mdep)
{
	struct pmc_classdep *pcd;

	pmc_mdep->pmd_cputype = PMC_CPU_PPC_E500;

	pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_POWERPC];
	pcd->pcd_caps  = POWERPC_PMC_CAPS;
	pcd->pcd_class = PMC_CLASS_E500;
	pcd->pcd_num   = E500_MAX_PMCS;
	pcd->pcd_ri    = pmc_mdep->pmd_npmc;
	pcd->pcd_width = 32;

	pcd->pcd_allocate_pmc   = e500_allocate_pmc;
	pcd->pcd_config_pmc     = e500_config_pmc;
	pcd->pcd_pcpu_fini      = e500_pcpu_fini;
	pcd->pcd_pcpu_init      = e500_pcpu_init;
	pcd->pcd_describe       = powerpc_describe;
	pcd->pcd_get_config     = powerpc_get_config;
	pcd->pcd_read_pmc       = e500_read_pmc;
	pcd->pcd_release_pmc    = e500_release_pmc;
	pcd->pcd_start_pmc      = e500_start_pmc;
	pcd->pcd_stop_pmc       = e500_stop_pmc;
	pcd->pcd_write_pmc      = e500_write_pmc;

	pmc_mdep->pmd_npmc += E500_MAX_PMCS;
	pmc_mdep->pmd_intr  = e500_intr;

	return (0);
}