xref: /linux/arch/mips/kernel/perf_event_mipsxx.c (revision 5f2d44591fb374ae346a3df682d722b68552adc2)
1 /*
2  * Linux performance counter support for MIPS.
3  *
4  * Copyright (C) 2010 MIPS Technologies, Inc.
5  * Copyright (C) 2011 Cavium Networks, Inc.
6  * Author: Deng-Cheng Zhu
7  *
8  * This code is based on the implementation for ARM, which is in turn
9  * based on the sparc64 perf event code and the x86 code. Performance
10  * counter access is based on the MIPS Oprofile code. And the callchain
11  * support references the code of MIPS stacktrace.c.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License version 2 as
15  * published by the Free Software Foundation.
16  */
17 
18 #include <linux/cpumask.h>
19 #include <linux/interrupt.h>
20 #include <linux/smp.h>
21 #include <linux/kernel.h>
22 #include <linux/perf_event.h>
23 #include <linux/uaccess.h>
24 
25 #include <asm/irq.h>
26 #include <asm/irq_regs.h>
27 #include <asm/stacktrace.h>
28 #include <asm/time.h> /* For perf_irq */
29 
30 #define MIPS_MAX_HWEVENTS 4
31 #define MIPS_TCS_PER_COUNTER 2
32 #define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
33 
34 struct cpu_hw_events {
35 	/* Array of events on this cpu. */
36 	struct perf_event	*events[MIPS_MAX_HWEVENTS];
37 
38 	/*
39 	 * Set the bit (indexed by the counter number) when the counter
40 	 * is used for an event.
41 	 */
42 	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
43 
44 	/*
45 	 * Software copy of the control register for each performance counter.
46 	 * MIPS CPUs vary in their performance counter implementations; they
47 	 * may use this copy differently, or not use it at all.
48 	 */
49 	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
50 };
51 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
52 	.saved_ctrl = {0},
53 };
54 
55 /* The description of MIPS performance events. */
56 struct mips_perf_event {
57 	unsigned int event_id;
58 	/*
59 	 * MIPS performance counters are indexed starting from 0.
60 	 * CNTR_EVEN indicates that the event can only be counted on
61 	 * counters with even indexes.
62 	 */
63 	unsigned int cntr_mask;
64 	#define CNTR_EVEN	0x55555555
65 	#define CNTR_ODD	0xaaaaaaaa
66 	#define CNTR_ALL	0xffffffff
67 #ifdef CONFIG_MIPS_MT_SMP
68 	enum {
69 		T  = 0,
70 		V  = 1,
71 		P  = 2,
72 	} range;
73 #else
74 	#define T
75 	#define V
76 	#define P
77 #endif
78 };
79 
80 static struct mips_perf_event raw_event;
81 static DEFINE_MUTEX(raw_event_mutex);
82 
83 #define C(x) PERF_COUNT_HW_CACHE_##x
84 
85 struct mips_pmu {
86 	u64		max_period;
87 	u64		valid_count;
88 	u64		overflow;
89 	const char	*name;
90 	int		irq;
91 	u64		(*read_counter)(unsigned int idx);
92 	void		(*write_counter)(unsigned int idx, u64 val);
93 	const struct mips_perf_event *(*map_raw_event)(u64 config);
94 	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
95 	const struct mips_perf_event (*cache_event_map)
96 				[PERF_COUNT_HW_CACHE_MAX]
97 				[PERF_COUNT_HW_CACHE_OP_MAX]
98 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
99 	unsigned int	num_counters;
100 };
101 
102 static struct mips_pmu mipspmu;
103 
104 #define M_CONFIG1_PC	(1 << 4)
105 
106 #define M_PERFCTL_EXL			(1	<<  0)
107 #define M_PERFCTL_KERNEL		(1	<<  1)
108 #define M_PERFCTL_SUPERVISOR		(1	<<  2)
109 #define M_PERFCTL_USER			(1	<<  3)
110 #define M_PERFCTL_INTERRUPT_ENABLE	(1	<<  4)
111 #define M_PERFCTL_EVENT(event)		(((event) & 0x3ff)  << 5)
112 #define M_PERFCTL_VPEID(vpe)		((vpe)	  << 16)
113 
114 #ifdef CONFIG_CPU_BMIPS5000
115 #define M_PERFCTL_MT_EN(filter)		0
116 #else /* !CONFIG_CPU_BMIPS5000 */
117 #define M_PERFCTL_MT_EN(filter)		((filter) << 20)
118 #endif /* CONFIG_CPU_BMIPS5000 */
119 
120 #define	   M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
121 #define	   M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
122 #define	   M_TC_EN_TC			M_PERFCTL_MT_EN(2)
123 #define M_PERFCTL_TCID(tcid)		((tcid)	  << 22)
124 #define M_PERFCTL_WIDE			(1	<< 30)
125 #define M_PERFCTL_MORE			(1	<< 31)
126 #define M_PERFCTL_TC			(1	<< 30)
127 
128 #define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
129 					M_PERFCTL_KERNEL |		\
130 					M_PERFCTL_USER |		\
131 					M_PERFCTL_SUPERVISOR |		\
132 					M_PERFCTL_INTERRUPT_ENABLE)
133 
134 #ifdef CONFIG_MIPS_MT_SMP
135 #define M_PERFCTL_CONFIG_MASK		0x3fff801f
136 #else
137 #define M_PERFCTL_CONFIG_MASK		0x1f
138 #endif
139 #define M_PERFCTL_EVENT_MASK		0xfe0
140 
141 
142 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
143 static int cpu_has_mipsmt_pertccounters;
144 
145 static DEFINE_RWLOCK(pmuint_rwlock);
146 
147 #if defined(CONFIG_CPU_BMIPS5000)
148 #define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
149 			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
150 #else
151 /*
152  * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
153  * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
154  */
155 #define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
156 			 0 : smp_processor_id())
157 #endif
158 
159 /* Copied from op_model_mipsxx.c */
160 static unsigned int vpe_shift(void)
161 {
162 	if (num_possible_cpus() > 1)
163 		return 1;
164 
165 	return 0;
166 }
167 
168 static unsigned int counters_total_to_per_cpu(unsigned int counters)
169 {
170 	return counters >> vpe_shift();
171 }
172 
173 #else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
174 #define vpe_id()	0
175 
176 #endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
177 
178 static void resume_local_counters(void);
179 static void pause_local_counters(void);
180 static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
181 static int mipsxx_pmu_handle_shared_irq(void);
182 
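/*
 * When two VPEs share the performance counters (see MIPS_TCS_PER_COUNTER
 * above), VPE 1's counter indexes are rotated by two, so its logical
 * counters 0 and 1 map onto physical counters 2 and 3.
 */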
183 static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
184 {
185 	if (vpe_id() == 1)
186 		idx = (idx + 2) & 3;
187 	return idx;
188 }
189 
190 static u64 mipsxx_pmu_read_counter(unsigned int idx)
191 {
192 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
193 
194 	switch (idx) {
195 	case 0:
196 		/*
197 		 * The counters are unsigned; we must cast to truncate
198 		 * off the high bits.
199 		 */
200 		return (u32)read_c0_perfcntr0();
201 	case 1:
202 		return (u32)read_c0_perfcntr1();
203 	case 2:
204 		return (u32)read_c0_perfcntr2();
205 	case 3:
206 		return (u32)read_c0_perfcntr3();
207 	default:
208 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
209 		return 0;
210 	}
211 }
212 
213 static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
214 {
215 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
216 
217 	switch (idx) {
218 	case 0:
219 		return read_c0_perfcntr0_64();
220 	case 1:
221 		return read_c0_perfcntr1_64();
222 	case 2:
223 		return read_c0_perfcntr2_64();
224 	case 3:
225 		return read_c0_perfcntr3_64();
226 	default:
227 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
228 		return 0;
229 	}
230 }
231 
232 static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
233 {
234 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
235 
236 	switch (idx) {
237 	case 0:
238 		write_c0_perfcntr0(val);
239 		return;
240 	case 1:
241 		write_c0_perfcntr1(val);
242 		return;
243 	case 2:
244 		write_c0_perfcntr2(val);
245 		return;
246 	case 3:
247 		write_c0_perfcntr3(val);
248 		return;
249 	}
250 }
251 
252 static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
253 {
254 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
255 
256 	switch (idx) {
257 	case 0:
258 		write_c0_perfcntr0_64(val);
259 		return;
260 	case 1:
261 		write_c0_perfcntr1_64(val);
262 		return;
263 	case 2:
264 		write_c0_perfcntr2_64(val);
265 		return;
266 	case 3:
267 		write_c0_perfcntr3_64(val);
268 		return;
269 	}
270 }
271 
272 static unsigned int mipsxx_pmu_read_control(unsigned int idx)
273 {
274 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
275 
276 	switch (idx) {
277 	case 0:
278 		return read_c0_perfctrl0();
279 	case 1:
280 		return read_c0_perfctrl1();
281 	case 2:
282 		return read_c0_perfctrl2();
283 	case 3:
284 		return read_c0_perfctrl3();
285 	default:
286 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
287 		return 0;
288 	}
289 }
290 
291 static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
292 {
293 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
294 
295 	switch (idx) {
296 	case 0:
297 		write_c0_perfctrl0(val);
298 		return;
299 	case 1:
300 		write_c0_perfctrl1(val);
301 		return;
302 	case 2:
303 		write_c0_perfctrl2(val);
304 		return;
305 	case 3:
306 		write_c0_perfctrl3(val);
307 		return;
308 	}
309 }
310 
311 static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
312 				    struct hw_perf_event *hwc)
313 {
314 	int i;
315 
316 	/*
317 	 * We only need to care about the counter mask here; the range has
318 	 * already been checked when the event was initialized.
319 	 */
320 	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
321 
322 	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
323 		/*
324 		 * Note that some MIPS perf events can be counted by both
325 		 * even and odd counters, whereas many others can only be
326 		 * counted by even _or_ odd counters. This introduces an
327 		 * issue: when an event of the former kind takes the counter
328 		 * that an event of the latter kind wants to use, counter
329 		 * allocation for the latter event will fail. If the two
330 		 * could be dynamically swapped, both would fit, but we
331 		 * leave this issue alone for now.
332 		 */
333 		if (test_bit(i, &cntr_mask) &&
334 			!test_and_set_bit(i, cpuc->used_mask))
335 			return i;
336 	}
337 
338 	return -EAGAIN;
339 }
340 
341 static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
342 {
343 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
344 
345 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
346 
347 	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
348 		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
349 		/* Make sure the interrupt is enabled. */
350 		M_PERFCTL_INTERRUPT_ENABLE;
351 	if (IS_ENABLED(CONFIG_CPU_BMIPS5000))
352 		/* enable the counter for the calling thread */
353 		cpuc->saved_ctrl[idx] |=
354 			(1 << (12 + vpe_id())) | M_PERFCTL_TC;
355 
356 	/*
357 	 * We do not actually let the counter run. Leave it until start().
358 	 */
359 }
360 
361 static void mipsxx_pmu_disable_event(int idx)
362 {
363 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
364 	unsigned long flags;
365 
366 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
367 
368 	local_irq_save(flags);
369 	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
370 		~M_PERFCTL_COUNT_EVENT_WHENEVER;
371 	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
372 	local_irq_restore(flags);
373 }
374 
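/*
 * Program the hardware counter so that it overflows after "left" more
 * events: the counter is preloaded with (overflow - left). Returns
 * nonzero when a new sample period has been started.
 */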
375 static int mipspmu_event_set_period(struct perf_event *event,
376 				    struct hw_perf_event *hwc,
377 				    int idx)
378 {
379 	u64 left = local64_read(&hwc->period_left);
380 	u64 period = hwc->sample_period;
381 	int ret = 0;
382 
383 	if (unlikely((left + period) & (1ULL << 63))) {
384 		/* left underflowed by more than period. */
385 		left = period;
386 		local64_set(&hwc->period_left, left);
387 		hwc->last_period = period;
388 		ret = 1;
389 	} else	if (unlikely((left + period) <= period)) {
390 		/* left underflowed by less than period. */
391 		left += period;
392 		local64_set(&hwc->period_left, left);
393 		hwc->last_period = period;
394 		ret = 1;
395 	}
396 
397 	if (left > mipspmu.max_period) {
398 		left = mipspmu.max_period;
399 		local64_set(&hwc->period_left, left);
400 	}
401 
402 	local64_set(&hwc->prev_count, mipspmu.overflow - left);
403 
404 	mipspmu.write_counter(idx, mipspmu.overflow - left);
405 
406 	perf_event_update_userpage(event);
407 
408 	return ret;
409 }
410 
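/*
 * Fold the delta since the last read of the hardware counter into the
 * event count and the remaining sample period. The cmpxchg loop copes
 * with a racing update of prev_count, e.g. from the overflow interrupt.
 */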
411 static void mipspmu_event_update(struct perf_event *event,
412 				 struct hw_perf_event *hwc,
413 				 int idx)
414 {
415 	u64 prev_raw_count, new_raw_count;
416 	u64 delta;
417 
418 again:
419 	prev_raw_count = local64_read(&hwc->prev_count);
420 	new_raw_count = mipspmu.read_counter(idx);
421 
422 	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
423 				new_raw_count) != prev_raw_count)
424 		goto again;
425 
426 	delta = new_raw_count - prev_raw_count;
427 
428 	local64_add(delta, &event->count);
429 	local64_sub(delta, &hwc->period_left);
430 }
431 
432 static void mipspmu_start(struct perf_event *event, int flags)
433 {
434 	struct hw_perf_event *hwc = &event->hw;
435 
436 	if (flags & PERF_EF_RELOAD)
437 		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
438 
439 	hwc->state = 0;
440 
441 	/* Set the period for the event. */
442 	mipspmu_event_set_period(event, hwc, hwc->idx);
443 
444 	/* Enable the event. */
445 	mipsxx_pmu_enable_event(hwc, hwc->idx);
446 }
447 
448 static void mipspmu_stop(struct perf_event *event, int flags)
449 {
450 	struct hw_perf_event *hwc = &event->hw;
451 
452 	if (!(hwc->state & PERF_HES_STOPPED)) {
453 		/* We are working on a local event. */
454 		mipsxx_pmu_disable_event(hwc->idx);
455 		barrier();
456 		mipspmu_event_update(event, hwc, hwc->idx);
457 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
458 	}
459 }
460 
461 static int mipspmu_add(struct perf_event *event, int flags)
462 {
463 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
464 	struct hw_perf_event *hwc = &event->hw;
465 	int idx;
466 	int err = 0;
467 
468 	perf_pmu_disable(event->pmu);
469 
470 	/* Look for a free counter for this event. */
471 	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
472 	if (idx < 0) {
473 		err = idx;
474 		goto out;
475 	}
476 
477 	/*
478 	 * If there is an event in the counter we are going to use then
479 	 * make sure it is disabled.
480 	 */
481 	event->hw.idx = idx;
482 	mipsxx_pmu_disable_event(idx);
483 	cpuc->events[idx] = event;
484 
485 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
486 	if (flags & PERF_EF_START)
487 		mipspmu_start(event, PERF_EF_RELOAD);
488 
489 	/* Propagate our changes to the userspace mapping. */
490 	perf_event_update_userpage(event);
491 
492 out:
493 	perf_pmu_enable(event->pmu);
494 	return err;
495 }
496 
497 static void mipspmu_del(struct perf_event *event, int flags)
498 {
499 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
500 	struct hw_perf_event *hwc = &event->hw;
501 	int idx = hwc->idx;
502 
503 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
504 
505 	mipspmu_stop(event, PERF_EF_UPDATE);
506 	cpuc->events[idx] = NULL;
507 	clear_bit(idx, cpuc->used_mask);
508 
509 	perf_event_update_userpage(event);
510 }
511 
512 static void mipspmu_read(struct perf_event *event)
513 {
514 	struct hw_perf_event *hwc = &event->hw;
515 
516 	/* Don't read disabled counters! */
517 	if (hwc->idx < 0)
518 		return;
519 
520 	mipspmu_event_update(event, hwc, hwc->idx);
521 }
522 
523 static void mipspmu_enable(struct pmu *pmu)
524 {
525 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
526 	write_unlock(&pmuint_rwlock);
527 #endif
528 	resume_local_counters();
529 }
530 
531 /*
532  * MIPS performance counters can be per-TC. The control registers cannot
533  * be directly accessed across CPUs, so global control would require
534  * cross-CPU calls. on_each_cpu() could help, but we cannot guarantee
535  * that this function is called with interrupts enabled. So here we
536  * pause the local counters, then grab a rwlock and leave the counters
537  * on other CPUs alone. If a counter interrupt is raised while we own
538  * the write lock, the handler on that CPU simply pauses its local
539  * counters and spins. Also, we know we won't be migrated to another
540  * CPU after pausing the local counters and before grabbing the lock.
541  */
542 static void mipspmu_disable(struct pmu *pmu)
543 {
544 	pause_local_counters();
545 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
546 	write_lock(&pmuint_rwlock);
547 #endif
548 }
549 
550 static atomic_t active_events = ATOMIC_INIT(0);
551 static DEFINE_MUTEX(pmu_reserve_mutex);
552 static int (*save_perf_irq)(void);
553 
554 static int mipspmu_get_irq(void)
555 {
556 	int err;
557 
558 	if (mipspmu.irq >= 0) {
559 		/* Request my own irq handler. */
560 		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
561 				  IRQF_PERCPU | IRQF_NOBALANCING |
562 				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
563 				  IRQF_SHARED,
564 				  "mips_perf_pmu", &mipspmu);
565 		if (err) {
566 			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
567 				mipspmu.irq);
568 		}
569 	} else if (cp0_perfcount_irq < 0) {
570 		/*
571 		 * We are sharing the irq number with the timer interrupt.
572 		 */
573 		save_perf_irq = perf_irq;
574 		perf_irq = mipsxx_pmu_handle_shared_irq;
575 		err = 0;
576 	} else {
577 		pr_warn("The platform hasn't properly defined its interrupt controller\n");
578 		err = -ENOENT;
579 	}
580 
581 	return err;
582 }
583 
584 static void mipspmu_free_irq(void)
585 {
586 	if (mipspmu.irq >= 0)
587 		free_irq(mipspmu.irq, &mipspmu);
588 	else if (cp0_perfcount_irq < 0)
589 		perf_irq = save_perf_irq;
590 }
591 
592 /*
593  * mipsxx/rm9000/loongson2 have different performance counters; each has
594  * its own specific low-level init routine.
595  */
596 static void reset_counters(void *arg);
597 static int __hw_perf_event_init(struct perf_event *event);
598 
599 static void hw_perf_event_destroy(struct perf_event *event)
600 {
601 	if (atomic_dec_and_mutex_lock(&active_events,
602 				&pmu_reserve_mutex)) {
603 		/*
604 		 * We must not call the destroy function with interrupts
605 		 * disabled.
606 		 */
607 		on_each_cpu(reset_counters,
608 			(void *)(long)mipspmu.num_counters, 1);
609 		mipspmu_free_irq();
610 		mutex_unlock(&pmu_reserve_mutex);
611 	}
612 }
613 
614 static int mipspmu_event_init(struct perf_event *event)
615 {
616 	int err = 0;
617 
618 	/* does not support taken branch sampling */
619 	if (has_branch_stack(event))
620 		return -EOPNOTSUPP;
621 
622 	switch (event->attr.type) {
623 	case PERF_TYPE_RAW:
624 	case PERF_TYPE_HARDWARE:
625 	case PERF_TYPE_HW_CACHE:
626 		break;
627 
628 	default:
629 		return -ENOENT;
630 	}
631 
632 	if (event->cpu >= nr_cpumask_bits ||
633 	    (event->cpu >= 0 && !cpu_online(event->cpu)))
634 		return -ENODEV;
635 
636 	if (!atomic_inc_not_zero(&active_events)) {
637 		mutex_lock(&pmu_reserve_mutex);
638 		if (atomic_read(&active_events) == 0)
639 			err = mipspmu_get_irq();
640 
641 		if (!err)
642 			atomic_inc(&active_events);
643 		mutex_unlock(&pmu_reserve_mutex);
644 	}
645 
646 	if (err)
647 		return err;
648 
649 	return __hw_perf_event_init(event);
650 }
651 
652 static struct pmu pmu = {
653 	.pmu_enable	= mipspmu_enable,
654 	.pmu_disable	= mipspmu_disable,
655 	.event_init	= mipspmu_event_init,
656 	.add		= mipspmu_add,
657 	.del		= mipspmu_del,
658 	.start		= mipspmu_start,
659 	.stop		= mipspmu_stop,
660 	.read		= mipspmu_read,
661 };
662 
663 static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
664 {
665 /*
666  * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
667  * event_id.
668  */
669 #ifdef CONFIG_MIPS_MT_SMP
670 	return ((unsigned int)pev->range << 24) |
671 		(pev->cntr_mask & 0xffff00) |
672 		(pev->event_id & 0xff);
673 #else
674 	return (pev->cntr_mask & 0xffff00) |
675 		(pev->event_id & 0xff);
676 #endif
677 }
678 
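/*
 * As a worked example of the encoding above (assuming CONFIG_MIPS_MT_SMP):
 * the PERF_COUNT_HW_INSTRUCTIONS descriptor { 0x01, CNTR_EVEN | CNTR_ODD, T }
 * from mipsxxcore_event_map encodes to
 * (T << 24) | ((CNTR_EVEN | CNTR_ODD) & 0xffff00) | 0x01 = 0x00ffff01.
 * mipsxx_pmu_alloc_counter() later recovers the counter mask as
 * (event_base >> 8) & 0xffff, and mipsxx_pmu_enable_event() recovers the
 * event id as event_base & 0xff.
 */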
679 static const struct mips_perf_event *mipspmu_map_general_event(int idx)
680 {
681 
682 	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
683 		return ERR_PTR(-EOPNOTSUPP);
684 	return &(*mipspmu.general_event_map)[idx];
685 }
686 
687 static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
688 {
689 	unsigned int cache_type, cache_op, cache_result;
690 	const struct mips_perf_event *pev;
691 
692 	cache_type = (config >> 0) & 0xff;
693 	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
694 		return ERR_PTR(-EINVAL);
695 
696 	cache_op = (config >> 8) & 0xff;
697 	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
698 		return ERR_PTR(-EINVAL);
699 
700 	cache_result = (config >> 16) & 0xff;
701 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
702 		return ERR_PTR(-EINVAL);
703 
704 	pev = &((*mipspmu.cache_event_map)
705 					[cache_type]
706 					[cache_op]
707 					[cache_result]);
708 
709 	if (pev->cntr_mask == 0)
710 		return ERR_PTR(-EOPNOTSUPP);
711 
712 	return pev;
713 
714 }
715 
716 static int validate_group(struct perf_event *event)
717 {
718 	struct perf_event *sibling, *leader = event->group_leader;
719 	struct cpu_hw_events fake_cpuc;
720 
721 	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
722 
723 	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
724 		return -EINVAL;
725 
726 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
727 		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
728 			return -EINVAL;
729 	}
730 
731 	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
732 		return -EINVAL;
733 
734 	return 0;
735 }
736 
737 /* This is needed by specific irq handlers in perf_event_*.c */
738 static void handle_associated_event(struct cpu_hw_events *cpuc,
739 				    int idx, struct perf_sample_data *data,
740 				    struct pt_regs *regs)
741 {
742 	struct perf_event *event = cpuc->events[idx];
743 	struct hw_perf_event *hwc = &event->hw;
744 
745 	mipspmu_event_update(event, hwc, idx);
746 	data->period = event->hw.last_period;
747 	if (!mipspmu_event_set_period(event, hwc, idx))
748 		return;
749 
750 	if (perf_event_overflow(event, data, regs))
751 		mipsxx_pmu_disable_event(idx);
752 }
753 
754 
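/*
 * Probe the number of implemented counters: Config1.PC indicates whether
 * any performance counters are present at all, and the "more" (M) bit in
 * each counter's control register indicates that a further counter
 * follows it.
 */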
755 static int __n_counters(void)
756 {
757 	if (!(read_c0_config1() & M_CONFIG1_PC))
758 		return 0;
759 	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
760 		return 1;
761 	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
762 		return 2;
763 	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
764 		return 3;
765 
766 	return 4;
767 }
768 
769 static int n_counters(void)
770 {
771 	int counters;
772 
773 	switch (current_cpu_type()) {
774 	case CPU_R10000:
775 		counters = 2;
776 		break;
777 
778 	case CPU_R12000:
779 	case CPU_R14000:
780 		counters = 4;
781 		break;
782 
783 	default:
784 		counters = __n_counters();
785 	}
786 
787 	return counters;
788 }
789 
790 static void reset_counters(void *arg)
791 {
792 	int counters = (int)(long)arg;
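	/*
	 * Each case below intentionally falls through, so that a request to
	 * reset N counters also clears the control and count registers of
	 * every lower-numbered counter.
	 */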
793 	switch (counters) {
794 	case 4:
795 		mipsxx_pmu_write_control(3, 0);
796 		mipspmu.write_counter(3, 0);
797 	case 3:
798 		mipsxx_pmu_write_control(2, 0);
799 		mipspmu.write_counter(2, 0);
800 	case 2:
801 		mipsxx_pmu_write_control(1, 0);
802 		mipspmu.write_counter(1, 0);
803 	case 1:
804 		mipsxx_pmu_write_control(0, 0);
805 		mipspmu.write_counter(0, 0);
806 	}
807 }
808 
809 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
810 static const struct mips_perf_event mipsxxcore_event_map
811 				[PERF_COUNT_HW_MAX] = {
812 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
813 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
814 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
815 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
816 };
817 
818 /* 74K/proAptiv core has different branch event code. */
819 static const struct mips_perf_event mipsxxcore_event_map2
820 				[PERF_COUNT_HW_MAX] = {
821 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
822 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
823 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
824 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
825 };
826 
827 static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
828 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
829 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
830 	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
831 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL	 },
832 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
833 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
834 	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
835 };
836 
837 static const struct mips_perf_event bmips5000_event_map
838 				[PERF_COUNT_HW_MAX] = {
839 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
840 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
841 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
842 };
843 
844 static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
845 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
846 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
847 	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
848 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
849 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
850 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
851 };
852 
853 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
854 static const struct mips_perf_event mipsxxcore_cache_map
855 				[PERF_COUNT_HW_CACHE_MAX]
856 				[PERF_COUNT_HW_CACHE_OP_MAX]
857 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
858 [C(L1D)] = {
859 	/*
860 	 * Like some other architectures (e.g. ARM), the performance
861 	 * counters don't differentiate between read and write
862 	 * accesses/misses, so this isn't strictly correct, but it's the
863 	 * best we can do. Writes and reads get combined.
864 	 */
865 	[C(OP_READ)] = {
866 		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
867 		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
868 	},
869 	[C(OP_WRITE)] = {
870 		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
871 		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
872 	},
873 },
874 [C(L1I)] = {
875 	[C(OP_READ)] = {
876 		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
877 		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
878 	},
879 	[C(OP_WRITE)] = {
880 		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
881 		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
882 	},
883 	[C(OP_PREFETCH)] = {
884 		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
885 		/*
886 		 * Note that MIPS has only "hit" events countable for
887 		 * the prefetch operation.
888 		 */
889 	},
890 },
891 [C(LL)] = {
892 	[C(OP_READ)] = {
893 		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
894 		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
895 	},
896 	[C(OP_WRITE)] = {
897 		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
898 		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
899 	},
900 },
901 [C(DTLB)] = {
902 	[C(OP_READ)] = {
903 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
904 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
905 	},
906 	[C(OP_WRITE)] = {
907 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
908 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
909 	},
910 },
911 [C(ITLB)] = {
912 	[C(OP_READ)] = {
913 		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
914 		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
915 	},
916 	[C(OP_WRITE)] = {
917 		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
918 		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
919 	},
920 },
921 [C(BPU)] = {
922 	/* Using the same code for *HW_BRANCH* */
923 	[C(OP_READ)] = {
924 		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
925 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
926 	},
927 	[C(OP_WRITE)] = {
928 		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
929 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
930 	},
931 },
932 };
933 
934 /* 74K/proAptiv core has completely different cache event map. */
935 static const struct mips_perf_event mipsxxcore_cache_map2
936 				[PERF_COUNT_HW_CACHE_MAX]
937 				[PERF_COUNT_HW_CACHE_OP_MAX]
938 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
939 [C(L1D)] = {
940 	/*
941 	 * Like some other architectures (e.g. ARM), the performance
942 	 * counters don't differentiate between read and write
943 	 * accesses/misses, so this isn't strictly correct, but it's the
944 	 * best we can do. Writes and reads get combined.
945 	 */
946 	[C(OP_READ)] = {
947 		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
948 		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
949 	},
950 	[C(OP_WRITE)] = {
951 		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
952 		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
953 	},
954 },
955 [C(L1I)] = {
956 	[C(OP_READ)] = {
957 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
958 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
959 	},
960 	[C(OP_WRITE)] = {
961 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
962 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
963 	},
964 	[C(OP_PREFETCH)] = {
965 		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
966 		/*
967 		 * Note that MIPS has only "hit" events countable for
968 		 * the prefetch operation.
969 		 */
970 	},
971 },
972 [C(LL)] = {
973 	[C(OP_READ)] = {
974 		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
975 		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
976 	},
977 	[C(OP_WRITE)] = {
978 		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
979 		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
980 	},
981 },
982 /*
983  * 74K core does not have specific DTLB events. proAptiv core has
984  * "speculative" DTLB events which are numbered 0x63 (even/odd) and
985  * not included here. One can use raw events if really needed.
986  */
987 [C(ITLB)] = {
988 	[C(OP_READ)] = {
989 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
990 		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
991 	},
992 	[C(OP_WRITE)] = {
993 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
994 		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
995 	},
996 },
997 [C(BPU)] = {
998 	/* Using the same code for *HW_BRANCH* */
999 	[C(OP_READ)] = {
1000 		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1001 		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1002 	},
1003 	[C(OP_WRITE)] = {
1004 		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1005 		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1006 	},
1007 },
1008 };
1009 
1010 /* BMIPS5000 */
1011 static const struct mips_perf_event bmips5000_cache_map
1012 				[PERF_COUNT_HW_CACHE_MAX]
1013 				[PERF_COUNT_HW_CACHE_OP_MAX]
1014 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1015 [C(L1D)] = {
1016 	/*
1017 	 * Like some other architectures (e.g. ARM), the performance
1018 	 * counters don't differentiate between read and write
1019 	 * accesses/misses, so this isn't strictly correct, but it's the
1020 	 * best we can do. Writes and reads get combined.
1021 	 */
1022 	[C(OP_READ)] = {
1023 		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1024 		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1025 	},
1026 	[C(OP_WRITE)] = {
1027 		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1028 		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1029 	},
1030 },
1031 [C(L1I)] = {
1032 	[C(OP_READ)] = {
1033 		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1034 		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1035 	},
1036 	[C(OP_WRITE)] = {
1037 		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1038 		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1039 	},
1040 	[C(OP_PREFETCH)] = {
1041 		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
1042 		/*
1043 		 * Note that MIPS has only "hit" events countable for
1044 		 * the prefetch operation.
1045 		 */
1046 	},
1047 },
1048 [C(LL)] = {
1049 	[C(OP_READ)] = {
1050 		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1051 		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1052 	},
1053 	[C(OP_WRITE)] = {
1054 		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1055 		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1056 	},
1057 },
1058 [C(BPU)] = {
1059 	/* Using the same code for *HW_BRANCH* */
1060 	[C(OP_READ)] = {
1061 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1062 	},
1063 	[C(OP_WRITE)] = {
1064 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1065 	},
1066 },
1067 };
1068 
1069 
1070 static const struct mips_perf_event octeon_cache_map
1071 				[PERF_COUNT_HW_CACHE_MAX]
1072 				[PERF_COUNT_HW_CACHE_OP_MAX]
1073 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1074 [C(L1D)] = {
1075 	[C(OP_READ)] = {
1076 		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
1077 		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
1078 	},
1079 	[C(OP_WRITE)] = {
1080 		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
1081 	},
1082 },
1083 [C(L1I)] = {
1084 	[C(OP_READ)] = {
1085 		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
1086 	},
1087 	[C(OP_PREFETCH)] = {
1088 		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
1089 	},
1090 },
1091 [C(DTLB)] = {
1092 	/*
1093 	 * Only general DTLB misses are counted, so use the same event
1094 	 * for read and write.
1095 	 */
1096 	[C(OP_READ)] = {
1097 		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1098 	},
1099 	[C(OP_WRITE)] = {
1100 		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1101 	},
1102 },
1103 [C(ITLB)] = {
1104 	[C(OP_READ)] = {
1105 		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
1106 	},
1107 },
1108 };
1109 
1110 static const struct mips_perf_event xlp_cache_map
1111 				[PERF_COUNT_HW_CACHE_MAX]
1112 				[PERF_COUNT_HW_CACHE_OP_MAX]
1113 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1114 [C(L1D)] = {
1115 	[C(OP_READ)] = {
1116 		[C(RESULT_ACCESS)]	= { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
1117 		[C(RESULT_MISS)]	= { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
1118 	},
1119 	[C(OP_WRITE)] = {
1120 		[C(RESULT_ACCESS)]	= { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
1121 		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
1122 	},
1123 },
1124 [C(L1I)] = {
1125 	[C(OP_READ)] = {
1126 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
1127 		[C(RESULT_MISS)]	= { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
1128 	},
1129 },
1130 [C(LL)] = {
1131 	[C(OP_READ)] = {
1132 		[C(RESULT_ACCESS)]	= { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
1133 		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
1134 	},
1135 	[C(OP_WRITE)] = {
1136 		[C(RESULT_ACCESS)]	= { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
1137 		[C(RESULT_MISS)]	= { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
1138 	},
1139 },
1140 [C(DTLB)] = {
1141 	/*
1142 	 * Only general DTLB misses are counted, so use the same event
1143 	 * for read and write.
1144 	 */
1145 	[C(OP_READ)] = {
1146 		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
1147 	},
1148 	[C(OP_WRITE)] = {
1149 		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
1150 	},
1151 },
1152 [C(ITLB)] = {
1153 	[C(OP_READ)] = {
1154 		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
1155 	},
1156 	[C(OP_WRITE)] = {
1157 		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
1158 	},
1159 },
1160 [C(BPU)] = {
1161 	[C(OP_READ)] = {
1162 		[C(RESULT_MISS)]	= { 0x25, CNTR_ALL },
1163 	},
1164 },
1165 };
1166 
1167 #ifdef CONFIG_MIPS_MT_SMP
1168 static void check_and_calc_range(struct perf_event *event,
1169 				 const struct mips_perf_event *pev)
1170 {
1171 	struct hw_perf_event *hwc = &event->hw;
1172 
1173 	if (event->cpu >= 0) {
1174 		if (pev->range > V) {
1175 			/*
1176 			 * The user selected an event that is processor
1177 			 * wide, while expecting it to be VPE wide.
1178 			 */
1179 			hwc->config_base |= M_TC_EN_ALL;
1180 		} else {
1181 			/*
1182 			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
1183 			 * for both CPUs.
1184 			 */
1185 			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
1186 			hwc->config_base |= M_TC_EN_VPE;
1187 		}
1188 	} else
1189 		hwc->config_base |= M_TC_EN_ALL;
1190 }
1191 #else
1192 static void check_and_calc_range(struct perf_event *event,
1193 				 const struct mips_perf_event *pev)
1194 {
1195 }
1196 #endif
1197 
1198 static int __hw_perf_event_init(struct perf_event *event)
1199 {
1200 	struct perf_event_attr *attr = &event->attr;
1201 	struct hw_perf_event *hwc = &event->hw;
1202 	const struct mips_perf_event *pev;
1203 	int err;
1204 
1205 	/* Map a generic perf event onto its MIPS event descriptor. */
1206 	if (PERF_TYPE_HARDWARE == event->attr.type) {
1207 		if (event->attr.config >= PERF_COUNT_HW_MAX)
1208 			return -EINVAL;
1209 		pev = mipspmu_map_general_event(event->attr.config);
1210 	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
1211 		pev = mipspmu_map_cache_event(event->attr.config);
1212 	} else if (PERF_TYPE_RAW == event->attr.type) {
1213 		/* We are working on the global raw event. */
1214 		mutex_lock(&raw_event_mutex);
1215 		pev = mipspmu.map_raw_event(event->attr.config);
1216 	} else {
1217 		/* The event type is not (yet) supported. */
1218 		return -EOPNOTSUPP;
1219 	}
1220 
1221 	if (IS_ERR(pev)) {
1222 		if (PERF_TYPE_RAW == event->attr.type)
1223 			mutex_unlock(&raw_event_mutex);
1224 		return PTR_ERR(pev);
1225 	}
1226 
1227 	/*
1228 	 * We allow maximum flexibility in how each individual counter shared
1229 	 * by a single CPU operates (mode exclusion and counting range).
1230 	 */
1231 	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
1232 
1233 	/* Calculate range bits and validate it. */
1234 	if (num_possible_cpus() > 1)
1235 		check_and_calc_range(event, pev);
1236 
1237 	hwc->event_base = mipspmu_perf_event_encode(pev);
1238 	if (PERF_TYPE_RAW == event->attr.type)
1239 		mutex_unlock(&raw_event_mutex);
1240 
1241 	if (!attr->exclude_user)
1242 		hwc->config_base |= M_PERFCTL_USER;
1243 	if (!attr->exclude_kernel) {
1244 		hwc->config_base |= M_PERFCTL_KERNEL;
1245 		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
1246 		hwc->config_base |= M_PERFCTL_EXL;
1247 	}
1248 	if (!attr->exclude_hv)
1249 		hwc->config_base |= M_PERFCTL_SUPERVISOR;
1250 
1251 	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
1252 	/*
1253 	 * The event may belong to another CPU; we do not assign it a
1254 	 * local counter for now.
1255 	 */
1256 	hwc->idx = -1;
1257 	hwc->config = 0;
1258 
1259 	if (!hwc->sample_period) {
1260 		hwc->sample_period  = mipspmu.max_period;
1261 		hwc->last_period    = hwc->sample_period;
1262 		local64_set(&hwc->period_left, hwc->sample_period);
1263 	}
1264 
1265 	err = 0;
1266 	if (event->group_leader != event)
1267 		err = validate_group(event);
1268 
1269 	event->destroy = hw_perf_event_destroy;
1270 
1271 	if (err)
1272 		event->destroy(event);
1273 
1274 	return err;
1275 }
1276 
1277 static void pause_local_counters(void)
1278 {
1279 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1280 	int ctr = mipspmu.num_counters;
1281 	unsigned long flags;
1282 
1283 	local_irq_save(flags);
1284 	do {
1285 		ctr--;
1286 		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
1287 		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
1288 					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1289 	} while (ctr > 0);
1290 	local_irq_restore(flags);
1291 }
1292 
1293 static void resume_local_counters(void)
1294 {
1295 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1296 	int ctr = mipspmu.num_counters;
1297 
1298 	do {
1299 		ctr--;
1300 		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
1301 	} while (ctr > 0);
1302 }
1303 
1304 static int mipsxx_pmu_handle_shared_irq(void)
1305 {
1306 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1307 	struct perf_sample_data data;
1308 	unsigned int counters = mipspmu.num_counters;
1309 	u64 counter;
1310 	int handled = IRQ_NONE;
1311 	struct pt_regs *regs;
1312 
1313 	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
1314 		return handled;
1315 	/*
1316 	 * First we pause the local counters, so that once we hold the lock
1317 	 * here, the counters are all paused. If the lock is write-held due
1318 	 * to mipspmu_disable(), this interrupt handler will be delayed.
1319 	 *
1320 	 * See also mipspmu_start().
1321 	 */
1322 	pause_local_counters();
1323 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1324 	read_lock(&pmuint_rwlock);
1325 #endif
1326 
1327 	regs = get_irq_regs();
1328 
1329 	perf_sample_data_init(&data, 0, 0);
1330 
1331 	switch (counters) {
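	/*
	 * Check each in-use counter for an overflow, from the highest
	 * numbered counter downwards; the cases fall through intentionally.
	 */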
1332 #define HANDLE_COUNTER(n)						\
1333 	case n + 1:							\
1334 		if (test_bit(n, cpuc->used_mask)) {			\
1335 			counter = mipspmu.read_counter(n);		\
1336 			if (counter & mipspmu.overflow) {		\
1337 				handle_associated_event(cpuc, n, &data, regs); \
1338 				handled = IRQ_HANDLED;			\
1339 			}						\
1340 		}
1341 	HANDLE_COUNTER(3)
1342 	HANDLE_COUNTER(2)
1343 	HANDLE_COUNTER(1)
1344 	HANDLE_COUNTER(0)
1345 	}
1346 
1347 	/*
1348 	 * Do all the work for the pending perf events. We can do this
1349 	 * in here because the performance counter interrupt is a regular
1350 	 * interrupt, not NMI.
1351 	 */
1352 	if (handled == IRQ_HANDLED)
1353 		irq_work_run();
1354 
1355 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1356 	read_unlock(&pmuint_rwlock);
1357 #endif
1358 	resume_local_counters();
1359 	return handled;
1360 }
1361 
1362 static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1363 {
1364 	return mipsxx_pmu_handle_shared_irq();
1365 }
1366 
1367 /* 24K */
1368 #define IS_BOTH_COUNTERS_24K_EVENT(b)					\
1369 	((b) == 0 || (b) == 1 || (b) == 11)
1370 
1371 /* 34K */
1372 #define IS_BOTH_COUNTERS_34K_EVENT(b)					\
1373 	((b) == 0 || (b) == 1 || (b) == 11)
1374 #ifdef CONFIG_MIPS_MT_SMP
1375 #define IS_RANGE_P_34K_EVENT(r, b)					\
1376 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1377 	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
1378 	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
1379 	 ((b) >= 64 && (b) <= 67))
1380 #define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
1381 #endif
1382 
1383 /* 74K */
1384 #define IS_BOTH_COUNTERS_74K_EVENT(b)					\
1385 	((b) == 0 || (b) == 1)
1386 
1387 /* proAptiv */
1388 #define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
1389 	((b) == 0 || (b) == 1)
1390 /* P5600 */
1391 #define IS_BOTH_COUNTERS_P5600_EVENT(b)					\
1392 	((b) == 0 || (b) == 1)
1393 
1394 /* 1004K */
1395 #define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
1396 	((b) == 0 || (b) == 1 || (b) == 11)
1397 #ifdef CONFIG_MIPS_MT_SMP
1398 #define IS_RANGE_P_1004K_EVENT(r, b)					\
1399 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1400 	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
1401 	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
1402 	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
1403 	 ((b) >= 64 && (b) <= 67))
1404 #define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
1405 #endif
1406 
1407 /* interAptiv */
1408 #define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
1409 	((b) == 0 || (b) == 1 || (b) == 11)
1410 #ifdef CONFIG_MIPS_MT_SMP
1411 /* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
1412 #define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
1413 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1414 	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
1415 	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
1416 	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
1417 	 ((b) >= 64 && (b) <= 67))
1418 #define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
1419 #endif
1420 
1421 /* BMIPS5000 */
1422 #define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
1423 	((b) == 0 || (b) == 1)
1424 
1425 
1426 /*
1427  * For most cores the user can use raw events 0-255, where 0-127 are for the
1428  * events of even counters and 128-255 for odd counters; bit 7 acts as the
1429  * even/odd bank selector. So, for example, if the user wants event number 15
1430  * on the odd counters (as listed in the core's user manual), 128 must be
1431  * added to 15 to form the event config, i.e. 143 (0x8F) is the value to
1432  * use.
1433  *
1434  * Some newer cores have even more events, in which case the user can use raw
1435  * events 0-511, where 0-255 are for the events of even counters and 256-511
1436  * for odd counters, so bit 8 acts as the even/odd bank selector.
1437  */
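/*
 * As an illustration (not specific to any one core): requesting event
 * number 15 on the odd counter bank would use raw config 0x8f, e.g.
 * "perf stat -e r8f ...". mipsxx_pmu_map_raw_event() below then extracts
 * base_id = 0x0f and restricts the event to odd counters.
 */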
1438 static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1439 {
1440 	/* currently most cores have 7-bit event numbers */
1441 	unsigned int raw_id = config & 0xff;
1442 	unsigned int base_id = raw_id & 0x7f;
1443 
1444 	switch (current_cpu_type()) {
1445 	case CPU_24K:
1446 		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
1447 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1448 		else
1449 			raw_event.cntr_mask =
1450 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1451 #ifdef CONFIG_MIPS_MT_SMP
1452 		/*
1453 		 * This is actually doing nothing. Non-multithreading
1454 		 * CPUs will not check and calculate the range.
1455 		 */
1456 		raw_event.range = P;
1457 #endif
1458 		break;
1459 	case CPU_34K:
1460 		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
1461 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1462 		else
1463 			raw_event.cntr_mask =
1464 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1465 #ifdef CONFIG_MIPS_MT_SMP
1466 		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
1467 			raw_event.range = P;
1468 		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
1469 			raw_event.range = V;
1470 		else
1471 			raw_event.range = T;
1472 #endif
1473 		break;
1474 	case CPU_74K:
1475 	case CPU_1074K:
1476 		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
1477 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1478 		else
1479 			raw_event.cntr_mask =
1480 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1481 #ifdef CONFIG_MIPS_MT_SMP
1482 		raw_event.range = P;
1483 #endif
1484 		break;
1485 	case CPU_PROAPTIV:
1486 		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
1487 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1488 		else
1489 			raw_event.cntr_mask =
1490 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1491 #ifdef CONFIG_MIPS_MT_SMP
1492 		raw_event.range = P;
1493 #endif
1494 		break;
1495 	case CPU_P5600:
1496 		/* 8-bit event numbers */
1497 		raw_id = config & 0x1ff;
1498 		base_id = raw_id & 0xff;
1499 		if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
1500 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1501 		else
1502 			raw_event.cntr_mask =
1503 				raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
1504 #ifdef CONFIG_MIPS_MT_SMP
1505 		raw_event.range = P;
1506 #endif
1507 		break;
1508 	case CPU_1004K:
1509 		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
1510 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1511 		else
1512 			raw_event.cntr_mask =
1513 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1514 #ifdef CONFIG_MIPS_MT_SMP
1515 		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
1516 			raw_event.range = P;
1517 		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
1518 			raw_event.range = V;
1519 		else
1520 			raw_event.range = T;
1521 #endif
1522 		break;
1523 	case CPU_INTERAPTIV:
1524 		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
1525 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1526 		else
1527 			raw_event.cntr_mask =
1528 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1529 #ifdef CONFIG_MIPS_MT_SMP
1530 		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
1531 			raw_event.range = P;
1532 		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
1533 			raw_event.range = V;
1534 		else
1535 			raw_event.range = T;
1536 #endif
1537 		break;
1538 	case CPU_BMIPS5000:
1539 		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
1540 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1541 		else
1542 			raw_event.cntr_mask =
1543 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1544 	}
1545 
1546 	raw_event.event_id = base_id;
1547 
1548 	return &raw_event;
1549 }
1550 
1551 static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
1552 {
1553 	unsigned int raw_id = config & 0xff;
1554 	unsigned int base_id = raw_id & 0x7f;
1555 
1556 
1557 	raw_event.cntr_mask = CNTR_ALL;
1558 	raw_event.event_id = base_id;
1559 
1560 	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
1561 		if (base_id > 0x42)
1562 			return ERR_PTR(-EOPNOTSUPP);
1563 	} else {
1564 		if (base_id > 0x3a)
1565 			return ERR_PTR(-EOPNOTSUPP);
1566 	}
1567 
1568 	switch (base_id) {
1569 	case 0x00:
1570 	case 0x0f:
1571 	case 0x1e:
1572 	case 0x1f:
1573 	case 0x2f:
1574 	case 0x34:
1575 	case 0x3b ... 0x3f:
1576 		return ERR_PTR(-EOPNOTSUPP);
1577 	default:
1578 		break;
1579 	}
1580 
1581 	return &raw_event;
1582 }
1583 
1584 static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
1585 {
1586 	unsigned int raw_id = config & 0xff;
1587 
1588 	/* Only 1-63 are defined */
1589 	if ((raw_id < 0x01) || (raw_id > 0x3f))
1590 		return ERR_PTR(-EOPNOTSUPP);
1591 
1592 	raw_event.cntr_mask = CNTR_ALL;
1593 	raw_event.event_id = raw_id;
1594 
1595 	return &raw_event;
1596 }
1597 
1598 static int __init
1599 init_hw_perf_events(void)
1600 {
1601 	int counters, irq;
1602 	int counter_bits;
1603 
1604 	pr_info("Performance counters: ");
1605 
1606 	counters = n_counters();
1607 	if (counters == 0) {
1608 		pr_cont("No available PMU.\n");
1609 		return -ENODEV;
1610 	}
1611 
1612 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1613 	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
1614 	if (!cpu_has_mipsmt_pertccounters)
1615 		counters = counters_total_to_per_cpu(counters);
1616 #endif
1617 
1618 	if (get_c0_perfcount_int)
1619 		irq = get_c0_perfcount_int();
1620 	else if (cp0_perfcount_irq >= 0)
1621 		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1622 	else
1623 		irq = -1;
1624 
1625 	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
1626 
1627 	switch (current_cpu_type()) {
1628 	case CPU_24K:
1629 		mipspmu.name = "mips/24K";
1630 		mipspmu.general_event_map = &mipsxxcore_event_map;
1631 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1632 		break;
1633 	case CPU_34K:
1634 		mipspmu.name = "mips/34K";
1635 		mipspmu.general_event_map = &mipsxxcore_event_map;
1636 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1637 		break;
1638 	case CPU_74K:
1639 		mipspmu.name = "mips/74K";
1640 		mipspmu.general_event_map = &mipsxxcore_event_map2;
1641 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1642 		break;
1643 	case CPU_PROAPTIV:
1644 		mipspmu.name = "mips/proAptiv";
1645 		mipspmu.general_event_map = &mipsxxcore_event_map2;
1646 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1647 		break;
1648 	case CPU_P5600:
1649 		mipspmu.name = "mips/P5600";
1650 		mipspmu.general_event_map = &mipsxxcore_event_map2;
1651 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1652 		break;
1653 	case CPU_1004K:
1654 		mipspmu.name = "mips/1004K";
1655 		mipspmu.general_event_map = &mipsxxcore_event_map;
1656 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1657 		break;
1658 	case CPU_1074K:
1659 		mipspmu.name = "mips/1074K";
1660 		mipspmu.general_event_map = &mipsxxcore_event_map;
1661 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1662 		break;
1663 	case CPU_INTERAPTIV:
1664 		mipspmu.name = "mips/interAptiv";
1665 		mipspmu.general_event_map = &mipsxxcore_event_map;
1666 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1667 		break;
1668 	case CPU_LOONGSON1:
1669 		mipspmu.name = "mips/loongson1";
1670 		mipspmu.general_event_map = &mipsxxcore_event_map;
1671 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1672 		break;
1673 	case CPU_CAVIUM_OCTEON:
1674 	case CPU_CAVIUM_OCTEON_PLUS:
1675 	case CPU_CAVIUM_OCTEON2:
1676 		mipspmu.name = "octeon";
1677 		mipspmu.general_event_map = &octeon_event_map;
1678 		mipspmu.cache_event_map = &octeon_cache_map;
1679 		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
1680 		break;
1681 	case CPU_BMIPS5000:
1682 		mipspmu.name = "BMIPS5000";
1683 		mipspmu.general_event_map = &bmips5000_event_map;
1684 		mipspmu.cache_event_map = &bmips5000_cache_map;
1685 		break;
1686 	case CPU_XLP:
1687 		mipspmu.name = "xlp";
1688 		mipspmu.general_event_map = &xlp_event_map;
1689 		mipspmu.cache_event_map = &xlp_cache_map;
1690 		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
1691 		break;
1692 	default:
1693 		pr_cont("Either hardware does not support performance "
1694 			"counters, or not yet implemented.\n");
1695 		return -ENODEV;
1696 	}
1697 
1698 	mipspmu.num_counters = counters;
1699 	mipspmu.irq = irq;
1700 
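	/*
	 * M_PERFCTL_WIDE in perf control register 0 indicates that the
	 * counters are 64 bits wide; size the sample period and the
	 * overflow bit accordingly.
	 */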
1701 	if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
1702 		mipspmu.max_period = (1ULL << 63) - 1;
1703 		mipspmu.valid_count = (1ULL << 63) - 1;
1704 		mipspmu.overflow = 1ULL << 63;
1705 		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
1706 		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
1707 		counter_bits = 64;
1708 	} else {
1709 		mipspmu.max_period = (1ULL << 31) - 1;
1710 		mipspmu.valid_count = (1ULL << 31) - 1;
1711 		mipspmu.overflow = 1ULL << 31;
1712 		mipspmu.read_counter = mipsxx_pmu_read_counter;
1713 		mipspmu.write_counter = mipsxx_pmu_write_counter;
1714 		counter_bits = 32;
1715 	}
1716 
1717 	on_each_cpu(reset_counters, (void *)(long)counters, 1);
1718 
1719 	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
1720 		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
1721 		irq < 0 ? " (share with timer interrupt)" : "");
1722 
1723 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1724 
1725 	return 0;
1726 }
1727 early_initcall(init_hw_perf_events);
1728