/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>

#include "kstack.h"

/* Sparc64 chips have two performance counters, 32 bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */
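
/* An illustrative aside, based on the SPARC instruction format rather
 * than anything in this driver: "sethi %hi(0xfc000), %g0" writes the
 * 22-bit immediate 0x3f0 into %g0, the always-zero register, so it
 * has no architectural effect.  Since the canonical NOP is encoded as
 * "sethi 0, %g0", a sethi to %g0 with this particular nonzero
 * immediate never appears in compiler output, which is what makes it
 * safe to count.
 */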

#define MAX_HWEVENTS			2
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1
#define PIC_NO_INDEX			-1

struct cpu_hw_events {
	/* Number of events currently scheduled onto this cpu.
	 * This tells how many entries in the arrays below
	 * are valid.
	 */
	int			n_events;

	/* Number of new events added since the last hw_perf_disable().
	 * This works because the perf event layer always adds new
	 * events inside of a perf_{disable,enable}() sequence.
	 */
	int			n_added;

	/* Array of events currently scheduled on this cpu.  */
	struct perf_event	*event[MAX_HWEVENTS];

	/* Array of encoded longs, specifying the %pcr register
	 * encoding and the mask of PIC counters this event can
	 * be scheduled on.  See perf_event_encode() et al.
	 */
	unsigned long		events[MAX_HWEVENTS];

	/* The current counter index assigned to an event.  When the
	 * event hasn't been programmed into the cpu yet, this will
	 * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
	 * we ought to schedule the event.
	 */
	int			current_idx[MAX_HWEVENTS];

	/* Software copy of the %pcr register on this cpu.  */
	u64			pcr;

	/* Enabled/disabled state.  */
	int			enabled;

	unsigned int		group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

/* An event map describes the characteristics of a performance
 * counter event.  In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 */
struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

/* Encode a perf_event_map entry into a long.  */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static u8 perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}
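
/* Example round trip through the helpers above, using the ultra3
 * cache-references entry { 0x0009, PIC_LOWER } defined below:
 *
 *	perf_event_encode(pmap)  yields (0x0009 << 16) | 0x02 = 0x00090002
 *	perf_event_get_msk(val)  yields 0x00090002 & 0xff = 0x02 (PIC_LOWER)
 *	perf_event_get_enc(val)  yields 0x00090002 >> 16  = 0x0009
 */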

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
};
150 
151 static const struct perf_event_map ultra3_perfmon_event_map[] = {
152 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
153 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
154 	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
155 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
156 };
157 
158 static const struct perf_event_map *ultra3_event_map(int event_id)
159 {
160 	return &ultra3_perfmon_event_map[event_id];
161 }
162 
163 static const cache_map_t ultra3_cache_map = {
164 [C(L1D)] = {
165 	[C(OP_READ)] = {
166 		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
167 		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
168 	},
169 	[C(OP_WRITE)] = {
170 		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
171 		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
172 	},
173 	[C(OP_PREFETCH)] = {
174 		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
175 		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
176 	},
177 },
178 [C(L1I)] = {
179 	[C(OP_READ)] = {
180 		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
181 		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
182 	},
183 	[ C(OP_WRITE) ] = {
184 		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
185 		[ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
186 	},
187 	[ C(OP_PREFETCH) ] = {
188 		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
189 		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
190 	},
191 },
192 [C(LL)] = {
193 	[C(OP_READ)] = {
194 		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
195 		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
196 	},
197 	[C(OP_WRITE)] = {
198 		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
199 		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
200 	},
201 	[C(OP_PREFETCH)] = {
202 		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
203 		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
204 	},
205 },
206 [C(DTLB)] = {
207 	[C(OP_READ)] = {
208 		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
209 		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
210 	},
211 	[ C(OP_WRITE) ] = {
212 		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
213 		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
214 	},
215 	[ C(OP_PREFETCH) ] = {
216 		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
217 		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
218 	},
219 },
220 [C(ITLB)] = {
221 	[C(OP_READ)] = {
222 		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
223 		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
224 	},
225 	[ C(OP_WRITE) ] = {
226 		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
227 		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
228 	},
229 	[ C(OP_PREFETCH) ] = {
230 		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
231 		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
232 	},
233 },
234 [C(BPU)] = {
235 	[C(OP_READ)] = {
236 		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
237 		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
238 	},
239 	[ C(OP_WRITE) ] = {
240 		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
241 		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
242 	},
243 	[ C(OP_PREFETCH) ] = {
244 		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
245 		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
246 	},
247 },
248 };
249 
250 static const struct sparc_pmu ultra3_pmu = {
251 	.event_map	= ultra3_event_map,
252 	.cache_map	= &ultra3_cache_map,
253 	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
254 	.upper_shift	= 11,
255 	.lower_shift	= 4,
256 	.event_mask	= 0x3f,
257 	.upper_nop	= 0x1c,
258 	.lower_nop	= 0x14,
259 };
260 
261 /* Niagara1 is very limited.  The upper PIC is hard-locked to count
262  * only instructions, so it is free running which creates all kinds of
263  * problems.  Some hardware designs make one wonder if the creator
264  * even looked at how this stuff gets used by software.
265  */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
	.upper_shift	= 0,
	.lower_shift	= 4,
	.event_mask	= 0x7,
	.upper_nop	= 0x0,
	.lower_nop	= 0x0,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b04, PIC_UPPER | PIC_LOWER },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
	.upper_shift	= 19,
	.lower_shift	= 6,
	.event_mask	= 0xfff,
	.hv_bit		= 0x8,
	.irq_bit	= 0x30,
	.upper_nop	= 0x220,
	.lower_nop	= 0x220,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}
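
/* For example, with the ultra3 settings above (upper_shift = 11,
 * lower_shift = 4, event_mask = 0x3f), event code 0x09 occupies bits
 * 16:11 of %pcr when scheduled on the upper counter and bits 9:4 on
 * the lower one; the corresponding mask_for_index() values below are
 * 0x1f800 and 0x3f0.
 */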

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}

static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = cpuc->pcr;
	val &= ~mask;
	val |= hwc->config;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}

static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val;

	val = cpuc->pcr;
	val &= ~mask;
	val |= nop;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}

static u32 read_pmc(int idx)
{
	u64 val;

	read_pic(val);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	read_pic(pic);
	pic &= ~mask;
	pic |= val;
	write_pic(pic);
}
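
/* Both 32-bit counters live in the one 64-bit %pic register, so
 * read_pmc() above selects a half and write_pmc() performs a
 * read-modify-write that leaves the other counter's half intact;
 * e.g. a write to the upper counter only replaces bits 63:32.
 */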

static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
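
/* The shifts above compute (new - prev) modulo 2^32 and sign-extend
 * the result, so counter wraparound is handled.  Worked example: if
 * prev_raw_count was 0xfffffff0 and the counter wrapped around to
 * 0x00000010, then ((0x10 << 32) - (0xfffffff0 << 32)) >> 32 = 0x20,
 * i.e. 32 events elapsed across the wrap.
 */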

static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	local64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
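
/* The counter is programmed with the negated period so that it hits
 * the 0xffffffff -> 0 overflow interrupt after exactly "left" events.
 * E.g. for a sample period of 10000, prev_count and the PIC are set
 * to (u64)-10000 & 0xffffffff = 0xffffd8f0, which is 10000 counts
 * short of the 32-bit overflow.
 */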

/* If performance event entries have been added, move existing
 * events around (if necessary) and then assign new entries to
 * counters.
 */
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
{
	int i;

	if (!cpuc->n_added)
		goto out;

	/* Read in the counters which are moving.  */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];

		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
		    cpuc->current_idx[i] != cp->hw.idx) {
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events.  */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		pcr &= ~mask_for_index(idx);
		pcr |= event_encoding(enc, idx);
	}
out:
	return pcr;
}

void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 pcr;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	pcr = cpuc->pcr;
	if (!cpuc->n_events) {
		pcr = 0;
	} else {
		pcr = maybe_change_configuration(cpuc, pcr);

		/* We require that all of the events have the same
		 * configuration, so just fetch the settings from the
		 * first entry.
		 */
		cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
	}

	pcr_ops->write(cpuc->pcr);
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	val = cpuc->pcr;
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}

static void sparc_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;
	int i;

	local_irq_save(flags);
	perf_disable();

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event[i]) {
			int idx = cpuc->current_idx[i];

			/* Shift remaining entries down into
			 * the existing slot.
			 */
			while (++i < cpuc->n_events) {
				cpuc->event[i - 1] = cpuc->event[i];
				cpuc->events[i - 1] = cpuc->events[i];
				cpuc->current_idx[i - 1] =
					cpuc->current_idx[i];
			}

			/* Absorb the final count and turn off the
			 * event.
			 */
			sparc_pmu_disable_event(cpuc, hwc, idx);
			barrier();
			sparc_perf_event_update(event, hwc, idx);

			perf_event_update_userpage(event);

			cpuc->n_events--;
			break;
		}
	}

	perf_enable();
	local_irq_restore(flags);
}

static int active_event_index(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		if (cpuc->event[i] == event)
			break;
	}
	BUG_ON(i == cpuc->n_events);
	return cpuc->current_idx[i];
}

static void sparc_pmu_read(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, idx);
}

static void sparc_pmu_unthrottle(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_pmu_enable_event(cpuc, hwc, idx);
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

static void perf_stop_nmi_watchdog(void *unused)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	stop_nmi_watchdog(NULL);
	cpuc->pcr = pcr_ops->read();
}

void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}

static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}
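
/* Example: a request for L1D read misses arrives from the core perf
 * code as config = C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
 * = 0x010000, which the decoder above maps to { 0x0302, ... } on
 * Niagara2 or { 0x09, PIC_UPPER } on ultra3.
 */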

static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}

/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success.  These are pending indexes.  When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots, see the code in
 * maybe_change_configuration() for details.
 */
static int sparc_check_constraints(struct perf_event **evts,
				   unsigned long *events, int n_ev)
{
	u8 msk0 = 0, msk1 = 0;
	int idx0 = 0;

	/* This case is possible when we are invoked from
	 * hw_perf_group_sched_in().
	 */
	if (!n_ev)
		return 0;

	if (n_ev > perf_max_events)
		return -1;

	msk0 = perf_event_get_msk(events[0]);
	if (n_ev == 1) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}
	BUG_ON(n_ev != 2);
	msk1 = perf_event_get_msk(events[1]);

	/* If both events can go on any counter, OK.  */
	if (msk0 == (PIC_UPPER | PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER))
		goto success;

	/* If one event is limited to a specific counter,
	 * and the other can go on both, OK.
	 */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER)) {
		if (msk1 & PIC_UPPER)
			idx0 = 1;
		goto success;
	}

	/* If the events are fixed to different counters, OK.  */
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	/* Otherwise, there is a conflict.  */
	return -1;

success:
	evts[0]->hw.idx = idx0;
	if (n_ev == 2)
		evts[1]->hw.idx = idx0 ^ 1;
	return 0;
}
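
/* Example: scheduling the ultra3 cache-references event
 * { 0x09, PIC_LOWER } together with cache-misses { 0x09, PIC_UPPER }
 * takes the "fixed to different counters" branch above, so the first
 * event gets hw.idx = 1 (PIC_LOWER_INDEX) and the second gets
 * hw.idx = 0 (PIC_UPPER_INDEX).
 */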

static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], unsigned long *events,
			  int *current_idx)
{
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		evts[n] = group;
		events[n] = group->hw.event_base;
		current_idx[n++] = PIC_NO_INDEX;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			evts[n] = event;
			events[n] = event->hw.event_base;
			current_idx[n++] = PIC_NO_INDEX;
		}
	}
	return n;
}

static int sparc_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0, ret = -EAGAIN;
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();

	n0 = cpuc->n_events;
	if (n0 >= perf_max_events)
		goto out;

	cpuc->event[n0] = event;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;

	/*
	 * If a group event scheduling transaction was started,
	 * skip the schedulability test here; it will be performed
	 * at commit time (->commit_txn) as a whole.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuc->event, n0, 1))
		goto out;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
		goto out;

nocheck:
	cpuc->n_events++;
	cpuc->n_added++;

	ret = 0;
out:
	perf_enable();
	local_irq_restore(flags);
	return ret;
}

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	unsigned long events[MAX_HWEVENTS];
	int current_idx_dmy[MAX_HWEVENTS];
	const struct perf_event_map *pmap;
	int n;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	pmap = NULL;
	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= sparc_pmu->max_events)
			return -EINVAL;
		pmap = sparc_pmu->event_map(attr->config);
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		pmap = sparc_map_cache_event(attr->config);
		if (IS_ERR(pmap))
			return PTR_ERR(pmap);
	} else if (attr->type != PERF_TYPE_RAW)
		return -EOPNOTSUPP;

	if (pmap) {
		hwc->event_base = perf_event_encode(pmap);
	} else {
		/* User gives us "(encoding << 16) | pic_mask" for
		 * PERF_TYPE_RAW events.
		 */
		hwc->event_base = attr->config;
	}
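
	/* For instance (an illustrative user-supplied value, not one
	 * defined by this driver): on Niagara2 a raw attr.config of
	 * ((0x0302 << 16) | (PIC_UPPER | PIC_LOWER)) = 0x03020003
	 * requests event code 0x0302, the same encoding the L1D-miss
	 * cache map entries use, runnable on either counter.
	 */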

	/* We save the enable bits in the config_base.  */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   perf_max_events - 1,
				   evts, events, current_idx_dmy);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = hwc->event_base;
	evts[n] = event;

	if (check_excludes(evts, n, 1))
		return -EINVAL;

	if (sparc_check_constraints(evts, events, n + 1))
		return -EINVAL;

	hwc->idx = PIC_NO_INDEX;

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

/*
 * Start a group event scheduling transaction.  Set the flag so that
 * pmu::enable() does not perform the schedulability test; it will be
 * performed at commit time.
 */
static void sparc_pmu_start_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag |= PERF_EVENT_TXN;
}

/*
 * Stop a group event scheduling transaction.  Clear the flag, and
 * pmu::enable() will perform the schedulability test.
 */
static void sparc_pmu_cancel_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
}

/*
 * Commit a group event scheduling transaction.  Perform the group
 * schedulability test as a whole.  Return 0 on success.
 */
static int sparc_pmu_commit_txn(const struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n;

	if (!sparc_pmu)
		return -EINVAL;
	n = cpuc->n_events;
	if (check_excludes(cpuc->event, 0, n))
		return -EINVAL;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
		return -EAGAIN;

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	return 0;
}

static const struct pmu pmu = {
	.enable		= sparc_pmu_enable,
	.disable	= sparc_pmu_disable,
	.read		= sparc_pmu_read,
	.unthrottle	= sparc_pmu_unthrottle,
	.start_txn	= sparc_pmu_start_txn,
	.cancel_txn	= sparc_pmu_cancel_txn,
	.commit_txn	= sparc_pmu_commit_txn,
};

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err = __hw_perf_event_init(event);

	if (err)
		return ERR_PTR(err);
	return &pmu;
}

void perf_event_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read();
	read_pic(pic);

	pr_info("\n");
	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}

static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int i;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	/* If the PMU has the TOE IRQ enable bits, we need to do a
	 * dummy write to the %pcr to clear the overflow bits and thus
	 * the interrupt.
	 *
	 * Do this before we peek at the counters to determine
	 * overflow so we don't lose any events.
	 */
	if (sparc_pmu->irq_bit)
		pcr_ops->write(cpuc->pcr);

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event[i];
		int idx = cpuc->current_idx[i];
		struct hw_perf_event *hwc;
		u64 val;

		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		data.period = event->hw.last_period;
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			sparc_pmu_disable_event(cpuc, hwc, idx);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
};

static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}

void __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 events.  */
	perf_max_events = 2;

	register_die_notifier(&perf_event_nmi_notifier);
}

static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static void perf_callchain_kernel(struct pt_regs *regs,
				  struct perf_callchain_entry *entry)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->tpc);

	ksp = regs->u_regs[UREG_I6];
	fp = ksp + STACK_BIAS;
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(current_thread_info(), fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(current_thread_info(), regs)) {
			if (user_mode(regs))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}
		callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			int index = current->curr_ret_stack;
			if (current->ret_stack && index >= graph) {
				pc = current->ret_stack[index - graph].ret;
				callchain_store(entry, pc);
				graph++;
			}
		}
#endif
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_64(struct pt_regs *regs,
				   struct perf_callchain_entry *entry)
{
	unsigned long ufp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
	do {
		struct sparc_stackf *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp + STACK_BIAS;
		callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
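
/* A note on the frame arithmetic above: the sparc64 ABI biases the
 * stack and frame pointers by STACK_BIAS (2047) bytes, so the true
 * frame address is %fp + STACK_BIAS.  The 32-bit walker below instead
 * masks %fp down to 32 bits and applies no bias.
 */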

static void perf_callchain_user_32(struct pt_regs *regs,
				   struct perf_callchain_entry *entry)
{
	unsigned long ufp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
	do {
		struct sparc_stackf32 *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf32 *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp;
		callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

/* Like powerpc we can't get PMU interrupts within the PMU handler,
 * so no need for separate NMI and IRQ chains as on x86.
 */
static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);

	entry->nr = 0;
	if (!user_mode(regs)) {
		stack_trace_flush();
		perf_callchain_kernel(regs, entry);
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}
	if (regs) {
		flushw_user();
		if (test_thread_flag(TIF_32BIT))
			perf_callchain_user_32(regs, entry);
		else
			perf_callchain_user_64(regs, entry);
	}
	return entry;
}