/*
 * Performance event support framework for SuperH hardware counters.
 *
 *  Copyright (C) 2009  Paul Mundt
 *
 * Heavily based on the x86 and PowerPC implementations.
 *
 * x86:
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * ppc:
 *  Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <linux/export.h>
#include <asm/processor.h>

struct cpu_hw_events {
	struct perf_event	*events[MAX_HWEVENTS];
	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
};

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct sh_pmu *sh_pmu __read_mostly;

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
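
/*
 * Reservation protocol, as implemented below: the first event to be
 * created takes pmc_reserve_mutex, calls reserve_pmc_hardware() and bumps
 * num_events; hw_perf_event_destroy() drops the reference and calls
 * release_pmc_hardware() under the same mutex once the last event goes
 * away.  The atomic_inc_not_zero()/atomic_add_unless() pairing keeps the
 * mutex off the fast path while counters are already in use.
 */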

/*
 * Stub these out for now, do something more profound later.
 */
int reserve_pmc_hardware(void)
{
	return 0;
}

void release_pmc_hardware(void)
{
}

static inline int sh_pmu_initialized(void)
{
	return !!sh_pmu;
}

const char *perf_pmu_name(void)
{
	if (!sh_pmu)
		return NULL;

	return sh_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	if (!sh_pmu)
		return 0;

	return sh_pmu->num_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);
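
/*
 * Note: perf_pmu_name() and perf_num_counters() are generic hooks exported
 * for users outside this file (e.g. the oprofile-on-perf compatibility
 * layer); they simply report what the registered sh_pmu advertises.
 */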

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static int hw_perf_cache_event(int config, int *evp)
{
	unsigned long type, op, result;
	int ev;

	if (!sh_pmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*sh_pmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*evp = ev;
	return 0;
}
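
/*
 * Example of the generic cache event encoding unpacked above: an L1D read
 * miss is requested with
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |
 *		      (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * which hw_perf_cache_event() translates to the CPU-specific event code
 * stored in sh_pmu->cache_events[type][op][result].
 */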

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int config = -1;
	int err;

	if (!sh_pmu_initialized())
		return -ENODEV;

	/*
	 * All of the on-chip counters are "limited", in that they have
	 * no interrupts, and are therefore unable to do sampling without
	 * further work and timer assistance.
	 */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * See if we need to reserve the counter.
	 *
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}

	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	switch (attr->type) {
	case PERF_TYPE_RAW:
		config = attr->config & sh_pmu->raw_event_mask;
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(attr->config, &config);
		if (err)
			return err;
		break;
	case PERF_TYPE_HARDWARE:
		if (attr->config >= sh_pmu->max_events)
			return -EINVAL;

		config = sh_pmu->event_map(attr->config);
		break;
	}

	if (config == -1)
		return -EINVAL;

	hwc->config |= config;

	return 0;
}
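
/*
 * A minimal sketch of an attribute this init path accepts -- counting
 * only, since the counters cannot raise an overflow interrupt and any
 * non-zero sample_period is rejected above:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *	};
 */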

static void sh_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count;
	s64 delta;
	int shift = 0;

	/*
	 * Depending on the counter configuration, they may or may not
	 * be chained, in which case the previous counter value can be
	 * updated underneath us if the lower-half overflows.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically.
	 *
	 * As there is no interrupt associated with the overflow events,
	 * this is the simplest approach for maintaining consistency.
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = sh_pmu->read(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
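
/*
 * Worked example of the update above: if prev_count was 1000 and
 * sh_pmu->read() now returns 1250, the cmpxchg publishes 1250 as the new
 * prev_count and 250 is added to event->count.  If prev_count changed
 * between the read and the cmpxchg, the cmpxchg fails and the sequence is
 * retried.  With shift == 0 the shift/sign-extension dance is a no-op
 * here; it only matters for counters narrower than 64 bits.
 */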

static void sh_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		sh_pmu->disable(hwc, idx);
		cpuc->events[idx] = NULL;
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
		sh_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void sh_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	cpuc->events[idx] = event;
	event->hw.state = 0;
	sh_pmu->enable(hwc, idx);
}

static void sh_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	sh_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static int sh_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret = -EAGAIN;

	perf_pmu_disable(event->pmu);

	if (__test_and_set_bit(idx, cpuc->used_mask)) {
		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
		if (idx == sh_pmu->num_events)
			goto out;

		__set_bit(idx, cpuc->used_mask);
		hwc->idx = idx;
	}

	sh_pmu->disable(hwc, idx);

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		sh_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);
	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	return ret;
}

static void sh_pmu_read(struct perf_event *event)
{
	sh_perf_event_update(event, &event->hw, event->hw.idx);
}

static int sh_pmu_event_init(struct perf_event *event)
{
	int err;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HW_CACHE:
	case PERF_TYPE_HARDWARE:
		err = __hw_perf_event_init(event);
		break;

	default:
		return -ENOENT;
	}

	if (unlikely(err)) {
		if (event->destroy)
			event->destroy(event);
	}

	return err;
}

static void sh_pmu_enable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->enable_all();
}

static void sh_pmu_disable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->disable_all();
}

static struct pmu pmu = {
	.pmu_enable	= sh_pmu_enable,
	.pmu_disable	= sh_pmu_disable,
	.event_init	= sh_pmu_event_init,
	.add		= sh_pmu_add,
	.del		= sh_pmu_del,
	.start		= sh_pmu_start,
	.stop		= sh_pmu_stop,
	.read		= sh_pmu_read,
};
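
/*
 * The perf core drives the callbacks above: ->event_init() validates and
 * translates the attribute, ->add()/->del() schedule the event onto/off a
 * hardware counter, ->start()/->stop() gate counting for an already
 * scheduled event, and ->read() folds the current hardware count into
 * event->count via sh_perf_event_update().
 */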

static void sh_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
}

static int
sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		sh_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

int register_sh_pmu(struct sh_pmu *_pmu)
{
	if (sh_pmu)
		return -EBUSY;
	sh_pmu = _pmu;

	pr_info("Performance Events: %s support registered\n", _pmu->name);

	WARN_ON(_pmu->num_events > MAX_HWEVENTS);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(sh_pmu_notifier);
	return 0;
}
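
/*
 * A CPU-specific driver (for example the SH-4A support under
 * arch/sh/kernel/cpu/sh4a/) fills in a struct sh_pmu with its name,
 * counter count, event maps and the read/enable/disable callbacks used
 * above, then hands it to register_sh_pmu() during boot.  This is only a
 * sketch of the fields this file dereferences; see the sh_pmu definition
 * in the architecture headers for the authoritative layout.
 */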