// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support framework for SuperH hardware counters.
 *
 *  Copyright (C) 2009  Paul Mundt
 *
 * Heavily based on the x86 and PowerPC implementations.
 *
 * x86:
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * ppc:
 *  Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <linux/export.h>
#include <asm/processor.h>

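/*
 * Per-CPU bookkeeping: the event currently scheduled on each hardware
 * counter, plus bitmaps tracking which counter slots are in use.
 */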
struct cpu_hw_events {
	struct perf_event	*events[MAX_HWEVENTS];
	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
};

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct sh_pmu *sh_pmu __read_mostly;

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Stub these out for now, do something more profound later.
 */
int reserve_pmc_hardware(void)
{
	return 0;
}

void release_pmc_hardware(void)
{
}

static inline int sh_pmu_initialized(void)
{
	return !!sh_pmu;
}

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

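/*
 * Translate a generic cache event descriptor into the CPU-specific
 * event code. attr->config packs the tuple as:
 *
 *	config[ 7:0]  - cache type	(PERF_COUNT_HW_CACHE_*)
 *	config[15:8]  - operation	(PERF_COUNT_HW_CACHE_OP_*)
 *	config[23:16] - result		(PERF_COUNT_HW_CACHE_RESULT_*)
 *
 * A cache_events table entry of 0 means "not supported", -1 "invalid".
 */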
static int hw_perf_cache_event(int config, int *evp)
{
	unsigned long type, op, result;
	int ev;

	if (!sh_pmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*sh_pmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*evp = ev;
	return 0;
}

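/*
 * Common initialisation: grab the PMC hardware on first use (released
 * again from hw_perf_event_destroy()), then map the generic
 * attr->type/attr->config pair onto a hardware event code.
 */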
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int config = -1;
	int err;

	if (!sh_pmu_initialized())
		return -ENODEV;

	/*
	 * See if we need to reserve the counter.
	 *
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}

	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	switch (attr->type) {
	case PERF_TYPE_RAW:
		config = attr->config & sh_pmu->raw_event_mask;
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(attr->config, &config);
		if (err)
			return err;
		break;
	case PERF_TYPE_HARDWARE:
		if (attr->config >= sh_pmu->max_events)
			return -EINVAL;

		config = sh_pmu->event_map(attr->config);
		break;
	}

	if (config == -1)
		return -EINVAL;

	hwc->config |= config;

	return 0;
}

static void sh_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count;
	s64 delta;
	int shift = 0;

	/*
	 * Depending on the counter configuration, they may or may not
	 * be chained, in which case the previous counter value can be
	 * updated underneath us if the lower-half overflows.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically.
	 *
	 * As there is no interrupt associated with the overflow events,
	 * this is the simplest approach for maintaining consistency.
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = sh_pmu->read(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

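/*
 * stop/start implement the perf core's PERF_HES_STOPPED/PERF_HES_UPTODATE
 * protocol: stop disables the counter and optionally folds its value into
 * the event count; start clears the state and re-enables the counter.
 */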
static void sh_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		sh_pmu->disable(hwc, idx);
		cpuc->events[idx] = NULL;
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
		sh_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void sh_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	cpuc->events[idx] = event;
	event->hw.state = 0;
	sh_pmu->enable(hwc, idx);
}

static void sh_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	sh_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

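/*
 * Claim a free counter slot from used_mask for this event and program it
 * in the stopped state; the counter is only started here if PERF_EF_START
 * is set, otherwise the core calls sh_pmu_start() later.
 */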
static int sh_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret = -EAGAIN;

	perf_pmu_disable(event->pmu);

	if (__test_and_set_bit(idx, cpuc->used_mask)) {
		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
		if (idx == sh_pmu->num_events)
			goto out;

		__set_bit(idx, cpuc->used_mask);
		hwc->idx = idx;
	}

	sh_pmu->disable(hwc, idx);

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		sh_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);
	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	return ret;
}

static void sh_pmu_read(struct perf_event *event)
{
	sh_perf_event_update(event, &event->hw, event->hw.idx);
}

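/*
 * Entry point from the perf core: only the common hardware event types are
 * handled here; returning -ENOENT lets other PMUs claim the event.
 */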
static int sh_pmu_event_init(struct perf_event *event)
{
	int err;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HW_CACHE:
	case PERF_TYPE_HARDWARE:
		err = __hw_perf_event_init(event);
		break;

	default:
		return -ENOENT;
	}

	if (unlikely(err)) {
		if (event->destroy)
			event->destroy(event);
	}

	return err;
}

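/*
 * Whole-PMU enable/disable, used to bracket scheduling operations such as
 * sh_pmu_add() above.
 */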
static void sh_pmu_enable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->enable_all();
}

static void sh_pmu_disable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->disable_all();
}

static struct pmu pmu = {
	.pmu_enable	= sh_pmu_enable,
	.pmu_disable	= sh_pmu_disable,
	.event_init	= sh_pmu_event_init,
	.add		= sh_pmu_add,
	.del		= sh_pmu_del,
	.start		= sh_pmu_start,
	.stop		= sh_pmu_stop,
	.read		= sh_pmu_read,
};

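/* CPU hotplug 'prepare' callback: start each CPU with a clean slate. */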
static int sh_pmu_prepare_cpu(unsigned int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
	return 0;
}

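/*
 * Called from the CPU-specific setup code to hook a counter implementation
 * into this framework. An illustrative (hypothetical) caller, showing only
 * the sh_pmu callbacks this file actually uses -- the real definitions live
 * in the per-CPU perf_event.c files:
 *
 *	static struct sh_pmu example_pmu = {
 *		.name		= "example",
 *		.num_events	= 2,
 *		.event_map	= example_event_map,
 *		.max_events	= EXAMPLE_MAX_EVENTS,
 *		.raw_event_mask	= 0xffff,
 *		.cache_events	= &example_cache_events,
 *		.read		= example_pmu_read,
 *		.disable	= example_pmu_disable,
 *		.enable		= example_pmu_enable,
 *		.disable_all	= example_pmu_disable_all,
 *		.enable_all	= example_pmu_enable_all,
 *	};
 *
 *	register_sh_pmu(&example_pmu);
 */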
int register_sh_pmu(struct sh_pmu *_pmu)
{
	if (sh_pmu)
		return -EBUSY;
	sh_pmu = _pmu;

	pr_info("Performance Events: %s support registered\n", _pmu->name);

	/*
	 * All of the on-chip counters are "limited", in that they have
	 * no interrupts, and are therefore unable to do sampling without
	 * further work and timer assistance.
	 */
	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	WARN_ON(_pmu->num_events > MAX_HWEVENTS);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	cpuhp_setup_state(CPUHP_PERF_SUPERH, "PERF_SUPERH", sh_pmu_prepare_cpu,
			  NULL);
	return 0;
}