xref: /linux/arch/sh/kernel/hw_breakpoint.c (revision 195bce73bd10bb939950c1591606185404de281d)
/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 - 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/sched/signal.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/traps.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each CPU.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;

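/*
 * Note: breakpoints reach this backend through the generic hw_breakpoint
 * layer rather than by calling the arch_* hooks below directly.  As a
 * rough sketch (the watched symbol and the wp_handler callback are
 * purely illustrative), an in-kernel user would do something like:
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("jiffies");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
 *
 * while userspace goes through ptrace or a perf_event_open() call with
 * a PERF_TYPE_BREAKPOINT attribute.
 */
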
/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return -EBUSY;

	clk_enable(sh_ubc->clk);
	sh_ubc->enable(info, i);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search for the debug address register it uses, and then we
 * disable it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return;

	sh_ubc->disable(info, i);
	clk_disable(sh_ubc->clk);
}

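/*
 * Translate a UBC length encoding into the number of bytes it covers.
 */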
static int get_hbp_len(u16 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case SH_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case SH_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case SH_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case SH_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}
	return len_in_bytes;
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

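/*
 * Translate the arch-specific length and type encodings into their
 * generic hw_breakpoint counterparts.
 */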
int arch_bp_generic_fields(int sh_len, int sh_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (sh_len) {
	case SH_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case SH_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case SH_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case SH_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (sh_type) {
	case SH_BREAKPOINT_READ:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case SH_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case SH_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

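/*
 * Build the arch-specific breakpoint description from the generic
 * perf_event attributes.
 */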
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = SH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = SH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = SH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->len = SH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_READ;
		break;
	case HW_BREAKPOINT_W:
		info->type = SH_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	switch (info->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	/*
	 * For kernel addresses, either the address or the symbol name can
	 * be specified.
	 */
	if (info->name)
		info->address = (unsigned long)kallsyms_lookup_name(info->name);

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	return 0;
}

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < sh_ubc->num_events; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

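/*
 * Core UBC exception handler, invoked via the die notifier chain.
 * Breakpoints are disabled for the duration of handling; each triggered
 * channel has its perf event dispatched, and every channel that was
 * active is re-armed on exit, except for one-shot ptrace breakpoints.
 */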
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removal.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (!arch_check_bp_in_kernelspace(bp)) {
			force_sig_fault(SIGTRAP, TRAP_HWBKPT,
					(void __user *)NULL, current);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}

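/*
 * Trap entry point for the UBC breakpoint vector; it simply feeds the
 * exception into the die notifier chain, where
 * hw_breakpoint_exceptions_notify() below picks it up.
 */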
BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_BREAKPOINT)
		return NOTIFY_DONE;

	/*
	 * If the breakpoint hasn't been triggered by the UBC, it's
	 * probably from a debugger, so don't do anything more here.
	 *
	 * This also permits the UBC interface clock to remain off for
	 * non-UBC breakpoints, as we don't need to check the triggered
	 * or active channel masks.
	 */
	if (args->trapnr != sh_ubc->trap_nr)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

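/*
 * Register the CPU's UBC description with the breakpoint core.  A rough
 * sketch of what a platform UBC driver is expected to do (the my_ubc_*
 * callbacks, the trap_nr value, and the "ubc0" clock name below are
 * illustrative only, not the actual SH-4A driver):
 *
 *	static struct sh_ubc my_ubc = {
 *		.name			= "my-ubc",
 *		.num_events		= 2,
 *		.trap_nr		= 0x1e0,	// CPU-specific break vector
 *		.enable			= my_ubc_enable,
 *		.disable		= my_ubc_disable,
 *		.enable_all		= my_ubc_enable_all,
 *		.disable_all		= my_ubc_disable_all,
 *		.active_mask		= my_ubc_active_mask,
 *		.triggered_mask		= my_ubc_triggered_mask,
 *		.clear_triggered_mask	= my_ubc_clear_triggered_mask,
 *	};
 *
 *	static int __init my_ubc_init(void)
 *	{
 *		my_ubc.clk = clk_get(NULL, "ubc0");
 *		if (IS_ERR(my_ubc.clk))
 *			return PTR_ERR(my_ubc.clk);
 *
 *		return register_sh_ubc(&my_ubc);
 *	}
 *	arch_initcall(my_ubc_init);
 */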
int register_sh_ubc(struct sh_ubc *ubc)
{
	/* Bail if it's already assigned */
	if (sh_ubc != &ubc_dummy)
		return -EBUSY;
	sh_ubc = ubc;

	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

	WARN_ON(ubc->num_events > HBP_NUM);

	return 0;
}