xref: /linux/arch/x86/kernel/hw_breakpoint.c (revision 2ba9268dd603d23e17643437b2246acb6844953b)
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 2 of the License, or
5  * (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software
14  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15  *
16  * Copyright (C) 2007 Alan Stern
17  * Copyright (C) 2009 IBM Corporation
18  * Copyright (C) 2009 Frederic Weisbecker <fweisbec@gmail.com>
19  *
20  * Authors: Alan Stern <stern@rowland.harvard.edu>
21  *          K.Prasad <prasad@linux.vnet.ibm.com>
22  *          Frederic Weisbecker <fweisbec@gmail.com>
23  */
24 
25 /*
26  * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
27  * using the CPU's debug registers.
28  */
29 
30 #include <linux/perf_event.h>
31 #include <linux/hw_breakpoint.h>
32 #include <linux/irqflags.h>
33 #include <linux/notifier.h>
34 #include <linux/kallsyms.h>
35 #include <linux/percpu.h>
36 #include <linux/kdebug.h>
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/sched.h>
40 #include <linux/smp.h>
41 
42 #include <asm/hw_breakpoint.h>
43 #include <asm/processor.h>
44 #include <asm/debugreg.h>
45 
46 /* Per cpu debug control register value */
47 DEFINE_PER_CPU(unsigned long, cpu_dr7);
48 EXPORT_PER_CPU_SYMBOL(cpu_dr7);
49 
50 /* Per-cpu debug address register values */
51 static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]);
52 
53 /*
54  * Stores the breakpoints currently in use on each breakpoint address
55  * register, for each CPU
56  */
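/* HBP_NUM is 4 on x86: one slot per debug address register DR0..DR3. */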
57 static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
58 
59 
60 static inline unsigned long
61 __encode_dr7(int drnum, unsigned int len, unsigned int type)
62 {
63 	unsigned long bp_info;
64 
65 	bp_info = (len | type) & 0xf;
66 	bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE);
67 	bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE));
68 
69 	return bp_info;
70 }
71 
72 /*
73  * Encode the length, type, Exact, and Enable bits for a particular breakpoint
74  * as stored in debug register 7.
75  */
76 unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type)
77 {
78 	return __encode_dr7(drnum, len, type) | DR_GLOBAL_SLOWDOWN;
79 }
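/*
 * For example (purely illustrative): encode_dr7(0, X86_BREAKPOINT_LEN_4,
 * X86_BREAKPOINT_WRITE) evaluates to 0xd0202: G0 (bit 1) and GE (bit 9) are
 * set, and the control nibble for breakpoint 0 (bits 16-19) holds
 * R/W0 = 01b (break on data writes) and LEN0 = 11b (4 bytes).
 */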
80 
81 /*
82  * Decode the length and type bits for a particular breakpoint as
83  * stored in debug register 7.  Return the "enabled" status.
84  */
85 int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type)
86 {
87 	int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE);
88 
89 	*len = (bp_info & 0xc) | 0x40;
90 	*type = (bp_info & 0x3) | 0x80;
91 
92 	return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3;
93 }
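/*
 * Note: the 0x40 and 0x80 OR-ins above map the raw 2-bit hardware fields back
 * onto the X86_BREAKPOINT_LEN_* and X86_BREAKPOINT_* software encodings (whose
 * low nibbles mirror the DR7 LEN and R/W bits), while the return value holds
 * the L<bpnum>/G<bpnum> enable bit pair.
 */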
94 
95 /*
96  * Install a perf counter breakpoint.
97  *
98  * We seek a free debug address register and use it for this
99  * breakpoint. Finally we enable it in the debug control register (DR7).
100  *
101  * Atomic: we hold the counter->ctx->lock and we only handle variables
102  * and registers local to this cpu.
103  */
104 int arch_install_hw_breakpoint(struct perf_event *bp)
105 {
106 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
107 	unsigned long *dr7;
108 	int i;
109 
110 	for (i = 0; i < HBP_NUM; i++) {
111 		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
112 
113 		if (!*slot) {
114 			*slot = bp;
115 			break;
116 		}
117 	}
118 
119 	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
120 		return -EBUSY;
121 
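	/* Program the address into DR<i> and keep a soft copy for hw_breakpoint_restore(). */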
122 	set_debugreg(info->address, i);
123 	__this_cpu_write(cpu_debugreg[i], info->address);
124 
125 	dr7 = this_cpu_ptr(&cpu_dr7);
126 	*dr7 |= encode_dr7(i, info->len, info->type);
127 
128 	set_debugreg(*dr7, 7);
129 	if (info->mask)
130 		set_dr_addr_mask(info->mask, i);
131 
132 	return 0;
133 }
134 
135 /*
136  * Uninstall the breakpoint contained in the given counter.
137  *
138  * First we search for the debug address register it uses and then we disable
139  * it.
140  *
141  * Atomic: we hold the counter->ctx->lock and we only handle variables
142  * and registers local to this cpu.
143  */
144 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
145 {
146 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
147 	unsigned long *dr7;
148 	int i;
149 
150 	for (i = 0; i < HBP_NUM; i++) {
151 		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
152 
153 		if (*slot == bp) {
154 			*slot = NULL;
155 			break;
156 		}
157 	}
158 
159 	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
160 		return;
161 
162 	dr7 = this_cpu_ptr(&cpu_dr7);
163 	*dr7 &= ~__encode_dr7(i, info->len, info->type);
164 
165 	set_debugreg(*dr7, 7);
166 	if (info->mask)
167 		set_dr_addr_mask(0, i);
168 }
169 
170 /*
171  * Check for virtual address in kernel space.
172  */
173 int arch_check_bp_in_kernelspace(struct perf_event *bp)
174 {
175 	unsigned int len;
176 	unsigned long va;
177 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
178 
179 	va = info->address;
180 	len = bp->attr.bp_len;
181 
182 	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
183 }
184 
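/*
 * Translate the x86-specific len/type encodings back into the generic
 * HW_BREAKPOINT_* values used by the core hw_breakpoint/perf code; roughly
 * the inverse of arch_build_bp_info(). Called from the x86 ptrace code.
 */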
185 int arch_bp_generic_fields(int x86_len, int x86_type,
186 			   int *gen_len, int *gen_type)
187 {
188 	/* Type */
189 	switch (x86_type) {
190 	case X86_BREAKPOINT_EXECUTE:
191 		if (x86_len != X86_BREAKPOINT_LEN_X)
192 			return -EINVAL;
193 
194 		*gen_type = HW_BREAKPOINT_X;
195 		*gen_len = sizeof(long);
196 		return 0;
197 	case X86_BREAKPOINT_WRITE:
198 		*gen_type = HW_BREAKPOINT_W;
199 		break;
200 	case X86_BREAKPOINT_RW:
201 		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
202 		break;
203 	default:
204 		return -EINVAL;
205 	}
206 
207 	/* Len */
208 	switch (x86_len) {
209 	case X86_BREAKPOINT_LEN_1:
210 		*gen_len = HW_BREAKPOINT_LEN_1;
211 		break;
212 	case X86_BREAKPOINT_LEN_2:
213 		*gen_len = HW_BREAKPOINT_LEN_2;
214 		break;
215 	case X86_BREAKPOINT_LEN_4:
216 		*gen_len = HW_BREAKPOINT_LEN_4;
217 		break;
218 #ifdef CONFIG_X86_64
219 	case X86_BREAKPOINT_LEN_8:
220 		*gen_len = HW_BREAKPOINT_LEN_8;
221 		break;
222 #endif
223 	default:
224 		return -EINVAL;
225 	}
226 
227 	return 0;
228 }
229 
230 
231 static int arch_build_bp_info(struct perf_event *bp)
232 {
233 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
234 
235 	info->address = bp->attr.bp_addr;
236 
237 	/* Type */
238 	switch (bp->attr.bp_type) {
239 	case HW_BREAKPOINT_W:
240 		info->type = X86_BREAKPOINT_WRITE;
241 		break;
242 	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
243 		info->type = X86_BREAKPOINT_RW;
244 		break;
245 	case HW_BREAKPOINT_X:
246 		info->type = X86_BREAKPOINT_EXECUTE;
247 		/*
248 		 * x86 inst breakpoints need to have a specific undefined len.
249 		 * But we still need to check that userspace is not trying to set up
250 		 * an unsupported length, to get a range breakpoint for example.
251 		 */
252 		if (bp->attr.bp_len == sizeof(long)) {
253 			info->len = X86_BREAKPOINT_LEN_X;
254 			return 0;
255 		}
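		/* Fall through: any other length is invalid for an execute breakpoint. */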
256 	default:
257 		return -EINVAL;
258 	}
259 
260 	/* Len */
261 	info->mask = 0;
262 
263 	switch (bp->attr.bp_len) {
264 	case HW_BREAKPOINT_LEN_1:
265 		info->len = X86_BREAKPOINT_LEN_1;
266 		break;
267 	case HW_BREAKPOINT_LEN_2:
268 		info->len = X86_BREAKPOINT_LEN_2;
269 		break;
270 	case HW_BREAKPOINT_LEN_4:
271 		info->len = X86_BREAKPOINT_LEN_4;
272 		break;
273 #ifdef CONFIG_X86_64
274 	case HW_BREAKPOINT_LEN_8:
275 		info->len = X86_BREAKPOINT_LEN_8;
276 		break;
277 #endif
278 	default:
279 		if (!is_power_of_2(bp->attr.bp_len))
280 			return -EINVAL;
281 		if (!cpu_has_bpext)
282 			return -EOPNOTSUPP;
283 		info->mask = bp->attr.bp_len - 1;
284 		info->len = X86_BREAKPOINT_LEN_1;
285 	}
286 
287 	return 0;
288 }
289 
290 /*
291  * Validate the arch-specific HW Breakpoint register settings
292  */
293 int arch_validate_hwbkpt_settings(struct perf_event *bp)
294 {
295 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
296 	unsigned int align;
297 	int ret;
298 
299 
300 	ret = arch_build_bp_info(bp);
301 	if (ret)
302 		return ret;
303 
304 	switch (info->len) {
305 	case X86_BREAKPOINT_LEN_1:
306 		align = 0;
307 		if (info->mask)
308 			align = info->mask;
309 		break;
310 	case X86_BREAKPOINT_LEN_2:
311 		align = 1;
312 		break;
313 	case X86_BREAKPOINT_LEN_4:
314 		align = 3;
315 		break;
316 #ifdef CONFIG_X86_64
317 	case X86_BREAKPOINT_LEN_8:
318 		align = 7;
319 		break;
320 #endif
321 	default:
322 		WARN_ON_ONCE(1);
		return -EINVAL;
323 	}
324 
325 	/*
326 	 * Check that the low-order bits of the address are appropriate
327 	 * for the alignment implied by len.
328 	 */
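	/* e.g. a 4-byte breakpoint requires a 4-byte-aligned address: (address & 3) == 0 */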
329 	if (info->address & align)
330 		return -EINVAL;
331 
332 	return 0;
333 }
334 
335 /*
336  * Dump the debug register contents to the user.
337  * We can't dump our per-cpu values because they
338  * may contain cpu-wide breakpoints, which do not
339  * belong to the current task.
340  *
341  * TODO: include non-ptrace user breakpoints (perf)
342  */
343 void aout_dump_debugregs(struct user *dump)
344 {
345 	int i;
346 	int dr7 = 0;
347 	struct perf_event *bp;
348 	struct arch_hw_breakpoint *info;
349 	struct thread_struct *thread = &current->thread;
350 
351 	for (i = 0; i < HBP_NUM; i++) {
352 		bp = thread->ptrace_bps[i];
353 
354 		if (bp && !bp->attr.disabled) {
355 			dump->u_debugreg[i] = bp->attr.bp_addr;
356 			info = counter_arch_bp(bp);
357 			dr7 |= encode_dr7(i, info->len, info->type);
358 		} else {
359 			dump->u_debugreg[i] = 0;
360 		}
361 	}
362 
363 	dump->u_debugreg[4] = 0;
364 	dump->u_debugreg[5] = 0;
365 	dump->u_debugreg[6] = current->thread.debugreg6;
366 
367 	dump->u_debugreg[7] = dr7;
368 }
369 EXPORT_SYMBOL_GPL(aout_dump_debugregs);
370 
371 /*
372  * Release the user breakpoints used by ptrace
373  */
374 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
375 {
376 	int i;
377 	struct thread_struct *t = &tsk->thread;
378 
379 	for (i = 0; i < HBP_NUM; i++) {
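	/* Slots that were never programmed are NULL; unregister_hw_breakpoint() simply ignores a NULL bp. */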
380 		unregister_hw_breakpoint(t->ptrace_bps[i]);
381 		t->ptrace_bps[i] = NULL;
382 	}
383 
384 	t->debugreg6 = 0;
385 	t->ptrace_dr7 = 0;
386 }
387 
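/*
 * Reprogram DR0-DR3, DR6 and DR7 from the saved per-cpu and per-task copies,
 * for callers that need to restore the debug registers after they have been
 * clobbered (e.g. across suspend/resume or a virtualization guest run).
 */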
388 void hw_breakpoint_restore(void)
389 {
390 	set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0);
391 	set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
392 	set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
393 	set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
394 	set_debugreg(current->thread.debugreg6, 6);
395 	set_debugreg(__this_cpu_read(cpu_dr7), 7);
396 }
397 EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
398 
399 /*
400  * Handle debug exception notifications.
401  *
402  * Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below.
403  *
404  * NOTIFY_DONE is returned if one of the following conditions is true:
405  * i) when the causative address is from user-space and the exception
406  * is a valid one, i.e. not triggered as a result of lazy debug register
407  * switching;
408  * ii) when bits other than trap<n> are set in the DR6 register (such
409  * as BD, BS or BT), indicating that more than one debug condition is
410  * met and requires further handling in do_debug().
411  *
412  * NOTIFY_STOP is returned for all other cases.
413  *
414  */
415 static int hw_breakpoint_handler(struct die_args *args)
416 {
417 	int i, cpu, rc = NOTIFY_STOP;
418 	struct perf_event *bp;
419 	unsigned long dr7, dr6;
420 	unsigned long *dr6_p;
421 
422 	/* The DR6 value is pointed to by args->err */
423 	dr6_p = (unsigned long *)ERR_PTR(args->err);
424 	dr6 = *dr6_p;
425 
426 	/* If it's a single step, TRAP bits are random */
427 	if (dr6 & DR_STEP)
428 		return NOTIFY_DONE;
429 
430 	/* Do an early return if no trap bits are set in DR6 */
431 	if ((dr6 & DR_TRAP_BITS) == 0)
432 		return NOTIFY_DONE;
433 
434 	get_debugreg(dr7, 7);
435 	/* Disable breakpoints during exception handling */
436 	set_debugreg(0UL, 7);
437 	/*
438 	 * Local interrupts should be disabled at this point.
439 	 * Reset the DRn bits in the virtualized register value;
440 	 * the ptrace trigger routine will add in whatever is needed.
441 	 */
442 	current->thread.debugreg6 &= ~DR_TRAP_BITS;
443 	cpu = get_cpu();
444 
445 	/* Handle all the breakpoints that were triggered */
446 	for (i = 0; i < HBP_NUM; ++i) {
447 		if (likely(!(dr6 & (DR_TRAP0 << i))))
448 			continue;
449 
450 		/*
451 		 * The counter may be concurrently released but that can only
452 		 * occur from a call_rcu() path. We can then safely fetch
453 		 * the breakpoint, use its callback and touch its counter
454 		 * while we are inside an rcu_read_lock() section.
455 		 */
456 		rcu_read_lock();
457 
458 		bp = per_cpu(bp_per_reg[i], cpu);
459 		/*
460 		 * Reset the 'i'th TRAP bit in dr6 to denote completion of
461 		 * exception handling
462 		 */
463 		(*dr6_p) &= ~(DR_TRAP0 << i);
464 		/*
465 		 * bp can be NULL due to lazy debug register switching
466 		 * or due to concurrent perf counter removal.
467 		 */
468 		if (!bp) {
469 			rcu_read_unlock();
470 			break;
471 		}
472 
473 		perf_bp_event(bp, args->regs);
474 
475 		/*
476 		 * Set the resume flag to avoid breakpoint recursion when
477 		 * returning to the origin of the exception.
478 		 */
479 		if (bp->hw.info.type == X86_BREAKPOINT_EXECUTE)
480 			args->regs->flags |= X86_EFLAGS_RF;
481 
482 		rcu_read_unlock();
483 	}
484 	/*
485 	 * Further processing in do_debug() is needed for a) user-space
486 	 * breakpoints (to generate signals) and b) when the system has
487 	 * taken an exception due to multiple causes.
488 	 */
489 	if ((current->thread.debugreg6 & DR_TRAP_BITS) ||
490 	    (dr6 & (~DR_TRAP_BITS)))
491 		rc = NOTIFY_DONE;
492 
493 	set_debugreg(dr7, 7);
494 	put_cpu();
495 
496 	return rc;
497 }
498 
499 /*
500  * Handle debug exception notifications.
501  */
502 int hw_breakpoint_exceptions_notify(
503 		struct notifier_block *unused, unsigned long val, void *data)
504 {
505 	if (val != DIE_DEBUG)
506 		return NOTIFY_DONE;
507 
508 	return hw_breakpoint_handler(data);
509 }
510 
511 void hw_breakpoint_pmu_read(struct perf_event *bp)
512 {
513 	/* TODO */
514 }
515