xref: /linux/kernel/debug/debug_core.c (revision c053784454550cf750399caa65482b31ffbe3c57)
/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 *  Jason Wessel ( jason.wessel@windriver.com )
 *  George Anzinger <george@mvista.com>
 *  Anurekh Saxena (anurekh.saxena@timesys.com)
 *  Lake Stevens Instrument Division (Glenn Engel)
 *  Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/system.h>

#include "debug_core.h"

static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/**
 * kgdb_connected - Is a host GDB connected to us?
 */
int				kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int			kgdb_io_module_registered;

/* Guard for recursive entry */
static int			exception_level;

struct kgdb_io		*dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;
	return 0;
}

early_param("kgdbcon", opt_kgdb_con);

module_param(kgdb_use_con, int, 0644);
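
/*
 * Typical usage (a sketch, not taken from this file): these options are
 * combined with a kgdb I/O driver on the kernel command line, for example
 * with the kgdboc serial driver:
 *
 *	kgdboc=ttyS0,115200 kgdbwait kgdbcon
 *
 * "kgdbwait" (handled at the bottom of this file) stops the boot until a
 * debugger attaches, and "kgdbcon" mirrors kernel console output to the
 * attached gdb.
 */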

/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt		kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};
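
/*
 * Rough lifecycle of a slot in kgdb_break[], as used by the helpers below:
 * BP_UNDEFINED (free slot) -> BP_SET (requested by the debugger) ->
 * BP_ACTIVE (instruction patched into memory) -> back to BP_SET when
 * deactivated, or BP_REMOVED once the debugger deletes it.
 */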

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t			kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
 * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t			masters_in_kgdb;
static atomic_t			slaves_in_kgdb;
static atomic_t			kgdb_break_tasklet_var;
atomic_t			kgdb_setting_breakpoint;

struct task_struct		*kgdb_usethread;
struct task_struct		*kgdb_contthread;

int				kgdb_single_step;
static pid_t			kgdb_sstep_pid;

/* to keep track of the CPU which is doing the single stepping */
atomic_t			kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;

	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);

/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
{
	int err;

	err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
	if (err)
		return err;

	return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
				  BREAK_INSTR_SIZE);
}

int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
{
	return probe_kernel_write((char *)addr,
				  (char *)bundle, BREAK_INSTR_SIZE);
}

int __weak kgdb_validate_break_address(unsigned long addr)
{
	char tmp_variable[BREAK_INSTR_SIZE];
	int err;
	/* Validate setting the breakpoint and then removing it.  If the
	 * remove fails, the kernel needs to emit a bad message because we
	 * are in deep trouble not being able to put things back the way we
	 * found them.
	 */
	err = kgdb_arch_set_breakpoint(addr, tmp_variable);
	if (err)
		return err;
	err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
	if (err)
		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
		   "memory destroyed at: %lx", addr);
	return err;
}

unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

int __weak kgdb_arch_init(void)
{
	return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return 0;
}

/**
 *	kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
 *	@regs: Current &struct pt_regs.
 *
 *	This function will be called if the particular architecture must
 *	disable hardware debugging while it is processing gdb packets or
 *	handling the exception.
 */
void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
{
}

/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	if (current->mm && current->mm->mmap_cache) {
		flush_cache_range(current->mm->mmap_cache,
				  addr, addr + BREAK_INSTR_SIZE);
	}
	/* Force flush instruction cache if it was outside the mm */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}

/*
 * SW breakpoint management:
 */
int dbg_activate_sw_breakpoints(void)
{
	unsigned long addr;
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_SET)
			continue;

		addr = kgdb_break[i].bpt_addr;
		error = kgdb_arch_set_breakpoint(addr,
				kgdb_break[i].saved_instr);
		if (error) {
			ret = error;
			printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
			continue;
		}

		kgdb_flush_swbreak_addr(addr);
		kgdb_break[i].state = BP_ACTIVE;
	}
	return ret;
}

int dbg_set_sw_break(unsigned long addr)
{
	int err = kgdb_validate_break_address(addr);
	int breakno = -1;
	int i;

	if (err)
		return err;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
					(kgdb_break[i].bpt_addr == addr))
			return -EEXIST;
	}
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_REMOVED &&
					kgdb_break[i].bpt_addr == addr) {
			breakno = i;
			break;
		}
	}

	if (breakno == -1) {
		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
			if (kgdb_break[i].state == BP_UNDEFINED) {
				breakno = i;
				break;
			}
		}
	}

	if (breakno == -1)
		return -E2BIG;

	kgdb_break[breakno].state = BP_SET;
	kgdb_break[breakno].type = BP_BREAKPOINT;
	kgdb_break[breakno].bpt_addr = addr;

	return 0;
}

int dbg_deactivate_sw_breakpoints(void)
{
	unsigned long addr;
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			continue;
		addr = kgdb_break[i].bpt_addr;
		error = kgdb_arch_remove_breakpoint(addr,
					kgdb_break[i].saved_instr);
		if (error) {
			printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
			ret = error;
		}

		kgdb_flush_swbreak_addr(addr);
		kgdb_break[i].state = BP_SET;
	}
	return ret;
}

int dbg_remove_sw_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
				(kgdb_break[i].bpt_addr == addr)) {
			kgdb_break[i].state = BP_REMOVED;
			return 0;
		}
	}
	return -ENOENT;
}

int kgdb_isremovedbreak(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_REMOVED) &&
					(kgdb_break[i].bpt_addr == addr))
			return 1;
	}
	return 0;
}

int dbg_remove_all_break(void)
{
	unsigned long addr;
	int error;
	int i;

	/* Clear memory breakpoints. */
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			goto setundefined;
		addr = kgdb_break[i].bpt_addr;
		error = kgdb_arch_remove_breakpoint(addr,
				kgdb_break[i].saved_instr);
		if (error)
			printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
			   addr);
setundefined:
		kgdb_break[i].state = BP_UNDEFINED;
	}

	/* Clear hardware breakpoints. */
	if (arch_kgdb_ops.remove_all_hw_break)
		arch_kgdb_ops.remove_all_hw_break();

	return 0;
}

/*
 * Return true if there is a valid kgdb I/O module.  If no debugger is
 * attached, a message can also be printed to the console to say that we
 * are waiting for the debugger to attach.
 *
 * The print_wait argument should only be true when called from inside
 * the core kgdb_handle_exception, because it will wait for the
 * debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
	if (!dbg_io_ops)
		return 0;
	if (kgdb_connected)
		return 1;
	if (atomic_read(&kgdb_setting_breakpoint))
		return 1;
	if (print_wait) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
#else
		printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
#endif
	}
	return 1;
}

static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the breakpoint was removed successfully at the place the
	 * exception occurred, try to recover and print a warning to the
	 * end user, because the user planted a breakpoint in a place
	 * that KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
			addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		panic("Recursive entry to debugger");
	}

	printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	dump_stack();
	panic("Recursive entry to debugger");

	return 1;
}

static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}

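/*
 * Overview of the entry flow implemented below: the CPU that hit the
 * exception requests to become the master by taking dbg_master_lock and
 * publishing itself in kgdb_active, while every other CPU that is rounded
 * up enters as a slave and spins until dbg_slave_lock is released.  The
 * master then runs the kdb or gdbstub event loop and finally releases the
 * slaves, the lock and kgdb_active on the way out.
 */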
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);
	kgdb_disable_hw_debug(ks->linux_regs);

acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * The CPU will loop if it is a slave, or until it can become the
	 * kgdb master cpu and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic_dec();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPUs in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* Signal the other CPUs to enter kgdb_wait() */
	if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
				atomic_read(&slaves_in_kgdb)) != online_cpus)
		cpu_relax();

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic_dec();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);

	return kgdb_info[cpu].ret_state;
}

/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *	interface locks, if any (begin_session)
 *	kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	ks->cpu			= raw_smp_processor_id();
	ks->ex_vector		= evector;
	ks->signo		= signo;
	ks->err_code		= ecode;
	ks->kgdb_usethreadid	= 0;
	ks->linux_regs		= regs;

	if (kgdb_reenter_check(ks))
		return 0; /* Ouch, double exception ! */
	if (kgdb_info[ks->cpu].enter_kgdb != 0)
		return 0;

	return kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
}

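/*
 * kgdb_nmicallback() is the entry point for the other CPUs: it is expected
 * to be called from the architecture's roundup IPI/NMI handler (triggered
 * by kgdb_roundup_cpus() above) so that, while the master CPU holds
 * dbg_master_lock, the remaining CPUs park themselves as slaves.
 */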
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu			= cpu;
	ks->linux_regs		= regs;

	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
			raw_spin_is_locked(&dbg_master_lock)) {
		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
		return 0;
	}
#endif
	return 1;
}

static void kgdb_console_write(struct console *co, const char *s,
   unsigned count)
{
	unsigned long flags;

	/* If we're debugging, or KGDB has not connected, don't try
	 * to print. */
	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
		return;

	local_irq_save(flags);
	gdbstub_msg_write(s, count);
	local_irq_restore(flags);
}

static struct console kgdbcons = {
	.name		= "kgdb",
	.write		= kgdb_console_write,
	.flags		= CON_PRINTBUFFER | CON_ENABLED,
	.index		= -1,
};

#ifdef CONFIG_MAGIC_SYSRQ
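/*
 * The debugger can also be entered at run time via the magic sysrq 'g'
 * key, for example with "echo g > /proc/sysrq-trigger" or the sysrq key
 * chord on a console, provided an I/O module has been registered.
 */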
static void sysrq_handle_dbg(int key)
{
	if (!dbg_io_ops) {
		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
#else
		printk(KERN_CRIT "Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static struct sysrq_key_op sysrq_dbg_op = {
	.handler	= sysrq_handle_dbg,
	.help_msg	= "debug(G)",
	.action_msg	= "DEBUG",
};
#endif

static int kgdb_panic_event(struct notifier_block *self,
			    unsigned long val,
			    void *data)
{
	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", (char *)data);
	kgdb_breakpoint();
	return NOTIFY_DONE;
}

static struct notifier_block kgdb_panic_event_nb = {
       .notifier_call	= kgdb_panic_event,
       .priority	= INT_MAX,
};

void __weak kgdb_arch_late(void)
{
}

void __init dbg_late_init(void)
{
	dbg_is_early = false;
	if (kgdb_io_module_registered)
		kgdb_arch_late();
	kdb_init(KDB_INIT_FULL);
}

static void kgdb_register_callbacks(void)
{
	if (!kgdb_io_module_registered) {
		kgdb_io_module_registered = 1;
		kgdb_arch_init();
		if (!dbg_is_early)
			kgdb_arch_late();
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
		register_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_use_con && !kgdb_con_registered) {
			register_console(&kgdbcons);
			kgdb_con_registered = 1;
		}
	}
}

static void kgdb_unregister_callbacks(void)
{
	/*
	 * When this routine is called KGDB should unregister from the
	 * panic handler and clean up, making sure it is not handling any
	 * break exceptions at the time.
	 */
	if (kgdb_io_module_registered) {
		kgdb_io_module_registered = 0;
		atomic_notifier_chain_unregister(&panic_notifier_list,
					       &kgdb_panic_event_nb);
		kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
		unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_con_registered) {
			unregister_console(&kgdbcons);
			kgdb_con_registered = 0;
		}
	}
}

/*
 * There are times a tasklet needs to be used instead of a compiled-in
 * breakpoint so as to cause an exception outside a kgdb I/O module,
 * such as is the case with kgdboe, where calling a breakpoint in the
 * I/O driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
	kgdb_breakpoint();
	atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);

void kgdb_schedule_breakpoint(void)
{
	if (atomic_read(&kgdb_break_tasklet_var) ||
		atomic_read(&kgdb_active) != -1 ||
		atomic_read(&kgdb_setting_breakpoint))
		return;
	atomic_inc(&kgdb_break_tasklet_var);
	tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);

static void kgdb_initial_breakpoint(void)
{
	kgdb_break_asap = 0;

	printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
	kgdb_breakpoint();
}

/**
 *	kgdb_register_io_module - register KGDB IO module
 *	@new_dbg_io_ops: the io ops vector
 *
 *	Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
	int err;

	spin_lock(&kgdb_registration_lock);

	if (dbg_io_ops) {
		spin_unlock(&kgdb_registration_lock);

		printk(KERN_ERR "kgdb: Another I/O driver is already "
				"registered with KGDB.\n");
		return -EBUSY;
	}

	if (new_dbg_io_ops->init) {
		err = new_dbg_io_ops->init();
		if (err) {
			spin_unlock(&kgdb_registration_lock);
			return err;
		}
	}

	dbg_io_ops = new_dbg_io_ops;

	spin_unlock(&kgdb_registration_lock);

	printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
	       new_dbg_io_ops->name);

	/* Arm KGDB now. */
	kgdb_register_callbacks();

	if (kgdb_break_asap)
		kgdb_initial_breakpoint();

	return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);

/**
 *	kgdb_unregister_io_module - unregister KGDB IO module
 *	@old_dbg_io_ops: the io ops vector
 *
 *	Unregister it with the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
	BUG_ON(kgdb_connected);

	/*
	 * KGDB is no longer able to communicate out, so
	 * unregister our callbacks and reset state.
	 */
	kgdb_unregister_callbacks();

	spin_lock(&kgdb_registration_lock);

	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
	dbg_io_ops = NULL;

	spin_unlock(&kgdb_registration_lock);

	printk(KERN_INFO
		"kgdb: Unregistered I/O driver %s, debugger disabled.\n",
		old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);

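/*
 * Fetch one character from the registered I/O driver.  In kdb mode the
 * DEL character (127) sent by most terminals is translated to an ASCII
 * backspace (8), which is what kdb's line editing expects.
 */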
int dbg_io_get_char(void)
{
	int ret = dbg_io_ops->read_char();
	if (ret == NO_POLL_CHAR)
		return -1;
	if (!dbg_kdb_mode)
		return ret;
	if (ret == 127)
		return 8;
	return ret;
}

/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
void kgdb_breakpoint(void)
{
	atomic_inc(&kgdb_setting_breakpoint);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
	atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);
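
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * kernel code that wants to drop into the attached debugger at a specific
 * point can simply call kgdb_breakpoint() directly, e.g.
 *
 *	if (something_went_very_wrong)
 *		kgdb_breakpoint();
 *
 * assuming an I/O module is registered and a debugger can attach.
 */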

static int __init opt_kgdb_wait(char *str)
{
	kgdb_break_asap = 1;

	kdb_init(KDB_INIT_EARLY);
	if (kgdb_io_module_registered)
		kgdb_initial_breakpoint();

	return 0;
}

early_param("kgdbwait", opt_kgdb_wait);
987