xref: /linux/arch/sh/kernel/kgdb.c (revision b24413180f5600bcb3bb70fbed5cf186b60864bd)
1 /*
2  * SuperH KGDB support
3  *
4  * Copyright (C) 2008 - 2012  Paul Mundt
5  *
6  * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
7  *
8  * This file is subject to the terms and conditions of the GNU General Public
9  * License.  See the file "COPYING" in the main directory of this archive
10  * for more details.
11  */
12 #include <linux/kgdb.h>
13 #include <linux/kdebug.h>
14 #include <linux/irq.h>
15 #include <linux/io.h>
16 #include <linux/sched.h>
17 #include <linux/sched/task_stack.h>
18 
19 #include <asm/cacheflush.h>
20 #include <asm/traps.h>
21 
/*
 * Macros for single step instruction identification.
 *
 * 16-bit SuperH opcode patterns for every instruction that can change
 * the flow of control, plus helpers that extract branch displacements
 * and register operands.  The displacement extractors sign-extend the
 * raw field and shift left once (branch targets are 2-byte aligned).
 */
#define OPCODE_BT(op)		(((op) & 0xff00) == 0x8900)
#define OPCODE_BF(op)		(((op) & 0xff00) == 0x8b00)
/* 8-bit signed displacement shared by BT/BF/BTS/BFS, scaled by 2 */
#define OPCODE_BTF_DISP(op)	(((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
				 (((op) & 0x7f ) << 1))
#define OPCODE_BFS(op)		(((op) & 0xff00) == 0x8f00)
#define OPCODE_BTS(op)		(((op) & 0xff00) == 0x8d00)
#define OPCODE_BRA(op)		(((op) & 0xf000) == 0xa000)
/* 12-bit signed displacement for BRA, scaled by 2 */
#define OPCODE_BRA_DISP(op)	(((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
				 (((op) & 0x7ff) << 1))
#define OPCODE_BRAF(op)		(((op) & 0xf0ff) == 0x0023)
#define OPCODE_BRAF_REG(op)	(((op) & 0x0f00) >> 8)
#define OPCODE_BSR(op)		(((op) & 0xf000) == 0xb000)
/* 12-bit signed displacement for BSR, scaled by 2 */
#define OPCODE_BSR_DISP(op)	(((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
				 (((op) & 0x7ff) << 1))
#define OPCODE_BSRF(op)		(((op) & 0xf0ff) == 0x0003)
#define OPCODE_BSRF_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_JMP(op)		(((op) & 0xf0ff) == 0x402b)
#define OPCODE_JMP_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_JSR(op)		(((op) & 0xf0ff) == 0x400b)
#define OPCODE_JSR_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_RTS(op)		((op) == 0xb)
#define OPCODE_RTE(op)		((op) == 0x2b)

/* T (condition/true) bit in the status register */
#define SR_T_BIT_MASK           0x1
/*
 * Opcode planted at the step target: trapa #0x3d, by analogy with the
 * 0xc3 0x3c "trapa #0x3c" breakpoint in arch_kgdb_ops below.
 */
#define STEP_OPCODE             0xc33d
48 
/*
 * Calculate the new address for after a step: decode the instruction
 * at the current PC and work out where control flow goes next, so a
 * trap can be planted there by do_single_step().
 *
 * Note that delay slots are not handled specially; see the comment
 * above do_single_step() about that limitation.
 */
static short *get_step_address(struct pt_regs *linux_regs)
{
	insn_size_t op = __raw_readw(linux_regs->pc);
	long addr;

	/* BT: branch to pc + 4 + disp if T set, else fall through */
	if (OPCODE_BT(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BTS: as BT, but with a delay slot */
	else if (OPCODE_BTS(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4;	/* Not in delay slot */
	}

	/* BF: branch to pc + 4 + disp if T clear, else fall through */
	else if (OPCODE_BF(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BFS: as BF, but with a delay slot */
	else if (OPCODE_BFS(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4;	/* Not in delay slot */
	}

	/* BRA: unconditional pc-relative branch */
	else if (OPCODE_BRA(op))
		addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op);

	/* BRAF: unconditional branch to pc + 4 + Rn */
	else if (OPCODE_BRAF(op))
		addr = linux_regs->pc + 4
		    + linux_regs->regs[OPCODE_BRAF_REG(op)];

	/* BSR: pc-relative subroutine call */
	else if (OPCODE_BSR(op))
		addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op);

	/* BSRF: subroutine call to pc + 4 + Rn */
	else if (OPCODE_BSRF(op))
		addr = linux_regs->pc + 4
		    + linux_regs->regs[OPCODE_BSRF_REG(op)];

	/* JMP: absolute jump to Rn */
	else if (OPCODE_JMP(op))
		addr = linux_regs->regs[OPCODE_JMP_REG(op)];

	/* JSR: absolute subroutine call to Rn */
	else if (OPCODE_JSR(op))
		addr = linux_regs->regs[OPCODE_JSR_REG(op)];

	/* RTS: return to the address held in PR */
	else if (OPCODE_RTS(op))
		addr = linux_regs->pr;

	/*
	 * RTE: return from exception.
	 *
	 * NOTE(review): this uses the value of r15 itself as the resume
	 * address rather than loading SPC/the saved pc from the stack
	 * frame r15 points at — looks questionable; confirm against the
	 * exception frame layout.
	 */
	else if (OPCODE_RTE(op))
		addr = linux_regs->regs[15];

	/* Other: straight-line code, just advance past this insn */
	else
		addr = linux_regs->pc + instruction_size(op);

	/*
	 * NOTE(review): flushing the target range here looks redundant
	 * with the flush do_single_step() performs after patching in
	 * the trap opcode; also instruction_size(op) is the size of the
	 * *current* insn, not the one at addr — confirm intent.
	 */
	flush_icache_range(addr, addr + instruction_size(op));
	return (short *)addr;
}
128 
129 /*
130  * Replace the instruction immediately after the current instruction
131  * (i.e. next in the expected flow of control) with a trap instruction,
132  * so that returning will cause only a single instruction to be executed.
133  * Note that this model is slightly broken for instructions with delay
134  * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the
135  * instruction in the delay slot will be executed.
136  */
137 
/* Saved state for the currently planted single-step trap. */
static unsigned long stepped_address;	/* where STEP_OPCODE was written */
static insn_size_t stepped_opcode;	/* original insn; 0 => nothing planted */
140 
141 static void do_single_step(struct pt_regs *linux_regs)
142 {
143 	/* Determine where the target instruction will send us to */
144 	unsigned short *addr = get_step_address(linux_regs);
145 
146 	stepped_address = (int)addr;
147 
148 	/* Replace it */
149 	stepped_opcode = __raw_readw((long)addr);
150 	*addr = STEP_OPCODE;
151 
152 	/* Flush and return */
153 	flush_icache_range((long)addr, (long)addr +
154 			   instruction_size(stepped_opcode));
155 }
156 
157 /* Undo a single step */
158 static void undo_single_step(struct pt_regs *linux_regs)
159 {
160 	/* If we have stepped, put back the old instruction */
161 	/* Use stepped_address in case we stopped elsewhere */
162 	if (stepped_opcode != 0) {
163 		__raw_writew(stepped_opcode, stepped_address);
164 		flush_icache_range(stepped_address, stepped_address + 2);
165 	}
166 
167 	stepped_opcode = 0;
168 }
169 
/*
 * Register description table for the gdb stub.  Entry order must match
 * the GDB_* register numbering (GDB_R0..GDB_R15, GDB_PC, ...) used by
 * dbg_get_reg()/dbg_set_reg() and sleeping_thread_to_gdb_regs().
 * An offset of -1 marks a register that does not live in pt_regs and
 * must be fetched specially (vbr, via "stc vbr" in dbg_get_reg()).
 */
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{ "r0",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
	{ "r1",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
	{ "r2",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
	{ "r3",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
	{ "r4",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
	{ "r5",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
	{ "r6",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
	{ "r7",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
	{ "r8",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
	{ "r9",		GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
	{ "r10",	GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
	{ "r11",	GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
	{ "r12",	GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
	{ "r13",	GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
	{ "r14",	GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
	{ "r15",	GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
	{ "pc",		GDB_SIZEOF_REG, offsetof(struct pt_regs, pc) },
	{ "pr",		GDB_SIZEOF_REG, offsetof(struct pt_regs, pr) },
	{ "sr",		GDB_SIZEOF_REG, offsetof(struct pt_regs, sr) },
	{ "gbr",	GDB_SIZEOF_REG, offsetof(struct pt_regs, gbr) },
	{ "mach",	GDB_SIZEOF_REG, offsetof(struct pt_regs, mach) },
	{ "macl",	GDB_SIZEOF_REG, offsetof(struct pt_regs, macl) },
	{ "vbr",	GDB_SIZEOF_REG, -1 },
};
195 
196 int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
197 {
198 	if (regno < 0 || regno >= DBG_MAX_REG_NUM)
199 		return -EINVAL;
200 
201 	if (dbg_reg_def[regno].offset != -1)
202 		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
203 		       dbg_reg_def[regno].size);
204 
205 	return 0;
206 }
207 
208 char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
209 {
210 	if (regno >= DBG_MAX_REG_NUM || regno < 0)
211 		return NULL;
212 
213 	if (dbg_reg_def[regno].size != -1)
214 		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
215 		       dbg_reg_def[regno].size);
216 
217 	switch (regno) {
218 	case GDB_VBR:
219 		__asm__ __volatile__ ("stc vbr, %0" : "=r" (mem));
220 		break;
221 	}
222 
223 	return dbg_reg_def[regno].name;
224 }
225 
226 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
227 {
228 	struct pt_regs *thread_regs = task_pt_regs(p);
229 	int reg;
230 
231 	/* Initialize to zero */
232 	for (reg = 0; reg < DBG_MAX_REG_NUM; reg++)
233 		gdb_regs[reg] = 0;
234 
235 	/*
236 	 * Copy out GP regs 8 to 14.
237 	 *
238 	 * switch_to() relies on SR.RB toggling, so regs 0->7 are banked
239 	 * and need privileged instructions to get to. The r15 value we
240 	 * fetch from the thread info directly.
241 	 */
242 	for (reg = GDB_R8; reg < GDB_R15; reg++)
243 		gdb_regs[reg] = thread_regs->regs[reg];
244 
245 	gdb_regs[GDB_R15] = p->thread.sp;
246 	gdb_regs[GDB_PC] = p->thread.pc;
247 
248 	/*
249 	 * Additional registers we have context for
250 	 */
251 	gdb_regs[GDB_PR] = thread_regs->pr;
252 	gdb_regs[GDB_GBR] = thread_regs->gbr;
253 }
254 
/*
 * Handle a packet from the remote gdb: 'c' (continue), 's' (step),
 * 'D' (detach) and 'k' (kill).  Returns 0 when the packet was handled
 * and the CPU should resume, -1 to stay in the exception handler.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	/* Undo any stepping we may have done */
	undo_single_step(linux_regs);

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->pc = addr;
		/* fall through - resume handling is shared with D/k */
	case 'D':
	case 'k':
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		/* For 's', plant a trap at the next insn before resuming. */
		if (remcomInBuffer[0] == 's') {
			do_single_step(linux_regs);
			kgdb_single_step = 1;

			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}

		return 0;
	}

	/* this means that we do not want to exit from the handler: */
	return -1;
}
290 
/*
 * Report the PC to the debugger core.  For exception 60 the saved PC
 * points past the 2-byte trapping instruction, so wind it back.
 * (NOTE(review): presumably 60 is the single-step/debug trap vector —
 * confirm against the vector table.)
 */
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	return (exception == 60) ? pc - 2 : pc;
}
297 
/* Let the debugger core rewrite the resume PC. */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->pc = ip;
}
302 
303 /*
304  * The primary entry points for the kgdb debug trap table entries.
305  */
/*
 * Trap handler entered when the planted STEP_OPCODE trap fires.
 * Re-enters the debugger with the PC rewound onto the trapped
 * instruction so it can be restored and re-executed.
 */
BUILD_TRAP_HANDLER(singlestep)
{
	unsigned long flags;
	TRAP_HANDLER_DECL;

	local_irq_save(flags);
	/*
	 * Back the PC up by the size of the instruction fetched at
	 * pc - 4.  NOTE(review): the -4 offset depends on the exact
	 * exception PC layout for trapa — confirm.
	 */
	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
	kgdb_handle_exception(0, SIGTRAP, 0, regs);
	local_irq_restore(flags);
}
316 
/* Per-CPU callback used by kgdb_roundup_cpus() to park this CPU in kgdb. */
static void kgdb_call_nmi_hook(void *ignored)
{
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}
321 
/*
 * Ask the other CPUs to enter kgdb via a cross call.
 *
 * NOTE(review): interrupts are enabled around the cross call —
 * presumably so IPIs can be handled here while rounding up — and
 * disabled again before returning to the debugger core; confirm
 * against the smp_call_function() requirements.
 */
void kgdb_roundup_cpus(unsigned long flags)
{
	local_irq_enable();
	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
	local_irq_disable();
}
328 
329 static int __kgdb_notify(struct die_args *args, unsigned long cmd)
330 {
331 	int ret;
332 
333 	switch (cmd) {
334 	case DIE_BREAKPOINT:
335 		/*
336 		 * This means a user thread is single stepping
337 		 * a system call which should be ignored
338 		 */
339 		if (test_thread_flag(TIF_SINGLESTEP))
340 			return NOTIFY_DONE;
341 
342 		ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
343 					    args->err, args->regs);
344 		if (ret)
345 			return NOTIFY_DONE;
346 
347 		break;
348 	}
349 
350 	return NOTIFY_STOP;
351 }
352 
/* Notifier entry point: run __kgdb_notify() with interrupts off. */
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	unsigned long flags;
	int rc;

	local_irq_save(flags);
	rc = __kgdb_notify(ptr, cmd);
	local_irq_restore(flags);

	return rc;
}
365 
/* Die-chain notifier registered by kgdb_arch_init(). */
static struct notifier_block kgdb_notifier = {
	.notifier_call	= kgdb_notify,

	/*
	 * Lowest-prio notifier priority, we want to be notified last:
	 */
	.priority	= -INT_MAX,
};
374 
/* Arch hook: hook kgdb into the die-notifier chain. */
int kgdb_arch_init(void)
{
	return register_die_notifier(&kgdb_notifier);
}
379 
/* Arch hook: detach kgdb from the die-notifier chain. */
void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}
384 
/*
 * Arch-specific kgdb operations.  The breakpoint bytes are byte-order
 * dependent since they are written into the instruction stream.
 */
struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: trapa #0x3c */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	.gdb_bpt_instr		= { 0x3c, 0xc3 },
#else
	.gdb_bpt_instr		= { 0xc3, 0x3c },
#endif
};
393