xref: /linux/arch/hexagon/kernel/traps.c (revision 3f0a50f345f78183f6e9b39c2f45ca5dcaa511ca)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel traps/events for Hexagon processor
 *
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <asm/traps.h>
#include <asm/vm_fault.h>
#include <asm/syscall.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/sections.h>
#ifdef CONFIG_KGDB
# include <linux/kgdb.h>
#endif

#define TRAP_SYSCALL	1
#define TRAP_DEBUG	0xdb

#ifdef CONFIG_GENERIC_BUG
/* Maybe should resemble arch/sh/kernel/traps.c ?? */
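/*
 * Returning 1 unconditionally treats every candidate address as a
 * possible BUG() site and leaves the decision to the generic
 * bug-table lookup.
 */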
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_GENERIC_BUG */

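/*
 * Map a general-exception cause code to a human-readable description
 * for diagnostic output.
 */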
static const char *ex_name(int ex)
{
	switch (ex) {
	case HVM_GE_C_XPROT:
	case HVM_GE_C_XUSER:
		return "Execute protection fault";
	case HVM_GE_C_RPROT:
	case HVM_GE_C_RUSER:
		return "Read protection fault";
	case HVM_GE_C_WPROT:
	case HVM_GE_C_WUSER:
		return "Write protection fault";
	case HVM_GE_C_XMAL:
		return "Misaligned instruction";
	case HVM_GE_C_WREG:
		return "Multiple writes to same register in packet";
	case HVM_GE_C_PCAL:
		return "Program counter values that are not properly aligned";
	case HVM_GE_C_RMAL:
		return "Misaligned data load";
	case HVM_GE_C_WMAL:
		return "Misaligned data store";
	case HVM_GE_C_INVI:
	case HVM_GE_C_PRIVI:
		return "Illegal instruction";
	case HVM_GE_C_BUS:
		return "Precise bus error";
	case HVM_GE_C_CACHE:
		return "Cache error";

	case 0xdb:
		return "Debugger trap";

	default:
		return "Unrecognized exception";
	}
}

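/*
 * Walk the kernel stack by following the frame-pointer (r30) chain.
 * Each frame stores the caller's FP at *fp and the saved link register
 * one word above it; a zero saved FP marks an exception frame, where a
 * struct pt_regs sits just above and the walk continues from the
 * register state captured there.
 */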
static void do_show_stack(struct task_struct *task, unsigned long *fp,
			  unsigned long ip, const char *loglvl)
{
	int kstack_depth_to_print = 24;
	unsigned long offset, size;
	const char *name = NULL;
	unsigned long *newfp;
	unsigned long low, high;
	char tmpstr[128];
	char *modname;
	int i;

	if (task == NULL)
		task = current;

	printk("%sCPU#%d, %s/%d, Call Trace:\n", loglvl, raw_smp_processor_id(),
		task->comm, task_pid_nr(task));

	if (fp == NULL) {
		if (task == current) {
			asm("%0 = r30" : "=r" (fp));
		} else {
			fp = (unsigned long *)
			     ((struct hexagon_switch_stack *)
			     task->thread.switch_sp)->fp;
		}
	}

	if ((((unsigned long) fp) & 0x3) || ((unsigned long) fp < 0x1000)) {
		printk("%s-- Corrupt frame pointer %p\n", loglvl, fp);
		return;
	}

	/* Saved link reg is one word above FP */
	if (!ip)
		ip = *(fp+1);

	/* Expect kernel stack to be in-bounds */
	low = (unsigned long)task_stack_page(task);
	high = low + THREAD_SIZE - 8;
	low += sizeof(struct thread_info);

	for (i = 0; i < kstack_depth_to_print; i++) {

		name = kallsyms_lookup(ip, &size, &offset, &modname, tmpstr);

		printk("%s[%p] 0x%lx: %s + 0x%lx", loglvl, fp, ip, name, offset);
		if (((unsigned long) fp < low) || (high < (unsigned long) fp))
			printk(KERN_CONT " (FP out of bounds!)");
		if (modname)
			printk(KERN_CONT " [%s] ", modname);
		printk(KERN_CONT "\n");

		newfp = (unsigned long *) *fp;

		if (((unsigned long) newfp) & 0x3) {
			printk("%s-- Corrupt frame pointer %p\n", loglvl, newfp);
			break;
		}

		/* Attempt to continue past exception. */
		if (0 == newfp) {
			struct pt_regs *regs = (struct pt_regs *) (((void *)fp)
						+ 8);

			if (regs->syscall_nr != -1) {
				printk("%s-- trap0 -- syscall_nr: %ld", loglvl,
					regs->syscall_nr);
				printk(KERN_CONT "  psp: %lx  elr: %lx\n",
					 pt_psp(regs), pt_elr(regs));
				break;
			} else {
				/* really want to see more ... */
				kstack_depth_to_print += 6;
				printk("%s-- %s (0x%lx)  badva: %lx\n", loglvl,
					ex_name(pt_cause(regs)), pt_cause(regs),
					pt_badva(regs));
			}

			newfp = (unsigned long *) regs->r30;
			ip = pt_elr(regs);
		} else {
			ip = *(newfp + 1);
		}

		/* If link reg is null, we are done. */
		if (ip == 0x0)
			break;

		/* If newfp isn't larger, we're tracing garbage. */
		if (newfp > fp)
			fp = newfp;
		else
			break;
	}
}

void show_stack(struct task_struct *task, unsigned long *fp, const char *loglvl)
{
	/* Saved link reg is one word above FP */
	do_show_stack(task, fp, 0, loglvl);
}

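/*
 * Report a fatal kernel error: serialize the oops output, dump the
 * registers and a stack trace, then panic or kill the offending task.
 */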
int die(const char *str, struct pt_regs *regs, long err)
{
	static struct {
		spinlock_t lock;
		int counter;
	} die = {
		.lock = __SPIN_LOCK_UNLOCKED(die.lock),
		.counter = 0
	};

	console_verbose();
	oops_enter();

	spin_lock_irq(&die.lock);
	bust_spinlocks(1);
	printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter);

	if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) ==
	    NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);
	do_show_stack(current, &regs->r30, pt_elr(regs), KERN_EMERG);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	spin_unlock_irq(&die.lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	make_task_dead(err);
	return 0;
}

int die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		return die(str, regs, err);
	else
		return 0;
}

/*
 * It's not clear that misaligned fetches are ever recoverable.
 */
static void misaligned_instruction(struct pt_regs *regs)
{
	die_if_kernel("Misaligned Instruction", regs, 0);
	force_sig(SIGBUS);
}

/*
 * Misaligned loads and stores, on the other hand, can be
 * emulated, and probably should be, some day.  But for now
 * they will be considered fatal.
 */
static void misaligned_data_load(struct pt_regs *regs)
{
	die_if_kernel("Misaligned Data Load", regs, 0);
	force_sig(SIGBUS);
}

static void misaligned_data_store(struct pt_regs *regs)
{
	die_if_kernel("Misaligned Data Store", regs, 0);
	force_sig(SIGBUS);
}

static void illegal_instruction(struct pt_regs *regs)
{
	die_if_kernel("Illegal Instruction", regs, 0);
	force_sig(SIGILL);
}

/*
 * Precise bus errors may be recoverable with a retry,
 * but for now, treat them as irrecoverable.
 */
static void precise_bus_error(struct pt_regs *regs)
{
	die_if_kernel("Precise Bus Error", regs, 0);
	force_sig(SIGBUS);
}

/*
 * If anything is to be done here other than panic,
 * it will probably be complex and migrate to another
 * source module.  For now, just die.
 */
static void cache_error(struct pt_regs *regs)
{
	die("Cache Error", regs, 0);
}

/*
 * General exception handler
 */
void do_genex(struct pt_regs *regs)
{
	/*
	 * Decode Cause and Dispatch
	 */
	switch (pt_cause(regs)) {
	case HVM_GE_C_XPROT:
	case HVM_GE_C_XUSER:
		execute_protection_fault(regs);
		break;
	case HVM_GE_C_RPROT:
	case HVM_GE_C_RUSER:
		read_protection_fault(regs);
		break;
	case HVM_GE_C_WPROT:
	case HVM_GE_C_WUSER:
		write_protection_fault(regs);
		break;
	case HVM_GE_C_XMAL:
		misaligned_instruction(regs);
		break;
	case HVM_GE_C_WREG:
		illegal_instruction(regs);
		break;
	case HVM_GE_C_PCAL:
		misaligned_instruction(regs);
		break;
	case HVM_GE_C_RMAL:
		misaligned_data_load(regs);
		break;
	case HVM_GE_C_WMAL:
		misaligned_data_store(regs);
		break;
	case HVM_GE_C_INVI:
	case HVM_GE_C_PRIVI:
		illegal_instruction(regs);
		break;
	case HVM_GE_C_BUS:
		precise_bus_error(regs);
		break;
	case HVM_GE_C_CACHE:
		cache_error(regs);
		break;
	default:
		/* Halt and catch fire */
		panic("Unrecognized exception 0x%lx\n", pt_cause(regs));
		break;
	}
}

/* Indirect system call dispatch */
long sys_syscall(void)
{
	printk(KERN_ERR "sys_syscall invoked!\n");
	return -ENOSYS;
}

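/*
 * trap0 handler: cause 1 (TRAP_SYSCALL) dispatches a system call,
 * cause 0xdb (TRAP_DEBUG) is a debugger breakpoint; other trap0
 * causes are ignored.
 */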
void do_trap0(struct pt_regs *regs)
{
	syscall_fn syscall;

	switch (pt_cause(regs)) {
	case TRAP_SYSCALL:
		/* System call is trap0 #1 */

		/* allow strace to catch syscall args  */
		if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE) &&
			ptrace_report_syscall_entry(regs)))
			return;  /*  return -ENOSYS somewhere?  */

		/* Interrupts should be re-enabled for syscall processing */
		__vmsetie(VM_INT_ENABLE);

		/*
		 * The system call number is in r6, arguments in r0..r5.
		 * Fortunately, no Linux syscall has more than 6 arguments,
		 * and the Hexagon ABI passes the first 6 arguments in
		 * registers.  64-bit arguments are passed in odd/even
		 * register pairs.  Fortunately, we have no system calls
		 * that take more than three arguments with more than one
		 * 64-bit value.  Should that change, we'd need to redesign
		 * to copy between user and kernel stacks.
		 */
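		/*
		 * For reference only (nothing here executes it): a
		 * user-space invocation looks roughly like
		 *
		 *	r6 = #<syscall number>; r0..r5 = arguments;
		 *	trap0(#1);
		 *
		 * with the result coming back in r0.
		 */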
		regs->syscall_nr = regs->r06;

		/*
		 * GPR R0 carries the first parameter, and is also used
		 * to report the return value.  We need a backup of
		 * the user's value in case we need to do a late restart
		 * of the system call.
		 */
		regs->restart_r0 = regs->r00;

		if ((unsigned long) regs->syscall_nr >= __NR_syscalls) {
			regs->r00 = -1;
		} else {
			syscall = (syscall_fn)
				  (sys_call_table[regs->syscall_nr]);
			regs->r00 = syscall(regs->r00, regs->r01,
				   regs->r02, regs->r03,
				   regs->r04, regs->r05);
		}

		/* allow strace to get the syscall return state  */
		if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE)))
			ptrace_report_syscall_exit(regs, 0);

		break;
	case TRAP_DEBUG:
		/* Trap0 0xdb is a debug breakpoint */
		if (user_mode(regs)) {
			/*
			 * Some architectures add per-thread state to
			 * distinguish between breakpoint traps and
			 * trace traps.  We may want to do that, and
			 * set the si_code value appropriately, or we
			 * may want to use a different trap0 flavor.
			 */
			force_sig_fault(SIGTRAP, TRAP_BRKPT,
					(void __user *) pt_elr(regs));
		} else {
#ifdef CONFIG_KGDB
			kgdb_handle_exception(pt_cause(regs), SIGTRAP,
					      TRAP_BRKPT, regs);
#endif
		}
		break;
	}
	/* Ignore other trap0 codes for now, especially 0 (Angel calls) */
}

/*
 * Machine check exception handler
 */
void do_machcheck(struct pt_regs *regs)
{
	/* Halt and catch fire */
	__vmstop();
}

/*
 * Treat this like the old 0xdb trap.
 */

void do_debug_exception(struct pt_regs *regs)
{
	regs->hvmer.vmest &= ~HVM_VMEST_CAUSE_MSK;
	regs->hvmer.vmest |= (TRAP_DEBUG << HVM_VMEST_CAUSE_SFT);
	do_trap0(regs);
}
434