xref: /linux/arch/loongarch/kernel/stacktrace.c (revision 79790b6818e96c58fe2bffee1b418c16e64e7b80)
// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>
#include <asm/unwind.h>

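/*
 * Walk the kernel stack of @task (or the stack described by @regs) and
 * hand each return address to @consume_entry until it returns false or
 * the unwinder runs out of frames. If no pt_regs is supplied, a dummy
 * set is seeded from the caller's own frame (for current) or from the
 * task's saved context.
 */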
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	unsigned long addr;
	struct pt_regs dummyregs;
	struct unwind_state state;

	if (!regs) {
		regs = &dummyregs;

		if (task == current) {
			regs->regs[3] = (unsigned long)__builtin_frame_address(0);
			regs->csr_era = (unsigned long)__builtin_return_address(0);
		} else {
			regs->regs[3] = thread_saved_fp(task);
			regs->csr_era = thread_saved_ra(task);
		}
		regs->regs[1] = 0;
		regs->regs[22] = 0;
	}

	for (unwind_start(&state, task, regs);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}

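/*
 * Reliable stacktrace variant (for consumers such as livepatch that must
 * be able to trust the result). Returns 0 on success, or -EINVAL if any
 * frame lacks a valid return address, @consume_entry rejects an entry,
 * or the unwinder reports an error.
 */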
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	unsigned long addr;
	struct pt_regs dummyregs;
	struct pt_regs *regs = &dummyregs;
	struct unwind_state state;

	if (task == current) {
		regs->regs[3] = (unsigned long)__builtin_frame_address(0);
		regs->csr_era = (unsigned long)__builtin_return_address(0);
	} else {
		regs->regs[3] = thread_saved_fp(task);
		regs->csr_era = thread_saved_ra(task);
	}
	regs->regs[1] = 0;
	regs->regs[22] = 0;

	for (unwind_start(&state, task, regs);
	     !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know about.
		 */
		if (!addr)
			return -EINVAL;

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	return 0;
}

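/*
 * Copy the struct stack_frame stored just below @fp on the user stack,
 * with page faults disabled. Returns 1 on success, 0 if the memory is
 * inaccessible or the saved fp does not point further up the stack.
 */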
static int
copy_stack_frame(unsigned long fp, struct stack_frame *frame)
{
	int ret = 1;
	unsigned long err;
	unsigned long __user *user_frame_tail;

	user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
	if (!access_ok(user_frame_tail, sizeof(*frame)))
		return 0;

	pagefault_disable();
	err = (__copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame)));
	if (err || (unsigned long)user_frame_tail >= frame->fp)
		ret = 0;
	pagefault_enable();

	return ret;
}

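/*
 * Walk the user stack by following the frame-pointer chain starting at
 * $r22 (fp) in @regs. The walk stops on a NULL or misaligned fp, an
 * unreadable frame, a zero return address, or when @consume_entry
 * returns false.
 */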
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	unsigned long fp = regs->regs[22];

	while (fp && !((unsigned long)fp & 0xf)) {
		struct stack_frame frame;

		frame.fp = 0;
		frame.ra = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if (!frame.ra)
			break;
		if (!consume_entry(cookie, frame.ra))
			break;
		fp = frame.fp;
	}
}