/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs && !consume_entry(cookie, regs->ip, false))
		return;

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr, false))
			break;
	}
}

/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/* Success path for user tasks */
			if (user_mode(regs))
				return 0;

			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (IS_ENABLED(CONFIG_FRAME_POINTER))
				return -EINVAL;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr)
			return -EINVAL;

		if (!consume_entry(cookie, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	/* Success path for non-user tasks, i.e. kthreads and idle tasks */
	if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
		return -EINVAL;

	return 0;
}

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};

static int
copy_stack_frame(const struct stack_frame_user __user *fp,
		 struct stack_frame_user *frame)
{
	int ret;

	if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__get_user(frame->next_fp, &fp->next_fp) ||
	    __get_user(frame->ret_addr, &fp->ret_addr))
		ret = 0;
	pagefault_enable();

	return ret;
}

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	const void __user *fp = (const void __user *)regs->bp;

	if (!consume_entry(cookie, regs->ip, false))
		return;

	while (1) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (!frame.ret_addr)
			break;
		if (!consume_entry(cookie, frame.ret_addr, false))
			break;
		fp = frame.next_fp;
	}
}
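
/*
 * Illustrative usage sketch (an editorial addition, not part of the
 * upstream file): a minimal stack_trace_consume_fn that records return
 * addresses into a fixed-size buffer while arch_stack_walk() unwinds the
 * current task's stack.  The names trace_buf, record_entry and
 * dump_current_stack are hypothetical.  The callback signature matches
 * the callers above: returning true continues the walk, returning false
 * stops it, and the third 'reliable' argument is ignored here.
 */
#if 0	/* sketch only, kept out of the build */
struct trace_buf {
	unsigned long	entries[16];
	unsigned int	nr;
};

static bool record_entry(void *cookie, unsigned long addr, bool reliable)
{
	struct trace_buf *buf = cookie;

	if (buf->nr >= ARRAY_SIZE(buf->entries))
		return false;	/* buffer full: stop unwinding */

	buf->entries[buf->nr++] = addr;
	return true;		/* keep unwinding */
}

static void dump_current_stack(void)
{
	struct trace_buf buf = { .nr = 0 };
	unsigned int i;

	/* regs == NULL: start the walk from the current frame of 'current' */
	arch_stack_walk(record_entry, &buf, current, NULL);

	for (i = 0; i < buf.nr; i++)
		pr_info("  %pS\n", (void *)buf.entries[i]);
}
#endif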