stacktrace.c (9fe64e15148d1ff81fa0bcb5fd58531e3ae2aa3a) | stacktrace.c (af085d9084b48530153f51e6cad19fd0b1a13ed7) |
---|---|
1/* 2 * Stack trace management functions 3 * 4 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 5 */ 6#include <linux/sched.h> 7#include <linux/sched/debug.h> 8#include <linux/sched/task_stack.h> --- 62 unchanged lines hidden (view full) --- 71 return; 72 73 __save_stack_trace(trace, tsk, NULL, true); 74 75 put_task_stack(tsk); 76} 77EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 78 | 1/* 2 * Stack trace management functions 3 * 4 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 5 */ 6#include <linux/sched.h> 7#include <linux/sched/debug.h> 8#include <linux/sched/task_stack.h> --- 62 unchanged lines hidden (view full) --- 71 return; 72 73 __save_stack_trace(trace, tsk, NULL, true); 74 75 put_task_stack(tsk); 76} 77EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 78 |
#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE

/*
 * Dump the stack of @task at most once per boot, so a systematic unwinder
 * problem produces one diagnostic instead of flooding the log.  The guard
 * flag lives in .data.unlikely since this path should never trigger.
 */
#define STACKTRACE_DUMP_ONCE(task) ({					\
	static bool __section(.data.unlikely) __dump_done;		\
									\
	if (!__dump_done) {						\
		__dump_done = true;					\
		WARN_ON(1);						\
		show_stack(task, NULL);					\
	}								\
})

/*
 * Walk @task's stack and record return addresses into @trace, bailing out
 * with -EINVAL as soon as anything looks unreliable: kernel-mode pt_regs on
 * the stack, an unresolvable return address, a full trace buffer, or an
 * unwinder-reported error.  Returns 0 only for a fully reliable trace.
 */
static int __save_stack_trace_reliable(struct stack_trace *trace,
				       struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state);
		if (!regs) {
			/* Ordinary frame: record its return address. */
			addr = unwind_get_return_address(&state);

			/*
			 * A NULL or invalid return address probably means
			 * there's some generated code which
			 * __kernel_text_address() doesn't know about.
			 */
			if (!addr) {
				STACKTRACE_DUMP_ONCE(task);
				return -EINVAL;
			}

			if (save_stack_address(trace, addr, false))
				return -EINVAL;

			continue;
		}

		/*
		 * Kernel mode registers on the stack indicate an in-kernel
		 * interrupt or exception (e.g., preemption or a page fault),
		 * which can make frame pointers unreliable.
		 */
		if (!user_mode(regs))
			return -EINVAL;

		/*
		 * The last frame contains the user mode syscall pt_regs.
		 * Skip it and finish the unwind.
		 */
		unwind_next_frame(&state);
		if (!unwind_done(&state)) {
			STACKTRACE_DUMP_ONCE(task);
			return -EINVAL;
		}

		break;
	}

	/* Check for stack corruption */
	if (unwind_error(&state)) {
		STACKTRACE_DUMP_ONCE(task);
		return -EINVAL;
	}

	/* Terminate the trace, as the other save_stack_trace* variants do. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;

	return 0;
}

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int err;

	/* Pin the stack so it cannot be freed out from under the unwinder. */
	if (!try_get_task_stack(tsk))
		return -EINVAL;

	err = __save_stack_trace_reliable(trace, tsk);

	put_task_stack(tsk);

	return err;
}
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
|
79/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */ 80 81struct stack_frame_user { 82 const void __user *next_fp; 83 unsigned long ret_addr; 84}; 85 86static int --- 46 unchanged lines hidden (view full) --- 133 * Trace user stack if we are not a kernel thread 134 */ 135 if (current->mm) { 136 __save_stack_trace_user(trace); 137 } 138 if (trace->nr_entries < trace->max_entries) 139 trace->entries[trace->nr_entries++] = ULONG_MAX; 140} | 174/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */ 175 176struct stack_frame_user { 177 const void __user *next_fp; 178 unsigned long ret_addr; 179}; 180 181static int --- 46 unchanged lines hidden (view full) --- 228 * Trace user stack if we are not a kernel thread 229 */ 230 if (current->mm) { 231 __save_stack_trace_user(trace); 232 } 233 if (trace->nr_entries < trace->max_entries) 234 trace->entries[trace->nr_entries++] = ULONG_MAX; 235} |
141 | |