/*
 * Stack trace management functions
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>

/*
 * Walk the stack frames between low and high following the back chain.
 * gprs[8] of each frame holds the saved return address (r14). A zero
 * back chain may mark an interrupt frame: if a pt_regs area fits right
 * after the frame, record its PSW address and continue with the stack
 * pointer (r15) saved in the pt_regs. If savesched is zero, addresses
 * inside scheduler functions are not recorded.
 */
static unsigned long save_context_stack(struct stack_trace *trace,
					unsigned long sp,
					unsigned long low,
					unsigned long high,
					int savesched)
{
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long addr;

	while (1) {
		if (sp < low || sp > high)
			return sp;
		sf = (struct stack_frame *)sp;
		while (1) {
			addr = sf->gprs[8];
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
			if (trace->nr_entries >= trace->max_entries)
				return sp;
			low = sp;
			sp = sf->back_chain;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *)sp;
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long)(sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *)sp;
		addr = regs->psw.addr;
		if (savesched || !in_sched_functions(addr)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
		}
		if (trace->nr_entries >= trace->max_entries)
			return sp;
		low = sp;
		sp = regs->gprs[15];
	}
}

/*
 * Traverse the panic stack, the async (interrupt) stack and the task's
 * kernel stack in turn, each time continuing with the stack pointer
 * returned by the previous traversal.
 */
static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
{
	unsigned long new_sp, frame_size;

	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	new_sp = save_context_stack(trace, sp,
			S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
			S390_lowcore.panic_stack + frame_size, 1);
	new_sp = save_context_stack(trace, new_sp,
			S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
			S390_lowcore.async_stack + frame_size, 1);
	save_context_stack(trace, new_sp,
			   S390_lowcore.thread_info,
			   S390_lowcore.thread_info + THREAD_SIZE, 1);
}

void save_stack_trace(struct stack_trace *trace)
{
	register unsigned long r15 asm ("15");
	unsigned long sp;

	sp = r15;
	__save_stack_trace(trace, sp);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp, low, high;

	sp = tsk->thread.ksp;
	if (tsk == current) {
		/* Get current stack pointer. */
		asm volatile("la %0,0(15)" : "=a" (sp));
	}
	low = (unsigned long) task_stack_page(tsk);
	high = (unsigned long) task_pt_regs(tsk);
	save_context_stack(trace, sp, low, high, 0);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	unsigned long sp;

	sp = kernel_stack_pointer(regs);
	__save_stack_trace(trace, sp);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);