Lines Matching +full:common +full:- +full:mode

1 // SPDX-License-Identifier: GPL-2.0-only
42 * @common: Common unwind state.
50 struct unwind_state common; member
65 unwind_init_common(&state->common); in kunwind_init()
66 state->task = task; in kunwind_init()
67 state->source = KUNWIND_SOURCE_UNKNOWN; in kunwind_init()
68 state->flags.all = 0; in kunwind_init()
69 state->regs = NULL; in kunwind_init()
85 state->regs = regs; in kunwind_init_from_regs()
86 state->common.fp = regs->regs[29]; in kunwind_init_from_regs()
87 state->common.pc = regs->pc; in kunwind_init_from_regs()
88 state->source = KUNWIND_SOURCE_REGS_PC; in kunwind_init_from_regs()
104 state->common.fp = (unsigned long)__builtin_frame_address(1); in kunwind_init_from_caller()
105 state->common.pc = (unsigned long)__builtin_return_address(0); in kunwind_init_from_caller()
106 state->source = KUNWIND_SOURCE_CALLER; in kunwind_init_from_caller()
125 state->common.fp = thread_saved_fp(task); in kunwind_init_from_task()
126 state->common.pc = thread_saved_pc(task); in kunwind_init_from_task()
127 state->source = KUNWIND_SOURCE_TASK; in kunwind_init_from_task()
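The three initialisation paths above differ only in where the first fp/pc pair comes from: a pt_regs snapshot, the current caller, or a blocked task's saved registers. Below is a minimal userspace sketch of the "from caller" case using the same compiler builtins as the quoted lines; the demo_* names are invented for illustration, and the code assumes frame pointers are kept (for example gcc -O1 -fno-omit-frame-pointer on an LP64 host).

#include <stdio.h>

struct demo_unwind_state {
    unsigned long fp;   /* frame pointer the walk would start from */
    unsigned long pc;   /* first program counter to report */
};

static void __attribute__((noinline))
demo_init_from_caller(struct demo_unwind_state *state)
{
    /*
     * frame_address(1) is the caller's frame record and
     * return_address(0) is the address we will return to in the
     * caller, so together they describe the caller rather than
     * this helper itself. Both require frame pointers.
     */
    state->fp = (unsigned long)__builtin_frame_address(1);
    state->pc = (unsigned long)__builtin_return_address(0);
}

int main(void)
{
    struct demo_unwind_state state;

    demo_init_from_caller(&state);
    printf("unwind would start at pc=%#lx fp=%#lx\n", state.pc, state.fp);
    return 0;
}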
134 if (state->task->ret_stack && in kunwind_recover_return_address()
135 (state->common.pc == (unsigned long)return_to_handler)) { in kunwind_recover_return_address()
137 orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx, in kunwind_recover_return_address()
138 state->common.pc, in kunwind_recover_return_address()
139 (void *)state->common.fp); in kunwind_recover_return_address()
140 if (state->common.pc == orig_pc) { in kunwind_recover_return_address()
141 WARN_ON_ONCE(state->task == current); in kunwind_recover_return_address()
142 return -EINVAL; in kunwind_recover_return_address()
144 state->common.pc = orig_pc; in kunwind_recover_return_address()
145 state->flags.fgraph = 1; in kunwind_recover_return_address()
150 if (is_kretprobe_trampoline(state->common.pc)) { in kunwind_recover_return_address()
152 orig_pc = kretprobe_find_ret_addr(state->task, in kunwind_recover_return_address()
153 (void *)state->common.fp, in kunwind_recover_return_address()
154 &state->kr_cur); in kunwind_recover_return_address()
156 return -EINVAL; in kunwind_recover_return_address()
157 state->common.pc = orig_pc; in kunwind_recover_return_address()
158 state->flags.kretprobe = 1; in kunwind_recover_return_address()
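The recovery step above exists because the function-graph tracer and kretprobes patch the on-stack return address to point at a trampoline, and reporting that trampoline would hide the real caller. The sketch below shows only the decision flow, with invented demo_* stand-ins for the trampoline addresses and the lookups; the real helpers are ftrace_graph_ret_addr() and kretprobe_find_ret_addr(), as quoted above.

#include <stdio.h>

#define DEMO_GRAPH_TRAMPOLINE     0x1000UL /* stands in for return_to_handler */
#define DEMO_KRETPROBE_TRAMPOLINE 0x2000UL /* stands in for the kretprobe trampoline */

/* Fake lookups of the recorded original return addresses. */
static unsigned long demo_graph_ret_addr(void)     { return 0x400123UL; }
static unsigned long demo_kretprobe_ret_addr(void) { return 0x400456UL; }

static int demo_recover_return_address(unsigned long *pc)
{
    if (*pc == DEMO_GRAPH_TRAMPOLINE) {
        unsigned long orig = demo_graph_ret_addr();

        if (!orig)
            return -1; /* nothing recorded: give up rather than report the trampoline */
        *pc = orig;
    }
    if (*pc == DEMO_KRETPROBE_TRAMPOLINE) {
        unsigned long orig = demo_kretprobe_ret_addr();

        if (!orig)
            return -1;
        *pc = orig;
    }
    return 0;
}

int main(void)
{
    unsigned long pc = DEMO_GRAPH_TRAMPOLINE;

    if (!demo_recover_return_address(&pc))
        printf("recovered pc=%#lx\n", pc);
    return 0;
}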
169 unsigned long fp = state->common.fp; in kunwind_next_regs_pc()
174 info = unwind_find_stack(&state->common, (unsigned long)regs, sizeof(*regs)); in kunwind_next_regs_pc()
176 return -EINVAL; in kunwind_next_regs_pc()
178 unwind_consume_stack(&state->common, info, (unsigned long)regs, in kunwind_next_regs_pc()
181 state->regs = regs; in kunwind_next_regs_pc()
182 state->common.pc = regs->pc; in kunwind_next_regs_pc()
183 state->common.fp = regs->regs[29]; in kunwind_next_regs_pc()
184 state->regs = NULL; in kunwind_next_regs_pc()
185 state->source = KUNWIND_SOURCE_REGS_PC; in kunwind_next_regs_pc()
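kunwind_next_regs_pc() relies on the frame record it just reached being embedded inside a struct pt_regs saved at an exception boundary, so the whole register frame can be recovered from the member's address. Below is a self-contained model of that container_of-style arithmetic; the demo_* types only mimic the layout, the macro skips the kernel's type checking, and an LP64 host is assumed.

#include <stddef.h>
#include <stdio.h>

#define demo_container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_frame_record {
    unsigned long fp;
    unsigned long lr;
};

struct demo_pt_regs {
    unsigned long regs[31];
    unsigned long sp;
    unsigned long pc;
    struct demo_frame_record stackframe; /* frame record embedded in the regs */
};

int main(void)
{
    struct demo_pt_regs regs = { .pc = 0xffff800080001234UL };

    /* An unwound fp that lands on the embedded record's fp member ... */
    unsigned long fp = (unsigned long)&regs.stackframe.fp;

    /* ... identifies the whole register frame wrapped around it. */
    struct demo_pt_regs *found =
        demo_container_of((unsigned long *)fp, struct demo_pt_regs, stackframe.fp);

    printf("recovered pc=%#lx (same object: %s)\n",
           found->pc, found == &regs ? "yes" : "no");
    return 0;
}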
192 struct task_struct *tsk = state->task; in kunwind_next_frame_record_meta()
193 unsigned long fp = state->common.fp; in kunwind_next_frame_record_meta()
197 info = unwind_find_stack(&state->common, fp, sizeof(*meta)); in kunwind_next_frame_record_meta()
199 return -EINVAL; in kunwind_next_frame_record_meta()
202 switch (READ_ONCE(meta->type)) { in kunwind_next_frame_record_meta()
204 if (meta == &task_pt_regs(tsk)->stackframe) in kunwind_next_frame_record_meta()
205 return -ENOENT; in kunwind_next_frame_record_meta()
207 return -EINVAL; in kunwind_next_frame_record_meta()
212 return -EINVAL; in kunwind_next_frame_record_meta()
219 unsigned long fp = state->common.fp; in kunwind_next_frame_record()
225 return -EINVAL; in kunwind_next_frame_record()
227 info = unwind_find_stack(&state->common, fp, sizeof(*record)); in kunwind_next_frame_record()
229 return -EINVAL; in kunwind_next_frame_record()
232 new_fp = READ_ONCE(record->fp); in kunwind_next_frame_record()
233 new_pc = READ_ONCE(record->lr); in kunwind_next_frame_record()
238 unwind_consume_stack(&state->common, info, fp, sizeof(*record)); in kunwind_next_frame_record()
240 state->common.fp = new_fp; in kunwind_next_frame_record()
241 state->common.pc = new_pc; in kunwind_next_frame_record()
242 state->source = KUNWIND_SOURCE_FRAME; in kunwind_next_frame_record()
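kunwind_next_frame_record() is the ordinary step: an AAPCS64 frame record is a {fp, lr} pair at the address held in x29, so unwinding means loading that pair, reporting lr, and following fp. The minimal userspace walker below follows the same idea; it assumes frame pointers are kept (-fno-omit-frame-pointer), uses invented demo_* names, and is a sketch rather than the kernel's loop.

#include <stdio.h>

struct demo_frame_record {
    struct demo_frame_record *fp; /* caller's frame record */
    unsigned long lr;             /* return address into the caller */
};

static void __attribute__((noinline)) demo_backtrace(void)
{
    struct demo_frame_record *record = __builtin_frame_address(0);
    int depth = 0;

    while (record && depth++ < 16) {
        unsigned long pc = record->lr;

        if (!record->fp && !pc) /* a terminating record ends the walk */
            break;
        printf("#%d pc=%#lx\n", depth, pc);
        record = record->fp;
    }
}

static void __attribute__((noinline)) level2(void) { demo_backtrace(); }
static void __attribute__((noinline)) level1(void) { level2(); }

int main(void)
{
    level1();
    return 0;
}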
259 state->flags.all = 0; in kunwind_next()
261 switch (state->source) { in kunwind_next()
269 err = -EINVAL; in kunwind_next()
275 state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc); in kunwind_next()
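The ptrauth_strip_kernel_insn_pac() call above removes pointer-authentication bits from each return address before it is reported, since a PAC-signed LR is not a valid text address. Roughly, kernel (TTBR1) addresses have all bits above the VA range set to one, so stripping amounts to ORing those bits back in. The sketch below assumes 48-bit VAs and an LP64 host; the kernel derives the real mask from its configuration.

#include <stdio.h>

#define VA_BITS_DEMO  48
#define PAC_MASK_DEMO (~((1UL << VA_BITS_DEMO) - 1)) /* bits 63:48 */

static unsigned long demo_strip_kernel_pac(unsigned long pc)
{
    /* Restore the all-ones top bits that the PAC field overwrote. */
    return pc | PAC_MASK_DEMO;
}

int main(void)
{
    unsigned long signed_pc = 0x7f23800080123456UL; /* PAC bits in 63:48 */

    printf("%#lx -> %#lx\n", signed_pc, demo_strip_kernel_pac(signed_pc));
    return 0;
}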
294 return -EINVAL; in do_kunwind()
296 if (ret == -ENOENT) in do_kunwind()
304 * Per-cpu stacks are only accessible when unwinding the current task in a
305 * non-preemptible context.
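That comment captures a correctness rule: an IRQ, overflow or SDEI stack belongs to a single CPU, so its bounds are only meaningful while unwinding the current task with preemption disabled; otherwise the range has to be treated as unknown so the walk refuses to follow frames into it. Below is a small illustrative sketch of that guard, with demo_* stand-ins rather than the kernel's stackinfo helpers.

#include <stdbool.h>
#include <stdio.h>

struct demo_stack_info {
    unsigned long low, high; /* [low, high) range; 0,0 means unknown */
};

static struct demo_stack_info demo_get_irq_stack(void)
{
    return (struct demo_stack_info){ 0xffff000010000000UL, 0xffff000010004000UL };
}

static struct demo_stack_info demo_stack_for_unwind(bool unwinding_current,
                                                    bool preemptible)
{
    /* Only trust a per-CPU stack when it cannot change under us. */
    if (unwinding_current && !preemptible)
        return demo_get_irq_stack();
    return (struct demo_stack_info){ 0, 0 }; /* unknown: do not follow into it */
}

int main(void)
{
    struct demo_stack_info si = demo_stack_for_unwind(true, false);

    printf("irq stack: %#lx-%#lx\n", si.low, si.high);
    return 0;
}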
350 .common = { in kunwind_stack_walk()
358 return -EINVAL; in kunwind_stack_walk()
378 return data->consume_entry(data->cookie, state->common.pc); in arch_kunwind_consume_entry()
404 if (state->source == KUNWIND_SOURCE_REGS_PC) in arch_reliable_kunwind_consume_entry()
433 return data->consume_entry(data->cookie, state->common.pc, 0, in arch_bpf_unwind_consume_entry()
434 state->common.fp); in arch_bpf_unwind_consume_entry()
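These wrappers adapt one core walker to different consumers (the generic stacktrace API, a reliability-checking variant, BPF): the walker hands each recovered PC to a callback and stops as soon as the callback returns false. The compact model below shows that consume-callback pattern with invented demo_* names and a fixed array standing in for the real unwind.

#include <stdbool.h>
#include <stdio.h>

typedef bool (*demo_consume_fn)(void *cookie, unsigned long pc);

static void demo_walk(demo_consume_fn consume, void *cookie,
                      const unsigned long *pcs, int nr)
{
    for (int i = 0; i < nr; i++)
        if (!consume(cookie, pcs[i]))
            break; /* the consumer has seen enough */
}

/* A consumer that stores at most 'max' entries, like a trace buffer. */
struct demo_trace { unsigned long *buf; int max, nr; };

static bool demo_store_entry(void *cookie, unsigned long pc)
{
    struct demo_trace *t = cookie;

    if (t->nr >= t->max)
        return false;
    t->buf[t->nr++] = pc;
    return true;
}

int main(void)
{
    unsigned long pcs[] = { 0x1111, 0x2222, 0x3333, 0x4444 };
    unsigned long buf[2];
    struct demo_trace t = { buf, 2, 0 };

    demo_walk(demo_store_entry, &t, pcs, 4);
    printf("stored %d entries\n", t.nr);
    return 0;
}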
450 switch (state->source) { in state_source_string()
462 union unwind_flags flags = state->flags; in dump_backtrace_entry()
467 (void *)state->common.pc, in dump_backtrace_entry()
504 * The struct defined for userspace stack frame in AARCH64 mode.
554 * (struct compat_frame_tail *)(xxx->fp)-1
559 compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
593 return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1; in unwind_compat_user_frame()
601 if (!consume_entry(cookie, regs->pc)) in arch_stack_walk_user()
605 /* AARCH64 mode */ in arch_stack_walk_user()
608 tail = (struct frame_tail __user *)regs->regs[29]; in arch_stack_walk_user()
613 /* AARCH32 compat mode */ in arch_stack_walk_user()
616 tail = (struct compat_frame_tail __user *)regs->compat_fp - 1; in arch_stack_walk_user()
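The compat path works on the APCS frame layout described by the comment above: the saved fp points just past a packed {fp, sp, lr} tail, so each step reads the tail at (struct compat_frame_tail *)fp - 1 and then follows the fp stored in it. Below is a userspace model of that walk; the fake user memory, the demo_* names and the offset-based "addresses" are stand-ins for copy_from_user() and compat_ptr().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Layout of the AArch32 frame tail: three 32-bit words. */
struct demo_compat_frame_tail {
    uint32_t fp; /* caller's frame pointer value */
    uint32_t sp;
    uint32_t lr; /* return address for this frame */
};

/* Fake 32-bit user memory; "user addresses" are offsets into it. */
static uint8_t demo_user_mem[256];

static int demo_read_tail(uint32_t uaddr, struct demo_compat_frame_tail *out)
{
    /* Stands in for copy_from_user() plus access checks. */
    if (uaddr + sizeof(*out) > sizeof(demo_user_mem))
        return -1;
    memcpy(out, &demo_user_mem[uaddr], sizeof(*out));
    return 0;
}

int main(void)
{
    /* Two chained frames placed in the fake user memory. */
    struct demo_compat_frame_tail outer = { .fp = 0, .sp = 0, .lr = 0x00010000 };
    struct demo_compat_frame_tail inner = { .fp = sizeof(outer), .sp = 0, .lr = 0x00020000 };

    memcpy(&demo_user_mem[0], &outer, sizeof(outer));
    memcpy(&demo_user_mem[sizeof(outer)], &inner, sizeof(inner));

    /* regs->compat_fp would point just past the innermost tail ... */
    uint32_t fp = sizeof(outer) + sizeof(inner);
    int depth = 0;

    while (fp && depth++ < 16) {
        struct demo_compat_frame_tail tail;

        /* ... so the tail itself lives one struct below that address. */
        if (demo_read_tail(fp - sizeof(tail), &tail))
            break;
        printf("#%d lr=%#x\n", depth, tail.lr);
        fp = tail.fp; /* fp == 0 in the outermost frame ends the walk */
    }
    return 0;
}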