/linux/include/linux/
stackdepot.h:
    3: * Stack depot - a stack trace storage that avoids duplication.
    5: * Stack depot is intended to be used by subsystems that need to store and
    6: * later retrieve many potentially duplicated stack traces without wasting
    9: * For example, KASAN needs to save allocation and free stack traces for each
   10: * object. Storing two stack traces per object requires a lot of memory (e.g.
   12: * stack traces often repeat, using stack depot allows saving about 100x the space.
   28: * Number of bits in the handle that stack depot doesn't use. Users may store
   43: /* Compact structure that stores a reference to a stack. */
   64: * only place a stack record onto the freelist iff its
   65: * refcount is zero. Because stack records with a zero
  [all …]

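The usage pattern these comments describe (deduplicate a trace once, keep only a
compact 32-bit handle) can be sketched as below. stack_depot_save(),
stack_depot_fetch() and stack_trace_save() are the real APIs; the two wrapper
functions are illustrative names only, a minimal sketch rather than anything in
the file.

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Illustrative wrapper: only the returned handle needs to be stored. */
static depot_stack_handle_t record_stack(gfp_t flags)
{
        unsigned long entries[16];
        unsigned int nr;

        nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        /* Duplicate traces hash to the same record and yield the same handle. */
        return stack_depot_save(entries, nr, flags);
}

/* Illustrative wrapper: expand a handle back into a printable trace. */
static void report_stack(depot_stack_handle_t handle)
{
        unsigned long *entries;
        unsigned int nr = stack_depot_fetch(handle, &entries);

        stack_trace_print(entries, nr, 0);
}
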
/linux/Documentation/arch/x86/
shstk.rst:
    4: Control-flow Enforcement Technology (CET) Shadow Stack
   14: CET introduces shadow stack and indirect branch tracking (IBT). A shadow stack
   15: is a secondary stack allocated from memory which cannot be directly modified by
   17: return address to both the normal stack and the shadow stack. Upon
   18: function return, the processor pops the shadow stack copy and compares it
   19: to the normal stack copy. If the two differ, the processor raises a
   22: Stack and Indirect Branch Tracking. Today in the 64-bit kernel, only userspace
   23: shadow stack and kernel IBT are supported.
   25: Requirements to use Shadow Stack
   28: To use userspace shadow stack you need HW that supports it, a kernel
  [all …]

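A minimal userspace sketch of turning shadow stack on, assuming kernel headers
new enough to provide ARCH_SHSTK_ENABLE and ARCH_SHSTK_SHSTK in <asm/prctl.h>.
In practice the C library enables shadow stack at startup based on the binary's
ELF properties, so applications rarely make this call themselves.

#include <asm/prctl.h>          /* ARCH_SHSTK_* (assumed available) */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        /* No glibc wrapper exists for arch_prctl(2); go through syscall(2). */
        if (syscall(SYS_arch_prctl, ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK) != 0)
                perror("ARCH_SHSTK_ENABLE");
        return 0;
}
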
kernel-stacks.rst:
   14: Like all other architectures, x86_64 has a kernel stack for every
   17: zombie. While the thread is in user space the kernel stack is empty
   25: * Interrupt stack. IRQ_STACK_SIZE
   29: kernel switches from the current task to the interrupt stack. Like
   32: of every per thread stack.
   34: The interrupt stack is also used when processing a softirq.
   36: Switching to the kernel interrupt stack is done by software based on a
   41: to automatically switch to a new stack for designated events such as
   43: events on x86_64. This feature is called the Interrupt Stack Table
   46: point to dedicated stacks; each stack can be a different size.
  [all …]

/linux/tools/testing/selftests/bpf/progs/
verifier_subprog_precision.c:
   43: __msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
   44: __msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
   45: __msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
   46: __msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
   47: __msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
   48: __msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
   49: __msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
  103: __msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r1 = r6")
  104: __msg("mark_precise: frame0: regs=r0 stack= before 4: (27) r0 *= 4")
  105: __msg("mark_precise: frame0: regs=r0 stack= before 3: (57) r0 &= 3")
  [all …]

verifier_var_off.c:
   36: __description("variable-offset stack read, priv vs unpriv")
   38: __msg_unpriv("R2 variable stack access prohibited for !root")
   43: /* Fill the top 8 bytes of the stack */ \  in stack_read_priv_vs_unpriv()
   55: /* dereference it for a stack read */ \  in stack_read_priv_vs_unpriv()
   63: __description("variable-offset stack read, uninitialized")
   65: __failure_unpriv __msg_unpriv("R2 variable stack access prohibited for !root")
   78: /* dereference it for a stack read */ \  in variable_offset_stack_read_uninitialized()
   86: __description("variable-offset stack write, priv vs unpriv")
   88: /* Check that the maximum stack depth is correctly maintained according to the
   91: __log_level(4) __msg("stack depth 16")
  [all …]

test_global_func_ctx_args.c:
   11: static long stack[256];  variable
   19: return bpf_get_stack(ctx, &stack, sizeof(stack), 0);  in kprobe_typedef_ctx_subprog()
   50: return bpf_get_stack((void *)ctx, &stack, sizeof(stack), 0);  in kprobe_struct_ctx_subprog()
   67: return bpf_get_stack(ctx, &stack, sizeof(stack), 0);  in kprobe_workaround_ctx_subprog()
   83: return bpf_get_stack(ctx, &stack, sizeof(stack), 0);  in raw_tp_ctx_subprog()
   99: return bpf_get_stack(ctx, &stack, sizeof(stack), 0);  in raw_tp_writable_ctx_subprog()
  115: return bpf_get_stack(ctx, &stack, sizeof(stack), 0);  in perf_event_ctx_subprog()
  130: return bpf_get_stack(ctx, stack, sizeof(stack), 0);  in subprog_ctx_tag()
  142: return bpf_get_stack(ctx1, stack, sizeof(stack), 0) +  in subprog_multi_ctx_tags()
  144:        bpf_get_stack(ctx2, stack, sizeof(stack), 0);  in subprog_multi_ctx_tags()

verifier_precision.c:
   10: __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
   11: __msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
   12: __msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
   13: __msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")  in bpf_neg()
   30: __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
   31: __msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
   32: __msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
   33: __msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")  in bpf_end_to_le()
   51: __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
   52: __msg("mark_precise: frame0: regs=r2 stack
  [all …]

verifier_scalar_ids.c:
   17: __msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
   18: __msg("frame0: parent state regs=r0,r1,r2 stack=:")
   19: __msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
   23: __msg("frame0: regs=r0 stack= before 5: (bf) r3 = r10")
   24: __msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
   26: __msg("frame0: parent state regs= stack=:")
   57: __msg("frame0: regs=r0 stack= before 5: (2d) if r1 > r3 goto pc+0")  in __flag()
   58: __msg("frame0: parent state regs=r0,r1,r2,r3 stack=:")  in __flag()
   59: __msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")  in __flag()
   90: __msg("frame0: regs=r3 stack= before 5: (2d) if r1 > r3 goto pc+0")  in __flag()
  [all …]

/linux/lib/
stackdepot.c:
    3: * Stack depot - a stack trace storage that avoids duplication.
    5: * Internally, stack depot maintains a hash table of unique stacktraces. The
    6: * stack traces themselves are stored contiguously one after another in a set
   57: /* Hash table of stored stack records. */
   64: /* Array of memory regions that store stack records. */
   72: /* Freelist of stack records within stack_pools. */
  143: * stack traces being stored in stack depot.  in stack_depot_early_init()
  242: * Initializes new stack pool
  312: struct stack_record *stack;  depot_pop_free_pool()  local
  346: struct stack_record *stack;  depot_pop_free()  local
  383: struct stack_record *stack = NULL;  depot_alloc_stack()  local
  446: struct stack_record *stack;  depot_fetch_stack()  local
  468: depot_free_stack(struct stack_record *stack)  depot_free_stack()  argument
  539: struct stack_record *stack, *ret = NULL;  find_stack()  local
  702: struct stack_record *stack;  stack_depot_fetch()  local
  729: struct stack_record *stack;  stack_depot_put()  local
  747: stack_depot_print(depot_stack_handle_t stack)  stack_depot_print()  argument
  [all …]

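The lookup those find_stack() locals belong to reduces to the classic
hash-then-memcmp dedup sketched below. This is illustrative only:
struct stack_record_sketch and find_stack_sketch() are stand-in names, and the
real find_stack() additionally handles refcounts, RCU and pool eviction.

#include <linux/types.h>
#include <linux/string.h>

struct stack_record_sketch {
        struct stack_record_sketch *next;       /* hash-bucket chain */
        u32 hash;
        u32 size;                               /* number of entries */
        unsigned long entries[];
};

/* Walk one hash bucket looking for an identical trace. */
static struct stack_record_sketch *
find_stack_sketch(struct stack_record_sketch *bucket,
                  unsigned long *entries, u32 size, u32 hash)
{
        struct stack_record_sketch *s;

        for (s = bucket; s; s = s->next) {
                if (s->hash == hash && s->size == size &&
                    !memcmp(entries, s->entries, size * sizeof(unsigned long)))
                        return s;       /* duplicate: reuse the record */
        }
        return NULL;                    /* miss: caller allocates a new record */
}
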
/linux/drivers/misc/altera-stapl/
altera.c:
  119: /* This function checks if enough parameters are available on the stack. */
  213: long *stack = astate->stack;  in altera_execute()  local
  528: stack[stack_ptr] = stack[stack_ptr - 1];  in altera_execute()
  534: swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);  in altera_execute()
  539: stack[stack_ptr - 1] += stack[stack_ptr];  in altera_execute()
  545: stack[stack_ptr - 1] -= stack[stack_ptr];  in altera_execute()
  551: stack[stack_ptr - 1] *= stack[stack_ptr];  in altera_execute()
  557: stack[stack_ptr - 1] /= stack[stack_ptr];  in altera_execute()
  563: stack[stack_ptr - 1] %= stack[stack_ptr];  in altera_execute()
  569: stack[stack_ptr - 1] <<= stack[stack_ptr];  in altera_execute()
  [all …]

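These excerpts are the arithmetic core of a stack-machine interpreter: each
binary operator combines the top two slots and shrinks the stack by one. A
freestanding sketch of the same pattern, with hypothetical opcodes rather than
altera.c's actual instruction set:

#include <stdio.h>

enum op { OP_PUSH, OP_ADD, OP_SWAP, OP_DUP };   /* hypothetical opcodes */

static long stack[64];
static int sp;                  /* index of the next free slot */

static void step(enum op op, long arg)
{
        long tmp;

        switch (op) {
        case OP_PUSH:
                stack[sp++] = arg;
                break;
        case OP_ADD:            /* top two slots in, one out */
                stack[sp - 2] += stack[sp - 1];
                sp--;
                break;
        case OP_SWAP:
                tmp = stack[sp - 2];
                stack[sp - 2] = stack[sp - 1];
                stack[sp - 1] = tmp;
                break;
        case OP_DUP:
                stack[sp] = stack[sp - 1];
                sp++;
                break;
        }
}

int main(void)
{
        step(OP_PUSH, 2);
        step(OP_PUSH, 3);
        step(OP_ADD, 0);
        printf("%ld\n", stack[sp - 1]);         /* prints 5 */
        return 0;
}
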
/linux/tools/testing/selftests/bpf/verifier/
precise.c:
   42: mark_precise: frame0: regs=r2 stack= before 25\
   43: mark_precise: frame0: regs=r2 stack= before 24\
   44: mark_precise: frame0: regs=r2 stack= before 23\
   45: mark_precise: frame0: regs=r2 stack= before 22\
   46: mark_precise: frame0: regs=r2 stack= before 20\
   47: mark_precise: frame0: parent state regs=r2,r9 stack=:\
   49: mark_precise: frame0: regs=r2,r9 stack= before 19\
   50: mark_precise: frame0: regs=r9 stack= before 18\
   51: mark_precise: frame0: regs=r8,r9 stack= before 17\
   52: mark_precise: frame0: regs=r0,r9 stack= before 15\
  [all …]

/linux/Documentation/mm/
vmalloced-kernel-stacks.rst:
    4: Virtually Mapped Kernel Stack Support
   21: Kernel stack overflows are often hard to debug and make the kernel
   25: Virtually mapped kernel stacks with guard pages cause kernel stack
   31: causes reliable faults when the stack overflows. The usability of
   32: the stack trace after overflow and response to the overflow itself
   49: needs to work while the stack points to a virtual address with
   51: most likely) needs to ensure that the stack's page table entries
   52: are populated before running on a possibly unpopulated stack.
   53: - If the stack overflows into a guard page, something reasonable
   64: with guard pages. This causes kernel stack overflows to be caught
  [all …]

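The mechanism is ordinary vmalloc space, where each allocation is separated
from its neighbours by an unmapped guard page. A simplified sketch modeled on
the allocation in kernel/fork.c (the real code adds memcg accounting, a
per-CPU stack cache and KASAN hooks; treat this as illustrative, not the exact
call):

#include <linux/vmalloc.h>
#include <linux/thread_info.h>

/* Sketch: a THREAD_SIZE stack in vmalloc space. Running off either end
 * hits an unmapped guard page and faults reliably instead of silently
 * corrupting adjacent memory.
 */
static void *alloc_vmap_stack_sketch(int node)
{
        return __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
                                    VMALLOC_START, VMALLOC_END,
                                    THREADINFO_GFP, PAGE_KERNEL,
                                    0, node, __builtin_return_address(0));
}
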
/linux/drivers/misc/lkdtm/
stackleak.c:
    3: * This code tests that the current task stack is properly erased (filled
   16: * Check that stackleak tracks the lowest stack pointer and erases the stack
   19: * To prevent the lowest stack pointer changing during the test, IRQs are
   21: * compiler will create a fixed-size stack frame for this function.
   23: * Any non-inlined function may make further use of the stack, altering the
   24: * lowest stack pointer and/or clobbering poison values. To avoid spurious
   40: * Check that the current and lowest recorded stack pointer values fall  in check_stackleak_irqoff()
   41: * within the expected task stack boundaries. These tests should never  in check_stackleak_irqoff()
   47: pr_err("FAIL: current_stack_pointer (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",  in check_stackleak_irqoff()
   54: pr_err("FAIL: current->lowest_stack (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",  in check_stackleak_irqoff()
  [all …]

/linux/arch/openrisc/kernel/
unwinder.c:
   28: * the frame pointer should point to a location in the stack after the
   40: * Create a stack trace doing scanning which is frame pointer aware. We can
   41: * get reliable stack traces by matching the previously found frame
   42: * pointer with the top of the stack address every time we find a valid
   45: * Ideally the stack parameter will be passed as FP, but it can not be
   49: * The OpenRISC stack frame looks something like the following. The
   53: * SP -> (top of stack)
   58: * FP -> (previous top of stack) /
   60: void unwind_stack(void *data, unsigned long *stack,  in unwind_stack()  argument
   67: while (!kstack_end(stack)) {  in unwind_stack()
  [all …]

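Stripped of the frame-pointer matching, the scan loop amounts to the
conservative sketch below. kstack_end() and __kernel_text_address() are real
kernel helpers; the reliable argument is where the FP check the comment
describes would feed in (here it is simply left unverified).

#include <linux/kernel.h>
#include <linux/sched/task_stack.h>

/* Sketch: walk the stack one word at a time and report every value
 * that points into kernel text as a possible return address.
 */
static void scan_stack_sketch(void *data, unsigned long *stack,
                              void (*trace)(void *data, unsigned long addr,
                                            int reliable))
{
        while (!kstack_end(stack)) {
                unsigned long addr = *stack;

                if (__kernel_text_address(addr))
                        trace(data, addr, 0 /* not FP-verified */);
                stack++;
        }
}
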
/linux/kernel/
stacktrace.c:
    5: * Stack trace management functions
   19: * stack_trace_print - Print the entries in the stack trace
   38: * stack_trace_snprint - Print the entries in the stack trace into a buffer
  105: * stack_trace_save - Save a stack trace into a storage array
  108: * @skipnr: Number of entries to skip at the start of the stack trace
  128: * stack_trace_save_tsk - Save a task stack trace into a storage array
  132: * @skipnr: Number of entries to skip at the start of the stack trace
  157: * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
  161: * @skipnr: Number of entries to skip at the start of the stack trace
  181: * stack_trace_save_tsk_reliable - Save task stack with verification
  [all …]

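Putting the save and print halves of this API together, capturing and dumping
the current call chain takes a few lines (dump_here() is an illustrative name;
the two calls are the documented API):

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void dump_here(void)
{
        unsigned long entries[32];
        unsigned int nr;

        nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0 /* skipnr */);
        stack_trace_print(entries, nr, 1 /* indent spaces */);
}
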
/linux/tools/testing/selftests/bpf/prog_tests/
build_id.c:
   10: static void print_stack(struct bpf_stack_build_id *stack, int frame_cnt)  in print_stack()  argument
   16: switch (stack[i].status) {  in print_stack()
   23: printf("%02hhx", (unsigned)stack[i].build_id[j]);  in print_stack()
   24: printf(" OFFSET = %llx", (unsigned long long)stack[i].offset);  in print_stack()
   27: printf("IP = %llx", (unsigned long long)stack[i].ip);  in print_stack()
   30: printf("UNEXPECTED STATUS %d ", stack[i].status);  in print_stack()
   40: struct bpf_stack_build_id *stack;  in subtest_nofault()  local
   59: stack = skel->bss->stack_nofault;  in subtest_nofault()
   62: print_stack(stack, frame_cnt);  in subtest_nofault()
   65: ASSERT_EQ(stack[0].status, BPF_STACK_BUILD_ID_VALID, "build_id_status");  in subtest_nofault()
  [all …]

/linux/kernel/trace/
trace_stack.c:
   58: * The stack tracer looks for a maximum stack at each call from a function. It
   59: * registers a callback from ftrace, and in that callback it examines the stack
   60: * size. It determines the stack size from the variable passed in, which is the
   62: * The stack size is calculated by the address of the local variable to the top
   63: * of the current stack. If that size is smaller than the currently saved max
   64: * stack size, nothing more is done.
   66: * If the size of the stack is greater than the maximum recorded size, then the
   70: * saving the function's local variables, the stack will look something like
   73: * [ top of stack ]
   80: * 31: [ do trace stack here ]
  [all …]

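The size calculation the comment describes fits in one expression: the address
of any local variable approximates the current stack pointer, and masking with
THREAD_SIZE - 1 gives its offset within the THREAD_SIZE-aligned stack. A
minimal sketch mirroring the arithmetic in check_stack():

#include <linux/thread_info.h>

static inline unsigned long stack_in_use_sketch(void)
{
        unsigned long dummy;

        /* Bytes between this frame and the top of the stack. */
        return THREAD_SIZE - ((unsigned long)&dummy & (THREAD_SIZE - 1));
}
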
/linux/include/linux/sched/
task_stack.h:
    6: * task->stack (kernel stack) handling interfaces:
   17: * When accessing the stack of a non-current task that might exit, use
   23: return task->stack;  in task_stack_page()
   31: return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;  in end_of_stack()
   33: return task->stack;  in end_of_stack()
   39: #define task_stack_page(task) ((void *)(task)->stack)
   48: * Return the address of the last usable long on the stack.
   50: * When the stack grows down, this is just above the thread
   53: * When the stack grows up, this is the highest address.
   91: void *stack = task_stack_page(current);  in object_is_on_stack()  local
  [all …]

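A typical caller-side use of these helpers is rejecting stack-resident buffers
before an operation that must not touch the stack, such as DMA mapping. A
minimal sketch (check_buffer() is an illustrative name; object_is_on_stack()
is the real helper):

#include <linux/sched/task_stack.h>
#include <linux/errno.h>

static int check_buffer(const void *buf)
{
        if (object_is_on_stack(buf))
                return -EINVAL; /* on the current task's stack: refuse it */
        return 0;
}
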
/linux/arch/x86/entry/
entry_64.S:
   15: * at the top of the kernel process stack.
   65: * are not needed). SYSCALL does not save anything on the stack
  100: /* Construct struct pt_regs on stack */
  143: * Save old stack pointer and switch to trampoline stack.
  153: * We are on the trampoline stack. All regs except RDI are live.
  190: /* switch stack */
  200: * When switching from a shallower to a deeper call stack
  230: * This is the start of the kernel stack; even though there's a
  234: * This ensures stack unwinds of kernel threads terminate in a known
  248: * Set the stack state to what is expected for the target function
  [all …]

/linux/Documentation/arch/arm64/
gcs.rst:
    2: Guarded Control Stack support for AArch64 Linux
    6: order to support use of the ARM Guarded Control Stack (GCS) feature.
   18: implementation of features that need to collect stack traces such as
   21: * When GCS is enabled a separate guarded control stack is maintained by the
   23: stores the call stack only, when a procedure call instruction is
   32: control stacks with checks to ensure that the new stack is a valid
   36: Stack feature, due to sharing of userspace interfaces the ABI refers to
   68: * When set PR_SHADOW_STACK_ENABLE flag allocates a Guarded Control Stack
   76: by GCSCRE0_EL1.STREn, allowing explicit stores to the Guarded Control Stack.
  102: the stack will remain allocated for the lifetime of the thread. At present
  [all …]

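Enabling GCS for the current thread is a single prctl(), assuming headers new
enough to define the PR_SHADOW_STACK_* constants mentioned above; as on x86,
the C library is normally the component that does this at startup.

#include <sys/prctl.h>  /* PR_SET_SHADOW_STACK_STATUS etc. (assumed new enough) */
#include <stdio.h>

int main(void)
{
        if (prctl(PR_SET_SHADOW_STACK_STATUS, PR_SHADOW_STACK_ENABLE, 0, 0, 0))
                perror("PR_SET_SHADOW_STACK_STATUS");
        return 0;
}
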
/linux/arch/m68k/68000/
entry.S:
   95: /* kernel stack, otherwise stack overflow can occur during*/
  131: movel #65,%sp@-  /* put vector # on stack*/
  133: 3: addql #8,%sp  /* pop parameters off stack*/
  142: movel #66,%sp@-  /* put vector # on stack*/
  144: 3: addql #8,%sp  /* pop parameters off stack*/
  153: movel #67,%sp@-  /* put vector # on stack*/
  155: 3: addql #8,%sp  /* pop parameters off stack*/
  164: movel #68,%sp@-  /* put vector # on stack*/
  166: 3: addql #8,%sp  /* pop parameters off stack*/
  175: movel #69,%sp@-  /* put vector # on stack*/
  [all …]

/linux/tools/perf/scripts/python/
gecko.py:
   81: class Stack(NamedTuple):  class
  100: frameTable: interned stack frame ID -> stack frame.
  103: stackTable: interned stack ID -> stack.
  104: stackMap: (stack prefix ID, leaf stack frame ID) -> interned Stack ID.
  105: frameMap: Stack Frame string -> interned Frame ID.
  113: stackTable: List[Stack] = field(default_factory=list)
  124: stackTable: List[Stack] = field(default_factory=list)
  129: """Gets a matching stack, or saves the new stack. Returns a Stack ID."""
  136: self.stackTable.append(Stack(prefix_id=prefix_id, frame_id=frame_id))
  151: """Gets a matching stack frame, or saves the new frame. Returns a Frame ID."""
  [all …]

/linux/tools/perf/util/
thread-stack.c:
    3: * thread-stack.c: Synthesize a thread's stack using call / return events
   22: #include "thread-stack.h"
   40: * struct thread_stack_entry - thread stack entry.
   68: * struct thread_stack - thread stack constructed from 'call' and 'return'
   70: * @stack: array that holds the stack
   71: * @cnt: number of entries in the stack
   72: * @sz: current maximum stack size
   83: * @br_stack_rb: branch stack (ring buffer)
   84: * @br_stack_sz: maximum branch stack size
   89: struct thread_stack_entry *stack;  member
  [all …]

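The construction those struct comments describe (push a frame on every traced
call, pop back to the matching frame on return) can be sketched independently
of perf's types; every name below is hypothetical:

#include <stdlib.h>

struct synth_stack {
        unsigned long *ret_addr;        /* grows on demand, like @stack/@sz above */
        size_t cnt, sz;
};

static int on_call(struct synth_stack *ts, unsigned long ret_addr)
{
        if (ts->cnt >= ts->sz) {
                size_t sz = ts->sz ? 2 * ts->sz : 64;
                unsigned long *p = realloc(ts->ret_addr, sz * sizeof(*p));

                if (!p)
                        return -1;
                ts->ret_addr = p;
                ts->sz = sz;
        }
        ts->ret_addr[ts->cnt++] = ret_addr;
        return 0;
}

static void on_return(struct synth_stack *ts, unsigned long to_ip)
{
        /* Pop until the frame this return jumps back to; tolerates
         * missed returns, which real branch traces do contain.
         */
        while (ts->cnt && ts->ret_addr[ts->cnt - 1] != to_ip)
                ts->cnt--;
        if (ts->cnt)
                ts->cnt--;
}
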
/linux/security/
Kconfig.hardening:
   22: prompt "Initialize kernel stack variables at function entry"
   27: This option enables initialization of stack variables at
   39: bool "no automatic stack variable initialization (weakest)"
   41: Disable automatic stack variable initialization.
   43: classes of uninitialized stack variable exploits
   51: Initializes everything on the stack (including padding)
   53: all classes of uninitialized stack variable exploits and
   71: Initializes everything on the stack (including padding)
   73: classes of uninitialized stack variable exploits and
   86: bool "Poison kernel stack before returning from syscalls"
  [all …]

/linux/arch/sh/kernel/
unwinder.c:
    7: * This file provides arbitration code for stack unwinders.
    9: * Multiple stack unwinders can be available on a system, usually with
   20: * This is the most basic stack unwinder an architecture can
   22: * RISC CPUs, it can be implemented by looking through the stack for
   26: * construct more accurate stack traces.
   30: .name = "stack-reader",
   40: * "curr_unwinder" points to the stack unwinder currently in use. This
   59: * select_unwinder - Select the best registered stack unwinder.
   63: * Select the stack unwinder with the best rating. This is useful for
   81: * Enqueue the stack unwinder sorted by rating.
  [all …]

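The arbitration is the familiar pick-the-best-rating pattern over a list of
registered providers. A sketch with a stand-in type (the real struct unwinder
also carries the dump callback, and the real code keeps the list sorted at
insert time instead of scanning):

#include <linux/list.h>

struct unwinder_sketch {
        const char *name;
        struct list_head list;
        int rating;                     /* higher is better */
};

static LIST_HEAD(registered_unwinders);

static struct unwinder_sketch *select_unwinder_sketch(void)
{
        struct unwinder_sketch *best = NULL, *u;

        list_for_each_entry(u, &registered_unwinders, list)
                if (!best || u->rating > best->rating)
                        best = u;
        return best;                    /* becomes curr_unwinder */
}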