
Searched full:stacks (Results 1 – 25 of 173) sorted by relevance


/linux/Documentation/arch/x86/
kernel-stacks.rst
4 Kernel Stacks
7 Kernel stacks on x86-64 bit
15 active thread. These thread stacks are THREAD_SIZE (4*PAGE_SIZE) big.
16 These stacks contain useful data as long as a thread is alive or a
20 In addition to the per thread stacks, there are specialized stacks
21 associated with each CPU. These stacks are only used while the kernel
23 specialized stacks contain no useful data. The main CPU stacks are:
30 the split thread and interrupt stacks on i386, this gives more room
38 hardware stacks cannot nest without races.
46 point to dedicated stacks; each stack can be a different size.
[all …]
/linux/Documentation/mm/
vmalloced-kernel-stacks.rst
15 series that introduced the `Virtually Mapped Kernel Stacks feature
25 Virtually mapped kernel stacks with guard pages cause kernel stack
30 support for virtually mapped stacks with guard pages. This feature
42 Architectures that can support Virtually Mapped Kernel Stacks should
45 - vmalloc space must be large enough to hold many kernel stacks. This
47 - Stacks in vmalloc space need to work reliably. For example, if
61 mapped task stacks. This option depends on HAVE_ARCH_VMAP_STACK.
63 - Enable this if you want to use virtually-mapped kernel stacks
94 - Allocated stacks are cached and later reused by new threads, so memcg
95 accounting is performed manually on assigning/releasing stacks to tasks.
[all …]
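
The guard pages described above are a property of vmalloc space itself: vmalloc areas are separated by unmapped guard pages, so running off the end of a virtually mapped stack faults immediately instead of silently corrupting a neighbouring allocation. A conceptual sketch of the allocation follows (hypothetical helper names, not the kernel's actual thread-stack allocator):

#include <linux/thread_info.h>
#include <linux/vmalloc.h>

/* Hypothetical helpers: place a THREAD_SIZE stack in vmalloc space so
 * that an overflow hits an unmapped guard page and faults. */
static void *demo_alloc_vmapped_stack(void)
{
        return vmalloc(THREAD_SIZE);
}

static void demo_free_vmapped_stack(void *stack)
{
        vfree(stack);
}
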
page_owner.rst
27 It can also be used to show all the stacks and their current number of
76 cat /sys/kernel/debug/page_owner_stacks/show_stacks > stacks.txt
77 cat stacks.txt
/linux/lib/
ref_tracker.c
28 } stacks[]; member
37 stats = kmalloc(struct_size(stats, stacks, limit), in ref_tracker_get_stats()
50 if (stats->stacks[i].stack_handle == stack) in ref_tracker_get_stats()
55 stats->stacks[i].stack_handle = stack; in ref_tracker_get_stats()
56 stats->stacks[i].count = 0; in ref_tracker_get_stats()
59 ++stats->stacks[i].count; in ref_tracker_get_stats()
107 stack = stats->stacks[i].stack_handle; in __ref_tracker_dir_pr_ostream()
111 stats->stacks[i].count, stats->total, sbuf); in __ref_tracker_dir_pr_ostream()
112 skipped -= stats->stacks[i].count; in __ref_tracker_dir_pr_ostream()
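
The ref_tracker hits above use a common kernel pattern: a stats structure ending in a flexible stacks[] array, allocated in a single kmalloc() sized with struct_size(). A minimal sketch with hypothetical names:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_stats {
        unsigned int total;
        struct {
                unsigned long stack_handle;
                unsigned int count;
        } stacks[];                     /* flexible array member */
};

static struct demo_stats *demo_stats_alloc(unsigned int limit)
{
        /* struct_size() = sizeof(*stats) + limit * sizeof(stats->stacks[0]),
         * saturating instead of wrapping on overflow. */
        struct demo_stats *stats = kmalloc(struct_size(stats, stacks, limit),
                                           GFP_KERNEL);

        if (stats)
                stats->total = 0;
        return stats;
}
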
/linux/arch/sh/
Kconfig.debug
37 bool "Use 4Kb for kernel stacks instead of 8Kb"
44 will also use IRQ stacks to compensate for the reduced stackspace.
47 bool "Use separate kernel stacks when processing interrupts"
50 If you say Y here the kernel will use separate kernel stacks
52 overflowing the process kernel stacks.
/linux/Documentation/arch/arm64/
gcs.rst
32 control stacks with checks to ensure that the new stack is a valid
37 shadow stacks rather than GCS.
61 2. Enabling and disabling Guarded Control Stacks
113 3. Allocation of Guarded Control Stacks
129 * Additional Guarded Control Stacks can be allocated using the
132 * Stacks allocated using map_shadow_stack() can optionally have an end of
140 * Stacks allocated using map_shadow_stack() must have a size which is a
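
Allocation of an extra control stack from userspace goes through the map_shadow_stack() syscall referenced above. The sketch below is a hypothetical example; the syscall number and flag value are copied from recent kernel UAPI headers and are assumptions to check against the headers you actually build with:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_map_shadow_stack
#define __NR_map_shadow_stack 453               /* assumed: recent syscall table */
#endif
#ifndef SHADOW_STACK_SET_TOKEN
#define SHADOW_STACK_SET_TOKEN (1UL << 0)       /* assumed: request an end-of-stack token */
#endif

int main(void)
{
        unsigned long size = 16 * sysconf(_SC_PAGESIZE);

        /* Let the kernel pick the address; terminate the stack with a token. */
        long gcs = syscall(__NR_map_shadow_stack, 0UL, size,
                           SHADOW_STACK_SET_TOKEN);
        if (gcs == -1) {
                perror("map_shadow_stack");
                return 1;
        }
        printf("new control stack at %#lx\n", (unsigned long)gcs);
        return 0;
}
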
/linux/arch/arm64/kvm/hyp/nvhe/
stacktrace.c
123 struct stack_info stacks[] = { in pkvm_save_backtrace() local
128 .stacks = stacks, in pkvm_save_backtrace()
129 .nr_stacks = ARRAY_SIZE(stacks), in pkvm_save_backtrace()
/linux/drivers/gpu/drm/panthor/
panthor_device.c
51 ptdev->clks.stacks = devm_clk_get_optional(ptdev->base.dev, "stacks"); in panthor_clk_init()
52 if (IS_ERR(ptdev->clks.stacks)) in panthor_clk_init()
54 PTR_ERR(ptdev->clks.stacks), in panthor_clk_init()
55 "get 'stacks' clock failed"); in panthor_clk_init()
468 ret = clk_prepare_enable(ptdev->clks.stacks); in panthor_device_resume()
516 clk_disable_unprepare(ptdev->clks.stacks); in panthor_device_resume()
565 clk_disable_unprepare(ptdev->clks.stacks); in panthor_device_suspend()
panthor_device.h
107 /** @stacks: Stacks clock. This clock is optional. */
108 struct clk *stacks; member
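
The panthor hits above follow the usual optional-clock pattern: devm_clk_get_optional() returns NULL rather than an error when the "stacks" clock is not described for the device, and the clk enable/disable calls treat a NULL clk as a no-op. A minimal sketch with hypothetical names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int demo_clk_init(struct device *dev, struct clk **stacks_clk)
{
        *stacks_clk = devm_clk_get_optional(dev, "stacks");
        if (IS_ERR(*stacks_clk))
                return dev_err_probe(dev, PTR_ERR(*stacks_clk),
                                     "get 'stacks' clock failed\n");

        /* A NULL clk (clock absent) is fine: this call is then a no-op. */
        return clk_prepare_enable(*stacks_clk);
}

static void demo_clk_fini(struct clk *stacks_clk)
{
        clk_disable_unprepare(stacks_clk);
}
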
/linux/arch/x86/kernel/
idt.c
61 * stacks work only after cpu_init().
80 * cpu_init() is invoked. Interrupt stacks cannot be used at that point and
218 * On x86-64 these traps do not use interrupt stacks as they can't work
243 * stacks work only after cpu_init().
252 * On x86-64 this does not use interrupt stacks as they can't work before
fred.c
17 * "the stack you pointed me to is broken." Thus, always change stacks
78 * The purpose of separate stacks for NMI, #DB and #MC *in the kernel* in cpu_init_fred_rsps()
88 /* The FRED equivalents to IST stacks... */ in cpu_init_fred_rsps()
dumpstack.c
201 * Iterate through the stacks, starting with the current stack pointer. in show_trace_log_lvl()
204 * x86-64 can have several stacks: in show_trace_log_lvl()
207 * - HW exception stacks (double fault, nmi, debug, mce) in show_trace_log_lvl()
210 * x86-32 can have up to four stacks: in show_trace_log_lvl()
irq_32.c
6 * entry, irq-stacks and irq statistics code. All the remaining
108 * Allocate per-cpu stacks for hardirq and softirq processing
/linux/arch/arm64/kvm/
stacktrace.c
182 struct stack_info stacks[] = { in hyp_dump_backtrace()
187 .stacks = stacks, in hyp_dump_backtrace()
188 .nr_stacks = ARRAY_SIZE(stacks), in hyp_dump_backtrace()
181 struct stack_info stacks[] = { hyp_dump_backtrace() local
/linux/arch/powerpc/include/asm/
irq.h
38 * Per-cpu stacks for handling critical, debug and machine check
47 * Per-cpu stacks for handling hard and soft interrupts.
/linux/samples/fprobe/
fprobe_example.c
44 unsigned long stacks[BACKTRACE_DEPTH]; in show_backtrace() local
47 len = stack_trace_save(stacks, BACKTRACE_DEPTH, 2); in show_backtrace()
48 stack_trace_print(stacks, len, 24); in show_backtrace()
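
The fprobe sample above saves and prints a backtrace with the generic stacktrace helpers: stack_trace_save() fills an array with return addresses (skipping the innermost frames) and returns how many it stored, and stack_trace_print() dumps them to the kernel log. A self-contained sketch with hypothetical names and depth:

#include <linux/stacktrace.h>

#define DEMO_BACKTRACE_DEPTH 16

static void demo_show_backtrace(void)
{
        unsigned long entries[DEMO_BACKTRACE_DEPTH];
        unsigned int len;

        /* Skip the two innermost frames; indent the printout by 4 spaces. */
        len = stack_trace_save(entries, DEMO_BACKTRACE_DEPTH, 2);
        stack_trace_print(entries, len, 4);
}
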
/linux/arch/arm64/kernel/
stacktrace.c
299 * Per-cpu stacks are only accessible when unwinding the current task in a
310 * SDEI stacks are only accessible when unwinding the current task in an NMI
332 struct stack_info stacks[] = { in kunwind_stack_walk() local
348 .stacks = stacks, in kunwind_stack_walk()
349 .nr_stacks = ARRAY_SIZE(stacks), in kunwind_stack_walk()
sdei.c
26 * VMAP'd stacks checking for stack overflow on exception using sp as a scratch
27 * register, meaning SDEI has to switch to its own stack. We need two stacks as
32 * For now, we allocate stacks when the driver is probed.
/linux/Documentation/devicetree/bindings/gpu/
arm,mali-valhall-csf.yaml
48 - const: stacks
126 clock-names = "core", "coregroup", "stacks";
/linux/arch/x86/include/asm/
cpu_entry_area.h
35 /* The exception stacks' physical storage. No guard pages required */
116 * Exception stacks used for IST entries with guard pages.
/linux/tools/perf/scripts/python/
flamegraph.py
39 const stacks = [/** @flamegraph_json **/];
45 .datum(stacks[0])
203 output_fn = self.args.output or "stacks.json"
stackcollapse.py
15 # perf script report stackcollapse > out.stacks-folded
48 help="do not separate stacks according to comm"),
/linux/include/linux/
binfmts.h
135 #define EXSTACK_DISABLE_X 1 /* Disable executable stacks */
136 #define EXSTACK_ENABLE_X 2 /* Enable executable stacks */
/linux/tools/perf/util/
bpf_lock_contention.c
125 bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64)); in lock_contention_prepare()
135 bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries); in lock_contention_prepare()
137 bpf_map__set_max_entries(skel->maps.stacks, 1); in lock_contention_prepare()
553 stack = bpf_map__fd(skel->maps.stacks);
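
The perf code above resizes the BPF "stacks" map before the skeleton is loaded, because a map's value size and max entries are fixed once the object is loaded. A hedged sketch of the same libbpf calls, assuming libbpf 1.x error semantics and a hypothetical object file and map name:

#include <bpf/libbpf.h>

int resize_stacks_map(unsigned int max_stack, unsigned int nr_entries)
{
        struct bpf_object *obj = bpf_object__open_file("lock.bpf.o", NULL);
        struct bpf_map *stacks;

        if (!obj)
                return -1;

        stacks = bpf_object__find_map_by_name(obj, "stacks");
        if (!stacks)
                goto err;

        /* Must happen before bpf_object__load(). */
        bpf_map__set_value_size(stacks, max_stack * sizeof(__u64));
        bpf_map__set_max_entries(stacks, nr_entries);

        if (bpf_object__load(obj))
                goto err;

        return bpf_map__fd(stacks);
err:
        bpf_object__close(obj);
        return -1;
}
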
/linux/arch/arc/
Kconfig.debug
4 bool "Use 16Kb for kernel stacks instead of 8Kb"
