// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
 * and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them contiguously one
 * after another in a contiguous memory allocation.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
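
/*
 * Typical usage, as a minimal sketch (the exact call sites live in the
 * depot's clients, e.g. KASAN; the local variable names here are made up):
 *
 *	unsigned long entries[64];
 *	unsigned long *saved;
 *	unsigned int nr;
 *	depot_stack_handle_t handle;
 *
 *	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr, GFP_NOWAIT);
 *	if (handle) {
 *		nr = stack_depot_fetch(handle, &saved);
 *		stack_trace_print(saved, nr, 0);
 *	}
 */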

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
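
/*
 * A worked example, assuming 4 KB pages (PAGE_SHIFT == 12) and a 32-bit
 * depot_stack_handle_t: each slab is 1 << (12 + 2) = 16 KB, offsets are
 * stored in 16-byte units and thus need 2 + 12 - 4 = 10 bits, which leaves
 * 32 - 1 - 10 = 21 bits for the slab index. 2^21 exceeds the 8192-slab cap,
 * so the depot tops out at 8192 * 16 KB = 128 MB of stored stack traces.
 */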

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};
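
/*
 * A handle decodes to stack_slabs[slabindex] + (offset << STACK_ALLOC_ALIGN);
 * the valid bit guarantees that no real handle compares equal to 0, so 0 can
 * serve as the failure value of stack_depot_save().
 */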

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
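
/*
 * Allocation state: depot_index selects the slab currently being filled,
 * depot_offset is the first free byte within that slab, and next_slab_inited
 * says whether stack_slabs[depot_index + 1] has already been populated.
 */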
static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_SPINLOCK(depot_lock);
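
/*
 * Consume *prealloc as a depot slab if one is still needed: it backs the
 * current slot when that is empty, or the next slot otherwise. If the next
 * slab is already initialized, *prealloc is left for the caller to free.
 * Returns false only when no preallocated memory was passed in.
 */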
static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release() pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	depot_offset += required_size;

	return stack;
}

#define STACK_HASH_ORDER 20
#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c
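
/*
 * With STACK_HASH_ORDER == 20 the table below holds 2^20 buckets; on a
 * 64-bit kernel that is 8 MB of static pointers, traded for fast lookup.
 */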

static struct stack_record *stack_table[STACK_HASH_SIZE] = {
	[0 ... STACK_HASH_SIZE - 1] = NULL
};

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}

/*
 * Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for (; n--; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack in the hash bucket that is equal to the one stored in entries */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					     unsigned long *entries, int size,
					     u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:		Stack depot handle which was returned from
 *			stack_depot_save().
 * @entries:		Pointer to store the entries address
 *
 * Return: The number of trace entries for this handle; 0 (with *entries set
 *	   to NULL) if the handle does not resolve to a stored stack.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Return: The handle of the stack struct stored in depot, or 0 if saving
 *	   the stack trace failed (e.g. @nr_entries is 0 or the depot could
 *	   not allocate space).
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t retval = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	if (unlikely(nr_entries == 0))
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be
	 * initialized. If so, allocate the memory now - we won't be able to
	 * do that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}
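
/**
 * filter_irq_stacks - Truncate a stack trace at interrupt entry
 *
 * @entries:		Pointer to the stack trace entries
 * @nr_entries:		Number of entries in the trace
 *
 * Return: The number of entries up to and including the first (soft)irq
 *	   entry function, or @nr_entries if the trace contains none.
 */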
unsigned int filter_irq_stacks(unsigned long *entries,
			       unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);