xref: /linux/mm/kfence/report.c (revision fb12940f51d96ead10f9c0fd578e69b8de10ca81)
// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE reporting.
 *
 * Copyright (C) 2020, Google LLC.
 */

#include <linux/stdarg.h>

#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/math.h>
#include <linux/printk.h>
#include <linux/sched/debug.h>
#include <linux/seq_file.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <trace/events/error_report.h>

#include <asm/kfence.h>

#include "kfence.h"

/* May be overridden by <asm/kfence.h>. */
#ifndef ARCH_FUNC_PREFIX
#define ARCH_FUNC_PREFIX ""
#endif

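/* Set by the "no_hash_pointers" boot parameter; %p then prints unhashed pointer values. */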
extern bool no_hash_pointers;

/* Helper function to either print to a seq_file or to console. */
__printf(2, 3)
static void seq_con_printf(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (seq)
		seq_vprintf(seq, fmt, args);
	else
		vprintk(fmt, args);
	va_end(args);
}

/*
 * Get the number of stack entries to skip to get out of MM internals. @type is
 * optional, and if set to NULL, assumes an allocation or free stack.
 */
static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries,
			    const enum kfence_error_type *type)
{
	char buf[64];
	int skipnr, fallback = 0;

	if (type) {
		/* Depending on error type, find different stack entries. */
		switch (*type) {
		case KFENCE_ERROR_UAF:
		case KFENCE_ERROR_OOB:
		case KFENCE_ERROR_INVALID:
			/*
			 * kfence_handle_page_fault() may be called with pt_regs
			 * set to NULL; in that case we'll simply show the full
			 * stack trace.
			 */
			return 0;
		case KFENCE_ERROR_CORRUPTION:
		case KFENCE_ERROR_INVALID_FREE:
			break;
		}
	}

	for (skipnr = 0; skipnr < num_entries; skipnr++) {
		int len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skipnr]);

		if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfence_") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kfence_") ||
		    !strncmp(buf, ARCH_FUNC_PREFIX "__slab_free", len)) {
			/*
			 * Remember the next entry as a fallback, in case one of
			 * the allocator entry points checked below tail-called
			 * into one of the functions matched above and therefore
			 * does not appear in the trace itself.
			 */
			fallback = skipnr + 1;
		}

		/* Also match the *_bulk() variants by only checking prefixes. */
		if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfree") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_free") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmalloc") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_alloc"))
			goto found;
	}
	if (fallback < num_entries)
		return fallback;
found:
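	/* Skip the matched allocator entry itself, so the report starts at its caller. */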
	skipnr++;
	return skipnr < num_entries ? skipnr : 0;
}

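/* Print the stack trace recorded in @meta at allocation or free time. */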
static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadata *meta,
			       bool show_alloc)
{
	const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
	u64 ts_sec = track->ts_nsec;
	unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);

	/* Timestamp matches printk timestamp format. */
	seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus:\n",
		       show_alloc ? "allocated" : "freed", track->pid,
		       track->cpu, (unsigned long)ts_sec, rem_nsec / 1000);

	if (track->num_stack_entries) {
		/* Skip allocation/free internals stack. */
		int i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);

		/* stack_trace_seq_print() does not exist; open code our own. */
		for (; i < track->num_stack_entries; i++)
			seq_con_printf(seq, " %pS\n", (void *)track->stack_entries[i]);
	} else {
		seq_con_printf(seq, " no %s stack\n", show_alloc ? "allocation" : "deallocation");
	}
}

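/* Print address range, size, cache name, and alloc/free stacks of a KFENCE object. */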
void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta)
{
	const int size = abs(meta->size);
	const unsigned long start = meta->addr;
	const struct kmem_cache *const cache = meta->cache;

	lockdep_assert_held(&meta->lock);

	if (meta->state == KFENCE_OBJECT_UNUSED) {
		seq_con_printf(seq, "kfence-#%td unused\n", meta - kfence_metadata);
		return;
	}

	seq_con_printf(seq, "kfence-#%td: 0x%p-0x%p, size=%d, cache=%s\n\n",
		       meta - kfence_metadata, (void *)start, (void *)(start + size - 1),
		       size, (cache && cache->name) ? cache->name : "<destroyed>");

	kfence_print_stack(seq, meta, true);

	if (meta->state == KFENCE_OBJECT_FREED) {
		seq_con_printf(seq, "\n");
		kfence_print_stack(seq, meta, false);
	}
}

/*
 * Show bytes at @address that differ from the expected canary values, up to
 * @bytes_to_show bytes.
 */
static void print_diff_canary(unsigned long address, size_t bytes_to_show,
			      const struct kfence_metadata *meta)
{
	const unsigned long show_until_addr = address + bytes_to_show;
	const u8 *cur, *end;

	/* Do not show contents of object nor read into following guard page. */
	end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr)
						: min(show_until_addr, PAGE_ALIGN(address)));

	pr_cont("[");
	for (cur = (const u8 *)address; cur < end; cur++) {
		if (*cur == KFENCE_CANARY_PATTERN(cur))
			pr_cont(" .");
		else if (no_hash_pointers)
			pr_cont(" 0x%02x", *cur);
		else /* Do not leak kernel memory in non-debug builds. */
			pr_cont(" !");
	}
	pr_cont(" ]");
}

static const char *get_access_type(bool is_write)
{
	return is_write ? "write" : "read";
}

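/*
 * Generate a KFENCE error report for @address: print a header matching the
 * error @type, the stack trace of the access, and the object's metadata (if
 * any), then taint the kernel with TAINT_BAD_PAGE.
 */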
void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
			 const struct kfence_metadata *meta, enum kfence_error_type type)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 };
	const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1;
	int num_stack_entries;
	int skipnr = 0;

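	/*
	 * Use the faulting context if regs are available; otherwise capture
	 * the current stack and trim off KFENCE/allocator internals.
	 */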
	if (regs) {
		num_stack_entries = stack_trace_save_regs(regs, stack_entries, KFENCE_STACK_DEPTH, 0);
	} else {
		num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1);
		skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type);
	}

	/* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */
	if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta))
		return;

	if (meta)
		lockdep_assert_held(&meta->lock);
	/*
	 * Because we may generate reports in printk-unfriendly parts of the
	 * kernel, such as scheduler code, the use of printk() could deadlock.
	 * Until such time that all printing code here is safe in all parts of
	 * the kernel, accept the risk, and just get our message out (given the
	 * system might already behave unpredictably due to the memory error).
	 * As such, also disable lockdep to hide warnings, and avoid disabling
	 * lockdep for the rest of the kernel.
	 */
	lockdep_off();

	pr_err("==================================================================\n");
	/* Print report header. */
	switch (type) {
	case KFENCE_ERROR_OOB: {
		const bool left_of_object = address < meta->addr;

		pr_err("BUG: KFENCE: out-of-bounds %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Out-of-bounds %s at 0x%p (%luB %s of kfence-#%td):\n",
		       get_access_type(is_write), (void *)address,
		       left_of_object ? meta->addr - address : address - meta->addr,
		       left_of_object ? "left" : "right", object_index);
		break;
	}
	case KFENCE_ERROR_UAF:
		pr_err("BUG: KFENCE: use-after-free %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Use-after-free %s at 0x%p (in kfence-#%td):\n",
		       get_access_type(is_write), (void *)address, object_index);
		break;
	case KFENCE_ERROR_CORRUPTION:
		pr_err("BUG: KFENCE: memory corruption in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Corrupted memory at 0x%p ", (void *)address);
		print_diff_canary(address, 16, meta);
		pr_cont(" (in kfence-#%td):\n", object_index);
		break;
	case KFENCE_ERROR_INVALID:
		pr_err("BUG: KFENCE: invalid %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Invalid %s at 0x%p:\n", get_access_type(is_write),
		       (void *)address);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		pr_err("BUG: KFENCE: invalid free in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Invalid free of 0x%p (in kfence-#%td):\n", (void *)address,
		       object_index);
		break;
	}

	/* Print stack trace and object info. */
	stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, 0);

	if (meta) {
		pr_err("\n");
		kfence_print_object(NULL, meta);
	}

	/* Print report footer. */
	pr_err("\n");
	if (no_hash_pointers && regs)
		show_regs(regs);
	else
		dump_stack_print_info(KERN_ERR);
	trace_error_report_end(ERROR_DETECTOR_KFENCE, address);
	pr_err("==================================================================\n");

	lockdep_on();

	if (panic_on_warn)
		panic("panic_on_warn set ...\n");

	/* We encountered a memory safety error, taint the kernel! */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
}

#ifdef CONFIG_PRINTK
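/* Copy a KFENCE stack trace into a kmem_obj_info stack array, skipping allocator internals. */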
static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
{
	int i, j;

	i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
	for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
		kp_stack[j] = (void *)track->stack_entries[i];
	if (j < KS_ADDRS_COUNT)
		kp_stack[j] = NULL;
}

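/*
 * Fill in @kpp for a KFENCE-managed object, so that generic slab object-info
 * queries also cover KFENCE allocations; returns false if @object is not
 * managed by KFENCE.
 */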
bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
	unsigned long flags;

	if (!meta)
		return false;

	/*
	 * If state is UNUSED at least show the pointer requested; the rest
	 * would be garbage data.
	 */
	kpp->kp_ptr = object;

	/* Requesting info on a never-used object is almost certainly a bug. */
	if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
		return true;

	raw_spin_lock_irqsave(&meta->lock, flags);

	kpp->kp_slab = slab;
	kpp->kp_slab_cache = meta->cache;
	kpp->kp_objp = (void *)meta->addr;
	kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
	/* get_stack_skipnr() ensures the first entry is outside allocator. */
	kpp->kp_ret = kpp->kp_stack[0];

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return true;
}
#endif