// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE reporting.
 *
 * Copyright (C) 2020, Google LLC.
 */

#include <linux/stdarg.h>

#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/math.h>
#include <linux/printk.h>
#include <linux/sched/debug.h>
#include <linux/seq_file.h>
#include <linux/sprintf.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/sched/clock.h>
#include <trace/events/error_report.h>

#include <asm/kfence.h>

#include "kfence.h"

/* May be overridden by <asm/kfence.h>. */
#ifndef ARCH_FUNC_PREFIX
#define ARCH_FUNC_PREFIX ""
#endif
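
/*
 * Illustrative note: an architecture whose ABI decorates C function symbols
 * can override ARCH_FUNC_PREFIX so the prefix matching in get_stack_skipnr()
 * still works, e.g. an ABI using dot-prefixed function entry symbols would
 * define it as ".". The empty default covers most architectures.
 */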

/* Helper function to either print to a seq_file or to console. */
__printf(2, 3)
static void seq_con_printf(struct seq_file *seq, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        if (seq)
                seq_vprintf(seq, fmt, args);
        else
                vprintk(fmt, args);
        va_end(args);
}
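
/*
 * Note: kfence_report_error() below calls kfence_print_object(NULL, ...), so
 * report text reaches the console via vprintk(); a non-NULL @seq (e.g. a
 * debugfs reader) is written via seq_vprintf() instead.
 */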

/*
 * Get the number of stack entries to skip to get out of MM internals. @type is
 * optional, and if set to NULL, assumes an allocation or free stack.
 */
static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries,
                            const enum kfence_error_type *type)
{
        char buf[64];
        int skipnr, fallback = 0;

        if (type) {
                /* Depending on error type, find different stack entries. */
                switch (*type) {
                case KFENCE_ERROR_UAF:
                case KFENCE_ERROR_OOB:
                case KFENCE_ERROR_INVALID:
                        /*
                         * kfence_handle_page_fault() may be called with pt_regs
                         * set to NULL; in that case we'll simply show the full
                         * stack trace.
                         */
                        return 0;
                case KFENCE_ERROR_CORRUPTION:
                case KFENCE_ERROR_INVALID_FREE:
                        break;
                }
        }

        for (skipnr = 0; skipnr < num_entries; skipnr++) {
                int len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skipnr]);

                if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfence_") ||
                    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kfence_") ||
                    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") ||
                    !strncmp(buf, ARCH_FUNC_PREFIX "__slab_free", len)) {
                        /*
                         * The compiler may optimize tail calls from any of the
                         * entry points below into any of the internals above,
                         * in which case the stack trace omits the initial
                         * entry point; remember a fallback for that case.
                         */
                        fallback = skipnr + 1;
                }

                /*
                 * The list below should only include the initial entry points
                 * into the slab allocators; prefix matching also covers their
                 * *_bulk() variants.
                 */
                if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfree") ||
                    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_free") ||
                    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmalloc") ||
                    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_alloc"))
                        goto found;
        }
        if (fallback < num_entries)
                return fallback;
found:
        skipnr++;
        return skipnr < num_entries ? skipnr : 0;
}
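
/*
 * Illustrative example (hypothetical symbols): for a saved allocation stack
 *	kfence_guarded_alloc
 *	__kfence_alloc
 *	kmem_cache_alloc
 *	my_driver_alloc
 * the loop matches kmem_cache_alloc() as the entry point, and the returned
 * skipnr indexes my_driver_alloc(), the first frame outside the allocator.
 */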

static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadata *meta,
                               bool show_alloc)
{
        const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
        u64 ts_sec = track->ts_nsec;
        unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);
        u64 interval_nsec = local_clock() - track->ts_nsec;
        unsigned long rem_interval_nsec = do_div(interval_nsec, NSEC_PER_SEC);
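
        /*
         * do_div() divides its first argument in place and returns the
         * remainder: ts_sec and interval_nsec now hold whole seconds, and the
         * rem_* values the leftover nanoseconds (printed as microseconds
         * below via the division by 1000).
         */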

        /* Timestamp matches printk timestamp format. */
        seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus (%lu.%06lus ago):\n",
                       show_alloc ? "allocated" : meta->state == KFENCE_OBJECT_RCU_FREEING ?
                       "rcu freeing" : "freed", track->pid,
                       track->cpu, (unsigned long)ts_sec, rem_nsec / 1000,
                       (unsigned long)interval_nsec, rem_interval_nsec / 1000);

        if (track->num_stack_entries) {
                /* Skip allocation/free internals stack. */
                int i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);

                /* stack_trace_seq_print() does not exist; open code our own. */
                for (; i < track->num_stack_entries; i++)
                        seq_con_printf(seq, " %pS\n", (void *)track->stack_entries[i]);
        } else {
                seq_con_printf(seq, " no %s stack\n", show_alloc ? "allocation" : "deallocation");
        }
}
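
/*
 * The resulting header resembles (illustrative values):
 *	allocated by task 507 on cpu 0 at 23.255501s (5.332001s ago):
 */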

void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta)
{
        const int size = abs(meta->size);
        const unsigned long start = meta->addr;
        const struct kmem_cache *const cache = meta->cache;

        lockdep_assert_held(&meta->lock);

        if (meta->state == KFENCE_OBJECT_UNUSED) {
                seq_con_printf(seq, "kfence-#%td unused\n", meta - kfence_metadata);
                return;
        }

        seq_con_printf(seq, "kfence-#%td: 0x%p-0x%p, size=%d, cache=%s\n\n",
                       meta - kfence_metadata, (void *)start, (void *)(start + size - 1),
                       size, (cache && cache->name) ? cache->name : "<destroyed>");

        kfence_print_stack(seq, meta, true);

        if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING) {
                seq_con_printf(seq, "\n");
                kfence_print_stack(seq, meta, false);
        }
}

/*
 * Show bytes at @address that differ from the expected canary values, up to
 * @bytes_to_show.
 */
static void print_diff_canary(unsigned long address, size_t bytes_to_show,
                              const struct kfence_metadata *meta)
{
        const unsigned long show_until_addr = address + bytes_to_show;
        const u8 *cur, *end;

        /* Do not show contents of object nor read into following guard page. */
        end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr)
                                                : min(show_until_addr, PAGE_ALIGN(address)));

        pr_cont("[");
        for (cur = (const u8 *)address; cur < end; cur++) {
                if (*cur == KFENCE_CANARY_PATTERN_U8(cur))
                        pr_cont(" .");
                else if (no_hash_pointers)
                        pr_cont(" 0x%02x", *cur);
                else /* Do not leak kernel memory in non-debug builds. */
                        pr_cont(" !");
        }
        pr_cont(" ]");
}
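
/*
 * Example output (hypothetical byte values): a single corrupted canary byte
 * prints as
 *	[ ! . . . . . . . . . . . . . . . ]
 * or, when booted with no_hash_pointers, as
 *	[ 0xac . . . . . . . . . . . . . . . ]
 */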

static const char *get_access_type(bool is_write)
{
        return is_write ? "write" : "read";
}

void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
                         const struct kfence_metadata *meta, enum kfence_error_type type)
{
        unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 };
        const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1;
        int num_stack_entries;
        int skipnr = 0;

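        /*
         * With register state the trace is taken from @regs; otherwise it is
         * saved from here, where skipping one entry omits this function
         * itself and get_stack_skipnr() trims remaining KFENCE internals.
         */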
        if (regs) {
                num_stack_entries = stack_trace_save_regs(regs, stack_entries, KFENCE_STACK_DEPTH, 0);
        } else {
                num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1);
                skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type);
        }

        /* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */
        if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta))
                return;

        if (meta)
                lockdep_assert_held(&meta->lock);
        /*
         * Because we may generate reports in printk-unfriendly parts of the
         * kernel, such as scheduler code, the use of printk() could deadlock.
         * Until such time that all printing code here is safe in all parts of
         * the kernel, accept the risk, and just get our message out (given the
         * system might already behave unpredictably due to the memory error).
         * As such, also disable lockdep to hide warnings, and avoid disabling
         * lockdep for the rest of the kernel.
         */
        lockdep_off();

        pr_err("==================================================================\n");
        /* Print report header. */
        switch (type) {
        case KFENCE_ERROR_OOB: {
                const bool left_of_object = address < meta->addr;

                pr_err("BUG: KFENCE: out-of-bounds %s in %pS\n\n", get_access_type(is_write),
                       (void *)stack_entries[skipnr]);
                pr_err("Out-of-bounds %s at 0x%p (%luB %s of kfence-#%td):\n",
                       get_access_type(is_write), (void *)address,
                       left_of_object ? meta->addr - address : address - meta->addr,
                       left_of_object ? "left" : "right", object_index);
                break;
        }
        case KFENCE_ERROR_UAF:
                pr_err("BUG: KFENCE: use-after-free %s in %pS\n\n", get_access_type(is_write),
                       (void *)stack_entries[skipnr]);
                pr_err("Use-after-free %s at 0x%p (in kfence-#%td):\n",
                       get_access_type(is_write), (void *)address, object_index);
                break;
        case KFENCE_ERROR_CORRUPTION:
                pr_err("BUG: KFENCE: memory corruption in %pS\n\n", (void *)stack_entries[skipnr]);
                pr_err("Corrupted memory at 0x%p ", (void *)address);
                print_diff_canary(address, 16, meta);
                pr_cont(" (in kfence-#%td):\n", object_index);
                break;
        case KFENCE_ERROR_INVALID:
                pr_err("BUG: KFENCE: invalid %s in %pS\n\n", get_access_type(is_write),
                       (void *)stack_entries[skipnr]);
                pr_err("Invalid %s at 0x%p:\n", get_access_type(is_write),
                       (void *)address);
                break;
        case KFENCE_ERROR_INVALID_FREE:
                pr_err("BUG: KFENCE: invalid free in %pS\n\n", (void *)stack_entries[skipnr]);
                pr_err("Invalid free of 0x%p (in kfence-#%td):\n", (void *)address,
                       object_index);
                break;
        }

        /* Print stack trace and object info. */
        stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, 0);

        if (meta) {
                pr_err("\n");
                kfence_print_object(NULL, meta);
        }

        /* Print report footer. */
        pr_err("\n");
        if (no_hash_pointers && regs)
                show_regs(regs);
        else
                dump_stack_print_info(KERN_ERR);
        trace_error_report_end(ERROR_DETECTOR_KFENCE, address);
        pr_err("==================================================================\n");

        lockdep_on();

        check_panic_on_warn("KFENCE");

        /* We encountered a memory safety error, taint the kernel! */
        add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
}
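
/*
 * For reference, a report assembled above begins like (illustrative):
 *	==================================================================
 *	BUG: KFENCE: out-of-bounds read in test_oob_read+0xa6/0x234
 *
 *	Out-of-bounds read at 0x... (1B left of kfence-#72):
 * followed by the access stack trace, the object info, and the footer.
 */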

#ifdef CONFIG_PRINTK
static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
{
        int i, j;

        i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
        for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
                kp_stack[j] = (void *)track->stack_entries[i];
        if (j < KS_ADDRS_COUNT)
                kp_stack[j] = NULL;
}

bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
        struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
        unsigned long flags;

        if (!meta)
                return false;

        /*
         * If state is UNUSED at least show the pointer requested; the rest
         * would be garbage data.
         */
        kpp->kp_ptr = object;

        /* Requesting info on a never-used object is almost certainly a bug. */
        if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
                return true;

        raw_spin_lock_irqsave(&meta->lock, flags);

        kpp->kp_slab = slab;
        kpp->kp_slab_cache = meta->cache;
        kpp->kp_objp = (void *)meta->addr;
        kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
        if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING)
                kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
        /* get_stack_skipnr() ensures the first entry is outside allocator. */
        kpp->kp_ret = kpp->kp_stack[0];

        raw_spin_unlock_irqrestore(&meta->lock, flags);

        return true;
}
#endif