// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <kunit/test.h>
#include <kunit/visibility.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <trace/events/error_report.h>

#include <asm/sections.h>

#include "kasan.h"
#include "../slab.h"

static unsigned long kasan_flags;

#define KASAN_BIT_REPORTED	0
#define KASAN_BIT_MULTI_SHOT	1

enum kasan_arg_fault {
	KASAN_ARG_FAULT_DEFAULT,
	KASAN_ARG_FAULT_REPORT,
	KASAN_ARG_FAULT_PANIC,
	KASAN_ARG_FAULT_PANIC_ON_WRITE,
};

static enum kasan_arg_fault kasan_arg_fault __ro_after_init = KASAN_ARG_FAULT_DEFAULT;

/* kasan.fault=report/panic/panic_on_write */
static int __init early_kasan_fault(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "report"))
		kasan_arg_fault = KASAN_ARG_FAULT_REPORT;
	else if (!strcmp(arg, "panic"))
		kasan_arg_fault = KASAN_ARG_FAULT_PANIC;
	else if (!strcmp(arg, "panic_on_write"))
		kasan_arg_fault = KASAN_ARG_FAULT_PANIC_ON_WRITE;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.fault", early_kasan_fault);

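/*
 * kasan_multi_shot: print a report for every detected bug instead of only
 * the first one (see report_enabled() below).
 */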
static int __init kasan_set_multi_shot(char *str)
{
	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
	return 1;
}
__setup("kasan_multi_shot", kasan_set_multi_shot);

/*
 * This function is used to check whether KASAN reports are suppressed for
 * software KASAN modes via kasan_disable/enable_current() critical sections.
 *
 * This is done to avoid:
 * 1. False-positive reports when accessing slab metadata,
 * 2. Deadlocking when poisoned memory is accessed by the reporting code.
 *
 * Hardware Tag-Based KASAN instead relies on:
 * For #1: Resetting tags via kasan_reset_tag().
 * For #2: Suppression of tag checks via the CPU, see report_suppress_start/stop().
 */
static bool report_suppressed_sw(void)
{
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	if (current->kasan_depth)
		return true;
#endif
	return false;
}

static void report_suppress_start(void)
{
#ifdef CONFIG_KASAN_HW_TAGS
	/*
	 * Disable preemption for the duration of printing a KASAN report, as
	 * hw_suppress_tag_checks_start() disables checks on the current CPU.
	 */
	preempt_disable();
	hw_suppress_tag_checks_start();
#else
	kasan_disable_current();
#endif
}

static void report_suppress_stop(void)
{
#ifdef CONFIG_KASAN_HW_TAGS
	hw_suppress_tag_checks_stop();
	preempt_enable();
#else
	kasan_enable_current();
#endif
}

/*
 * Used to avoid reporting more than one KASAN bug unless kasan_multi_shot
 * is enabled. Note that KASAN tests effectively enable kasan_multi_shot
 * for their duration.
 */
static bool report_enabled(void)
{
	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		return true;
	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
}

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

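/*
 * Helpers for the KASAN KUnit tests: enable kasan_multi_shot for the test
 * run and restore the previous state afterwards, so that each test case can
 * trigger and observe its own report.
 */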
VISIBLE_IF_KUNIT bool kasan_save_enable_multi_shot(void)
{
	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_IF_KUNIT(kasan_save_enable_multi_shot);

VISIBLE_IF_KUNIT void kasan_restore_multi_shot(bool enabled)
{
	if (!enabled)
		clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_IF_KUNIT(kasan_restore_multi_shot);

#endif

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

/*
 * Whether the KASAN KUnit test suite is currently being executed.
 * Updated in kasan_test.c.
 */
static bool kasan_kunit_executing;

VISIBLE_IF_KUNIT void kasan_kunit_test_suite_start(void)
{
	WRITE_ONCE(kasan_kunit_executing, true);
}
EXPORT_SYMBOL_IF_KUNIT(kasan_kunit_test_suite_start);

VISIBLE_IF_KUNIT void kasan_kunit_test_suite_end(void)
{
	WRITE_ONCE(kasan_kunit_executing, false);
}
EXPORT_SYMBOL_IF_KUNIT(kasan_kunit_test_suite_end);

static bool kasan_kunit_test_suite_executing(void)
{
	return READ_ONCE(kasan_kunit_executing);
}

#else /* CONFIG_KASAN_KUNIT_TEST */

static inline bool kasan_kunit_test_suite_executing(void) { return false; }

#endif /* CONFIG_KASAN_KUNIT_TEST */

#if IS_ENABLED(CONFIG_KUNIT)

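/*
 * If a KASAN report is printed while some non-KASAN KUnit test is running,
 * mark that test as failed: the report most likely indicates a bug in the
 * code under test. Reports triggered by the KASAN test suite itself are
 * expected and are left alone.
 */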
static void fail_non_kasan_kunit_test(void)
{
	struct kunit *test;

	if (kasan_kunit_test_suite_executing())
		return;

	test = current->kunit_test;
	if (test)
		kunit_set_failure(test);
}

#else /* CONFIG_KUNIT */

static inline void fail_non_kasan_kunit_test(void) { }

#endif /* CONFIG_KUNIT */

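/* Serializes reports from different CPUs so that their output is not interleaved. */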
static DEFINE_RAW_SPINLOCK(report_lock);

static void start_report(unsigned long *flags, bool sync)
{
	fail_non_kasan_kunit_test();
	/* Respect the /proc/sys/kernel/traceoff_on_warning interface. */
	disable_trace_on_warning();
	/* Do not allow LOCKDEP to mangle KASAN reports. */
	lockdep_off();
	/* Make sure we don't end up in a loop. */
	report_suppress_start();
	raw_spin_lock_irqsave(&report_lock, *flags);
	pr_err("==================================================================\n");
}

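/*
 * Finish a report: print the footer, release report_lock, apply the
 * configured failure policy (panic_on_warn unless multi-shot is enabled,
 * kasan.fault=panic, or kasan.fault=panic_on_write for write accesses),
 * and taint the kernel with TAINT_BAD_PAGE.
 */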
static void end_report(unsigned long *flags, const void *addr, bool is_write)
{
	if (addr)
		trace_error_report_end(ERROR_DETECTOR_KASAN,
				       (unsigned long)addr);
	pr_err("==================================================================\n");
	raw_spin_unlock_irqrestore(&report_lock, *flags);
	if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		check_panic_on_warn("KASAN");
	switch (kasan_arg_fault) {
	case KASAN_ARG_FAULT_DEFAULT:
	case KASAN_ARG_FAULT_REPORT:
		break;
	case KASAN_ARG_FAULT_PANIC:
		panic("kasan.fault=panic set ...\n");
		break;
	case KASAN_ARG_FAULT_PANIC_ON_WRITE:
		if (is_write)
			panic("kasan.fault=panic_on_write set ...\n");
		break;
	}
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	lockdep_on();
	report_suppress_stop();
}

static void print_error_description(struct kasan_report_info *info)
{
	pr_err("BUG: KASAN: %s in %pS\n", info->bug_type, (void *)info->ip);

	if (info->type != KASAN_REPORT_ACCESS) {
		pr_err("Free of addr %px by task %s/%d\n",
		       info->access_addr, current->comm, task_pid_nr(current));
		return;
	}

	if (info->access_size)
		pr_err("%s of size %zu at addr %px by task %s/%d\n",
		       info->is_write ? "Write" : "Read", info->access_size,
		       info->access_addr, current->comm, task_pid_nr(current));
	else
		pr_err("%s at addr %px by task %s/%d\n",
		       info->is_write ? "Write" : "Read",
		       info->access_addr, current->comm, task_pid_nr(current));
}

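/*
 * Print one saved alloc/free stack trace. With CONFIG_KASAN_EXTRA_INFO the
 * pid, cpu and timestamp of the event are printed as well; the saved
 * timestamp is (presumably) stored right-shifted to fit the track metadata,
 * so shifting it back by 9 bits restores approximate nanoseconds before it
 * is split into seconds and microseconds.
 */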
static void print_track(struct kasan_track *track, const char *prefix)
{
#ifdef CONFIG_KASAN_EXTRA_INFO
	u64 ts_nsec = track->timestamp;
	unsigned long rem_usec;

	ts_nsec <<= 9;
	rem_usec = do_div(ts_nsec, NSEC_PER_SEC) / 1000;

	pr_err("%s by task %u on cpu %d at %lu.%06lus:\n",
	       prefix, track->pid, track->cpu,
	       (unsigned long)ts_nsec, rem_usec);
#else
	pr_err("%s by task %u:\n", prefix, track->pid);
#endif /* CONFIG_KASAN_EXTRA_INFO */
	if (track->stack)
		stack_depot_print(track->stack);
	else
		pr_err("(stack is not available)\n");
}

static inline struct page *addr_to_page(const void *addr)
{
	if (virt_addr_valid(addr))
		return virt_to_head_page(addr);
	return NULL;
}

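/*
 * Print which slab cache and object the bad address belongs to and where it
 * lies relative to the [object, object + alloc_size) region: inside it, to
 * the left of it, or to the right of it.
 */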
static void describe_object_addr(const void *addr, struct kasan_report_info *info)
{
	unsigned long access_addr = (unsigned long)addr;
	unsigned long object_addr = (unsigned long)info->object;
	const char *rel_type, *region_state = "";
	int rel_bytes;

	pr_err("The buggy address belongs to the object at %px\n"
	       " which belongs to the cache %s of size %d\n",
	       info->object, info->cache->name, info->cache->object_size);

	if (access_addr < object_addr) {
		rel_type = "to the left";
		rel_bytes = object_addr - access_addr;
	} else if (access_addr >= object_addr + info->alloc_size) {
		rel_type = "to the right";
		rel_bytes = access_addr - (object_addr + info->alloc_size);
	} else {
		rel_type = "inside";
		rel_bytes = access_addr - object_addr;
	}

	/*
	 * Tag-Based modes use the stack ring to infer the bug type, but the
	 * memory region state description is generated based on the metadata.
	 * Thus, defining the region state as below can contradict the metadata.
	 * Fixing this requires further improvements, so only infer the state
	 * for the Generic mode.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		if (strcmp(info->bug_type, "slab-out-of-bounds") == 0)
			region_state = "allocated ";
		else if (strcmp(info->bug_type, "slab-use-after-free") == 0)
			region_state = "freed ";
	}

	pr_err("The buggy address is located %d bytes %s of\n"
	       " %s%zu-byte region [%px, %px)\n",
	       rel_bytes, rel_type, region_state, info->alloc_size,
	       (void *)object_addr, (void *)(object_addr + info->alloc_size));
}

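/*
 * Print the saved allocation and free stack traces (when available), plus
 * any mode-specific auxiliary stacks for the object.
 */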
static void describe_object_stacks(struct kasan_report_info *info)
{
	if (info->alloc_track.stack) {
		print_track(&info->alloc_track, "Allocated");
		pr_err("\n");
	}

	if (info->free_track.stack) {
		print_track(&info->free_track, "Freed");
		pr_err("\n");
	}

	kasan_print_aux_stacks(info->cache, info->object);
}

static void describe_object(const void *addr, struct kasan_report_info *info)
{
	if (kasan_stack_collection_enabled())
		describe_object_stacks(info);
	describe_object_addr(addr, info);
}

static inline bool kernel_or_module_addr(const void *addr)
{
	if (is_kernel((unsigned long)addr))
		return true;
	if (is_module_address((unsigned long)addr))
		return true;
	return false;
}

static inline bool init_task_stack_addr(const void *addr)
{
	return addr >= (void *)&init_thread_union.stack &&
		(addr <= (void *)&init_thread_union.stack +
			sizeof(init_thread_union.stack));
}

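/*
 * Describe what the bad address belongs to: a slab object, a global variable,
 * a stack frame, a vmalloc mapping, and/or the underlying physical page.
 */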
static void print_address_description(void *addr, u8 tag,
				      struct kasan_report_info *info)
{
	struct page *page = addr_to_page(addr);

	dump_stack_lvl(KERN_ERR);
	pr_err("\n");

	if (info->cache && info->object) {
		describe_object(addr, info);
		pr_err("\n");
	}

	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
		pr_err("\n");
	}

	if (object_is_on_stack(addr)) {
		/*
		 * Currently, KASAN supports printing frame information only
		 * for accesses to the task's own stack.
		 */
		kasan_print_address_stack_frame(addr);
		pr_err("\n");
	}

	if (is_vmalloc_addr(addr)) {
		struct vm_struct *va = find_vm_area(addr);

		if (va) {
			pr_err("The buggy address belongs to the virtual mapping at\n"
			       " [%px, %px) created by:\n"
			       " %pS\n",
			       va->addr, va->addr + va->size, va->caller);
			pr_err("\n");

			page = vmalloc_to_page(addr);
		}
	}

	if (page) {
		pr_err("The buggy address belongs to the physical page:\n");
		dump_page(page, "kasan: bad access detected");
		pr_err("\n");
	}
}

static bool meta_row_is_guilty(const void *row, const void *addr)
{
	return (row <= addr) && (addr < row + META_MEM_BYTES_PER_ROW);
}

static int meta_pointer_offset(const void *row, const void *addr)
{
	/*
	 * Memory state around the buggy address:
	 *   ff00ff00ff00ff00: 00 00 00 05 fe fe fe fe fe fe fe fe fe fe fe fe
	 *   ...
	 *
	 * The length of ">ff00ff00ff00ff00: " is
	 *   3 + (BITS_PER_LONG / 8) * 2 chars.
	 * The length of each granule metadata is 2 bytes
	 *   plus 1 byte for space.
	 */
	return 3 + (BITS_PER_LONG / 8) * 2 +
		(addr - row) / KASAN_GRANULE_SIZE * 3 + 1;
}

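/*
 * Dump the rows of shadow/tag metadata surrounding the bad address and point
 * at the row that covers it with a '^' marker.
 */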
static void print_memory_metadata(const void *addr)
{
	int i;
	void *row;

	row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
			- META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -META_ROWS_AROUND_ADDR; i <= META_ROWS_AROUND_ADDR; i++) {
		char buffer[4 + (BITS_PER_LONG / 8) * 2];
		char metadata[META_BYTES_PER_ROW];

		snprintf(buffer, sizeof(buffer),
				(i == 0) ? ">%px: " : " %px: ", row);

		/*
		 * We should not pass a shadow pointer to a generic function,
		 * because generic functions may try to access the KASAN
		 * mapping for the passed address.
		 */
		kasan_metadata_fetch_row(&metadata[0], row);

		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, META_BYTES_PER_ROW, 1,
			metadata, META_BYTES_PER_ROW, 0);

		if (meta_row_is_guilty(row, addr))
			pr_err("%*c\n", meta_pointer_offset(row, addr), '^');

		row += META_MEM_BYTES_PER_ROW;
	}
}

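/*
 * Print the complete report: the bug description, tag information where
 * applicable, the address description, and the surrounding memory state,
 * provided metadata exists for the accessed address; otherwise only the
 * description and a stack dump are printed.
 */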
static void print_report(struct kasan_report_info *info)
{
	void *addr = kasan_reset_tag((void *)info->access_addr);
	u8 tag = get_tag((void *)info->access_addr);

	print_error_description(info);
	if (addr_has_metadata(addr))
		kasan_print_tags(tag, info->first_bad_addr);
	pr_err("\n");

	if (addr_has_metadata(addr)) {
		print_address_description(addr, tag, info);
		print_memory_metadata(info->first_bad_addr);
	} else {
		dump_stack_lvl(KERN_ERR);
	}
}

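/*
 * Fill in the kasan_report_info fields that are derived from the access:
 * the first bad address, the slab cache and object (if the address belongs
 * to a slab), the allocation size, and the bug type.
 */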
static void complete_report_info(struct kasan_report_info *info)
{
	void *addr = kasan_reset_tag((void *)info->access_addr);
	struct slab *slab;

	if (info->type == KASAN_REPORT_ACCESS)
		info->first_bad_addr = kasan_find_first_bad_addr(
			(void *)info->access_addr, info->access_size);
	else
		info->first_bad_addr = addr;

	slab = kasan_addr_to_slab(addr);
	if (slab) {
		info->cache = slab->slab_cache;
		info->object = nearest_obj(info->cache, slab, addr);

		/* Try to determine allocation size based on the metadata. */
		info->alloc_size = kasan_get_alloc_size(info->object, info->cache);
		/* Fallback to the object size if failed. */
		if (!info->alloc_size)
			info->alloc_size = info->cache->object_size;
	} else
		info->cache = info->object = NULL;

	switch (info->type) {
	case KASAN_REPORT_INVALID_FREE:
		info->bug_type = "invalid-free";
		break;
	case KASAN_REPORT_DOUBLE_FREE:
		info->bug_type = "double-free";
		break;
	default:
		/* bug_type filled in by kasan_complete_mode_report_info. */
		break;
	}

	/* Fill in mode-specific report info fields. */
	kasan_complete_mode_report_info(info);
}

void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_type type)
{
	unsigned long flags;
	struct kasan_report_info info;

	/*
	 * Do not check report_suppressed_sw(), as an invalid-free cannot be
	 * caused by accessing poisoned memory and thus should not be suppressed
	 * by kasan_disable/enable_current() critical sections.
	 *
	 * Note that for Hardware Tag-Based KASAN, kasan_report_invalid_free()
	 * is triggered by explicit tag checks and not by the ones performed by
	 * the CPU. Thus, reporting invalid-free is not suppressed as well.
	 */
	if (unlikely(!report_enabled()))
		return;

	start_report(&flags, true);

	__memset(&info, 0, sizeof(info));
	info.type = type;
	info.access_addr = ptr;
	info.access_size = 0;
	info.is_write = false;
	info.ip = ip;

	complete_report_info(&info);

	print_report(&info);

	/*
	 * Invalid free is considered a "write" since the allocator's metadata
	 * updates involve writes.
	 */
	end_report(&flags, ptr, true);
}

/*
 * kasan_report() is the only reporting function that uses
 * user_access_save/restore(): kasan_report_invalid_free() cannot be called
 * from a UACCESS region, and kasan_report_async() is not used on x86.
 */
bool kasan_report(const void *addr, size_t size, bool is_write,
		  unsigned long ip)
{
	bool ret = true;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags;
	struct kasan_report_info info;

	if (unlikely(report_suppressed_sw()) || unlikely(!report_enabled())) {
		ret = false;
		goto out;
	}

	start_report(&irq_flags, true);

	__memset(&info, 0, sizeof(info));
	info.type = KASAN_REPORT_ACCESS;
	info.access_addr = addr;
	info.access_size = size;
	info.is_write = is_write;
	info.ip = ip;

	complete_report_info(&info);

	print_report(&info);

	end_report(&irq_flags, (void *)addr, is_write);

out:
	user_access_restore(ua_flags);

	return ret;
}

#ifdef CONFIG_KASAN_HW_TAGS
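/*
 * Report an asynchronously signalled tag check fault (Hardware Tag-Based
 * KASAN only). The faulting address and access size are not known in this
 * mode, so the report carries no access details.
 */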
void kasan_report_async(void)
{
	unsigned long flags;

	/*
	 * Do not check report_suppressed_sw(), as
	 * kasan_disable/enable_current() critical sections do not affect
	 * Hardware Tag-Based KASAN.
	 */
	if (unlikely(!report_enabled()))
		return;

	start_report(&flags, false);
	pr_err("BUG: KASAN: invalid-access\n");
	pr_err("Asynchronous fault: no details available\n");
	pr_err("\n");
	dump_stack_lvl(KERN_ERR);
	/*
	 * Conservatively set is_write=true, because no details are available.
	 * In this mode, kasan.fault=panic_on_write is like kasan.fault=panic.
	 */
	end_report(&flags, NULL, true);
}
#endif /* CONFIG_KASAN_HW_TAGS */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * With compiler-based KASAN modes, accesses to bogus pointers (outside of the
 * mapped kernel address space regions) cause faults when KASAN tries to check
 * the shadow memory before the actual memory access. This results in cryptic
 * GPF reports, which are hard for users to interpret. This hook helps users to
 * figure out what the original bogus pointer was.
 */
void kasan_non_canonical_hook(unsigned long addr)
{
	unsigned long orig_addr;
	const char *bug_type;

	/*
	 * All addresses that came as a result of the memory-to-shadow mapping
	 * (even for bogus pointers) must be >= KASAN_SHADOW_OFFSET.
	 */
	if (addr < KASAN_SHADOW_OFFSET)
		return;

	orig_addr = (unsigned long)kasan_shadow_to_mem((void *)addr);

	/*
	 * For faults near the shadow address for NULL, we can be fairly certain
	 * that this is a KASAN shadow memory access.
	 * For faults that correspond to the shadow for low or high canonical
	 * addresses, we can still be pretty sure: these shadow regions are a
	 * fairly narrow chunk of the address space.
	 * But the shadow for non-canonical addresses is a really large chunk
	 * of the address space. For this case, we still print the decoded
	 * address, but make it clear that this is not necessarily what's
	 * actually going on.
	 */
	if (orig_addr < PAGE_SIZE)
		bug_type = "null-ptr-deref";
	else if (orig_addr < TASK_SIZE)
		bug_type = "probably user-memory-access";
	else if (addr_in_shadow((void *)addr))
		bug_type = "probably wild-memory-access";
	else
		bug_type = "maybe wild-memory-access";
	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
		 orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
}
#endif