// SPDX-License-Identifier: GPL-2.0
/*
 * Page poisoning: fill pages with the PAGE_POISON pattern when they are
 * freed, and verify the pattern is still intact when they are handed out
 * again, so that writes to free pages are detected and reported.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
#include <linux/kasan.h>

/* Set during early boot from the "page_poison" command-line parameter. */
bool _page_poisoning_enabled_early;
EXPORT_SYMBOL(_page_poisoning_enabled_early);
/* Static-branch key so the enabled check is cheap on hot paths. */
DEFINE_STATIC_KEY_FALSE(_page_poisoning_enabled);
EXPORT_SYMBOL(_page_poisoning_enabled);

/* Parse "page_poison=on|off" (any kstrtobool-accepted form) at early boot. */
static int __init early_page_poison_param(char *buf)
{
	return kstrtobool(buf, &_page_poisoning_enabled_early);
}
early_param("page_poison", early_page_poison_param);

/* Fill one page with the PAGE_POISON byte pattern. */
static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	/* KASAN still think the page is in-use, so skip it. */
	kasan_disable_current();
	memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}

/* Poison @n consecutive pages starting at @page. */
void __kernel_poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}

/*
 * Return true if @a and @b differ in exactly one bit: the XOR has a
 * single bit set, i.e. it is a nonzero power of two.
 */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

/*
 * Scan @bytes bytes at @mem for deviations from PAGE_POISON and report
 * any corruption found.  Reports are rate-limited; a hex dump of the
 * corrupted span and a stack trace accompany each report.
 */
static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	/* First byte that is not PAGE_POISON; NULL means page is clean. */
	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	/* Walk back from the tail to find the last corrupted byte. */
	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		/* A lone one-bit deviation — possibly a hardware bit flip. */
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
		       end - start + 1, 1);
	dump_stack();
}

/* Verify one page still carries the full poison pattern. */
static void unpoison_page(struct page *page)
{
	void *addr;

	addr = kmap_atomic(page);
	/*
	 * Page poisoning when enabled poisons each and every page
	 * that is freed to buddy. Thus no extra check is done to
	 * see if a page was poisoned.
	 */
	check_poison_mem(addr, PAGE_SIZE);
	kunmap_atomic(addr);
}

/* Check @n consecutive pages starting at @page for poison integrity. */
void __kernel_unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}

#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing, all work is done via poison pages */
}
#endif