1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * mm/debug.c 4 * 5 * mm/ specific debug routines. 6 * 7 */ 8 9 #include <linux/kernel.h> 10 #include <linux/mm.h> 11 #include <linux/trace_events.h> 12 #include <linux/memcontrol.h> 13 #include <trace/events/mmflags.h> 14 #include <linux/migrate.h> 15 #include <linux/page_owner.h> 16 #include <linux/ctype.h> 17 18 #include "internal.h" 19 #include <trace/events/migrate.h> 20 21 /* 22 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can 23 * be used to populate migrate_reason_names[]. 24 */ 25 #undef EM 26 #undef EMe 27 #define EM(a, b) b, 28 #define EMe(a, b) b 29 30 const char *migrate_reason_names[MR_TYPES] = { 31 MIGRATE_REASON 32 }; 33 34 const struct trace_print_flags pageflag_names[] = { 35 __def_pageflag_names, 36 {0, NULL} 37 }; 38 39 const struct trace_print_flags pagetype_names[] = { 40 __def_pagetype_names, 41 {0, NULL} 42 }; 43 44 const struct trace_print_flags gfpflag_names[] = { 45 __def_gfpflag_names, 46 {0, NULL} 47 }; 48 49 const struct trace_print_flags vmaflag_names[] = { 50 __def_vmaflag_names, 51 {0, NULL} 52 }; 53 54 static void __dump_folio(struct folio *folio, struct page *page, 55 unsigned long pfn, unsigned long idx) 56 { 57 struct address_space *mapping = folio_mapping(folio); 58 int mapcount = atomic_read(&page->_mapcount) + 1; 59 char *type = ""; 60 61 /* Open-code page_mapcount() to avoid looking up a stale folio */ 62 if (mapcount < 0) 63 mapcount = 0; 64 if (folio_test_large(folio)) 65 mapcount += folio_entire_mapcount(folio); 66 67 pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n", 68 folio_ref_count(folio), mapcount, mapping, 69 folio->index + idx, pfn); 70 if (folio_test_large(folio)) { 71 pr_warn("head: order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n", 72 folio_order(folio), 73 folio_entire_mapcount(folio), 74 folio_nr_pages_mapped(folio), 75 atomic_read(&folio->_pincount)); 76 } 77 78 #ifdef CONFIG_MEMCG 79 if (folio->memcg_data) 80 pr_warn("memcg:%lx\n", folio->memcg_data); 81 #endif 82 if (folio_test_ksm(folio)) 83 type = "ksm "; 84 else if (folio_test_anon(folio)) 85 type = "anon "; 86 else if (mapping) 87 dump_mapping(mapping); 88 BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1); 89 90 /* 91 * Accessing the pageblock without the zone lock. It could change to 92 * "isolate" again in the meantime, but since we are just dumping the 93 * state for debugging, it should be fine to accept a bit of 94 * inaccuracy here due to racing. 95 */ 96 pr_warn("%sflags: %pGp%s\n", type, &folio->flags, 97 is_migrate_cma_folio(folio, pfn) ? " CMA" : ""); 98 if (page_has_type(&folio->page)) 99 pr_warn("page_type: %pGt\n", &folio->page.page_type); 100 101 print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32, 102 sizeof(unsigned long), page, 103 sizeof(struct page), false); 104 if (folio_test_large(folio)) 105 print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32, 106 sizeof(unsigned long), folio, 107 2 * sizeof(struct page), false); 108 } 109 110 static void __dump_page(const struct page *page) 111 { 112 struct folio *foliop, folio; 113 struct page precise; 114 unsigned long pfn = page_to_pfn(page); 115 unsigned long idx, nr_pages = 1; 116 int loops = 5; 117 118 again: 119 memcpy(&precise, page, sizeof(*page)); 120 foliop = page_folio(&precise); 121 if (foliop == (struct folio *)&precise) { 122 idx = 0; 123 if (!folio_test_large(foliop)) 124 goto dump; 125 foliop = (struct folio *)page; 126 } else { 127 idx = folio_page_idx(foliop, page); 128 } 129 130 if (idx < MAX_FOLIO_NR_PAGES) { 131 memcpy(&folio, foliop, 2 * sizeof(struct page)); 132 nr_pages = folio_nr_pages(&folio); 133 foliop = &folio; 134 } 135 136 if (idx > nr_pages) { 137 if (loops-- > 0) 138 goto again; 139 pr_warn("page does not match folio\n"); 140 precise.compound_head &= ~1UL; 141 foliop = (struct folio *)&precise; 142 idx = 0; 143 } 144 145 dump: 146 __dump_folio(foliop, &precise, pfn, idx); 147 } 148 149 void dump_page(const struct page *page, const char *reason) 150 { 151 if (PagePoisoned(page)) 152 pr_warn("page:%p is uninitialized and poisoned", page); 153 else 154 __dump_page(page); 155 if (reason) 156 pr_warn("page dumped because: %s\n", reason); 157 dump_page_owner(page); 158 } 159 EXPORT_SYMBOL(dump_page); 160 161 #ifdef CONFIG_DEBUG_VM 162 163 void dump_vma(const struct vm_area_struct *vma) 164 { 165 pr_emerg("vma %px start %px end %px mm %px\n" 166 "prot %lx anon_vma %px vm_ops %px\n" 167 "pgoff %lx file %px private_data %px\n" 168 "flags: %#lx(%pGv)\n", 169 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm, 170 (unsigned long)pgprot_val(vma->vm_page_prot), 171 vma->anon_vma, vma->vm_ops, vma->vm_pgoff, 172 vma->vm_file, vma->vm_private_data, 173 vma->vm_flags, &vma->vm_flags); 174 } 175 EXPORT_SYMBOL(dump_vma); 176 177 void dump_mm(const struct mm_struct *mm) 178 { 179 pr_emerg("mm %px task_size %lu\n" 180 "mmap_base %lu mmap_legacy_base %lu\n" 181 "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" 182 "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" 183 "pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n" 184 "start_code %lx end_code %lx start_data %lx end_data %lx\n" 185 "start_brk %lx brk %lx start_stack %lx\n" 186 "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" 187 "binfmt %px flags %lx\n" 188 #ifdef CONFIG_AIO 189 "ioctx_table %px\n" 190 #endif 191 #ifdef CONFIG_MEMCG 192 "owner %px " 193 #endif 194 "exe_file %px\n" 195 #ifdef CONFIG_MMU_NOTIFIER 196 "notifier_subscriptions %px\n" 197 #endif 198 #ifdef CONFIG_NUMA_BALANCING 199 "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" 200 #endif 201 "tlb_flush_pending %d\n" 202 "def_flags: %#lx(%pGv)\n", 203 204 mm, mm->task_size, 205 mm->mmap_base, mm->mmap_legacy_base, 206 mm->pgd, atomic_read(&mm->mm_users), 207 atomic_read(&mm->mm_count), 208 mm_pgtables_bytes(mm), 209 mm->map_count, 210 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, 211 (u64)atomic64_read(&mm->pinned_vm), 212 mm->data_vm, mm->exec_vm, mm->stack_vm, 213 mm->start_code, mm->end_code, mm->start_data, mm->end_data, 214 mm->start_brk, mm->brk, mm->start_stack, 215 mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, 216 mm->binfmt, mm->flags, 217 #ifdef CONFIG_AIO 218 mm->ioctx_table, 219 #endif 220 #ifdef CONFIG_MEMCG 221 mm->owner, 222 #endif 223 mm->exe_file, 224 #ifdef CONFIG_MMU_NOTIFIER 225 mm->notifier_subscriptions, 226 #endif 227 #ifdef CONFIG_NUMA_BALANCING 228 mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, 229 #endif 230 atomic_read(&mm->tlb_flush_pending), 231 mm->def_flags, &mm->def_flags 232 ); 233 } 234 EXPORT_SYMBOL(dump_mm); 235 236 static bool page_init_poisoning __read_mostly = true; 237 238 static int __init setup_vm_debug(char *str) 239 { 240 bool __page_init_poisoning = true; 241 242 /* 243 * Calling vm_debug with no arguments is equivalent to requesting 244 * to enable all debugging options we can control. 245 */ 246 if (*str++ != '=' || !*str) 247 goto out; 248 249 __page_init_poisoning = false; 250 if (*str == '-') 251 goto out; 252 253 while (*str) { 254 switch (tolower(*str)) { 255 case'p': 256 __page_init_poisoning = true; 257 break; 258 default: 259 pr_err("vm_debug option '%c' unknown. skipped\n", 260 *str); 261 } 262 263 str++; 264 } 265 out: 266 if (page_init_poisoning && !__page_init_poisoning) 267 pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n"); 268 269 page_init_poisoning = __page_init_poisoning; 270 271 return 1; 272 } 273 __setup("vm_debug", setup_vm_debug); 274 275 void page_init_poison(struct page *page, size_t size) 276 { 277 if (page_init_poisoning) 278 memset(page, PAGE_POISON_PATTERN, size); 279 } 280 281 void vma_iter_dump_tree(const struct vma_iterator *vmi) 282 { 283 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) 284 mas_dump(&vmi->mas); 285 mt_dump(vmi->mas.tree, mt_dump_hex); 286 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ 287 } 288 289 #endif /* CONFIG_DEBUG_VM */ 290