// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

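/*
 * The high byte of page->page_type holds the PGTY_* value; those values
 * start at 0xf0, so subtracting 0xf0 yields a zero-based index into
 * page_type_names[].
 */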
#define DEF_PAGETYPE_NAME(_name) [PGTY_##_name - 0xf0] = __stringify(_name)

static const char *page_type_names[] = {
	DEF_PAGETYPE_NAME(slab),
	DEF_PAGETYPE_NAME(hugetlb),
	DEF_PAGETYPE_NAME(offline),
	DEF_PAGETYPE_NAME(guard),
	DEF_PAGETYPE_NAME(table),
	DEF_PAGETYPE_NAME(buddy),
	DEF_PAGETYPE_NAME(unaccepted),
};

static const char *page_type_name(unsigned int page_type)
{
	unsigned int i = (page_type >> 24) - 0xf0;

	if (i >= ARRAY_SIZE(page_type_names))
		return "unknown";
	return page_type_names[i];
}

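/*
 * Dump state for a folio and one page within it. The caller normally passes
 * snapshots taken by __dump_page() rather than the live structures, so the
 * values may be stale or inconsistent; that is acceptable for a debug dump.
 */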
static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = atomic_read(&page->_mapcount) + 1;
	char *type = "";

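	/*
	 * _mapcount is stored biased by -1 and shares its storage with
	 * page_type; a value in the page-type range is not a real mapcount,
	 * so report 0 instead.
	 */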
	if (page_mapcount_is_type(mapcount))
		mapcount = 0;

	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		int pincount = 0;

		if (folio_has_pincount(folio))
			pincount = atomic_read(&folio->_pincount);

		pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_mapcount(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				pincount);
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	if (page_has_type(&folio->page))
		pr_warn("page_type: %x(%s)\n", folio->page.page_type >> 24,
				page_type_name(folio->page.page_type));

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), folio,
			2 * sizeof(struct page), false);
}

static void __dump_page(const struct page *page)
{
	struct folio *foliop, folio;
	struct page precise;
	unsigned long head;
	unsigned long pfn = page_to_pfn(page);
	unsigned long idx, nr_pages = 1;
	int loops = 5;

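	/*
	 * Operate on a snapshot of the page so the dump cannot be tripped up
	 * by concurrent updates. Bit 0 of compound_head is set for tail
	 * pages, with the remaining bits pointing to the head page (the
	 * folio); head pages have bit 0 clear.
	 */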
again:
	memcpy(&precise, page, sizeof(*page));
	head = precise.compound_head;
	if ((head & 1) == 0) {
		foliop = (struct folio *)&precise;
		idx = 0;
		if (!folio_test_large(foliop))
			goto dump;
		foliop = (struct folio *)page;
	} else {
		foliop = (struct folio *)(head - 1);
		idx = folio_page_idx(foliop, page);
	}

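	/*
	 * If the page could plausibly belong to this folio, snapshot the
	 * folio too: struct folio overlays the first struct pages of a
	 * compound page, so two page structs (plus the third for large
	 * folios) cover the fields dumped below.
	 */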
	if (idx < MAX_FOLIO_NR_PAGES) {
		memcpy(&folio, foliop, 2 * sizeof(struct page));
		nr_pages = folio_nr_pages(&folio);
		if (nr_pages > 1)
			memcpy(&folio.__page_2, &foliop->__page_2,
			       sizeof(struct page));
		foliop = &folio;
	}

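	/*
	 * An index beyond the folio's size means the two snapshots are
	 * inconsistent, most likely because the folio was split or freed in
	 * between; retry a bounded number of times before dumping the page
	 * as a standalone one.
	 */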
	if (idx > nr_pages) {
		if (loops-- > 0)
			goto again;
		pr_warn("page does not match folio\n");
		precise.compound_head &= ~1UL;
		foliop = (struct folio *)&precise;
		idx = 0;
	}

dump:
	__dump_folio(foliop, &precise, pfn, idx);
}

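/*
 * Dump the state of @page to the kernel log, along with an optional @reason
 * for the dump and any page_owner information that is available.
 */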
void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned\n", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

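/*
 * These dumps use %px on purpose: they exist for debugging, where the
 * unhashed kernel addresses are the interesting part.
 */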
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
#ifdef CONFIG_PER_VMA_LOCK
		"refcnt %x\n"
#endif
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
#ifdef CONFIG_PER_VMA_LOCK
		refcount_read(&vma->vm_refcnt),
#endif
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);

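/*
 * Dump the state of a VMA merge operation: the vma_merge_struct itself,
 * then the mm and each VMA involved (prev/middle/next), any of which may
 * be NULL.
 */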
void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
{
	if (reason)
		pr_warn("vmg %px dumped because: %s\n", vmg, reason);

	if (!vmg) {
		pr_warn("vmg %px state: (NULL)\n", vmg);
		return;
	}

	pr_warn("vmg %px state: mm %px pgoff %lx\n"
		"vmi %px [%lx,%lx)\n"
		"prev %px middle %px next %px target %px\n"
		"start %lx end %lx flags %lx\n"
		"file %px anon_vma %px policy %px\n"
		"uffd_ctx %px\n"
		"anon_name %px\n"
		"state %x\n"
		"just_expand %d\n"
		"__adjust_middle_start %d __adjust_next_start %d\n"
		"__remove_middle %d __remove_next %d\n",
		vmg, vmg->mm, vmg->pgoff,
		vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0,
		vmg->vmi ? vma_iter_end(vmg->vmi) : 0,
		vmg->prev, vmg->middle, vmg->next, vmg->target,
		vmg->start, vmg->end, vmg->flags,
		vmg->file, vmg->anon_vma, vmg->policy,
#ifdef CONFIG_USERFAULTFD
		vmg->uffd_ctx.ctx,
#else
		(void *)0,
#endif
		vmg->anon_name,
		(int)vmg->state,
		vmg->just_expand,
		vmg->__adjust_middle_start, vmg->__adjust_next_start,
		vmg->__remove_middle, vmg->__remove_next);

	if (vmg->mm) {
		pr_warn("vmg %px mm:\n", vmg);
		dump_mm(vmg->mm);
	} else {
		pr_warn("vmg %px mm: (NULL)\n", vmg);
	}

	if (vmg->prev) {
		pr_warn("vmg %px prev:\n", vmg);
		dump_vma(vmg->prev);
	} else {
		pr_warn("vmg %px prev: (NULL)\n", vmg);
	}

	if (vmg->middle) {
		pr_warn("vmg %px middle:\n", vmg);
		dump_vma(vmg->middle);
	} else {
		pr_warn("vmg %px middle: (NULL)\n", vmg);
	}

	if (vmg->next) {
		pr_warn("vmg %px next:\n", vmg);
		dump_vma(vmg->next);
	} else {
		pr_warn("vmg %px next: (NULL)\n", vmg);
	}

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
	if (vmg->vmi) {
		pr_warn("vmg %px vmi:\n", vmg);
		vma_iter_dump_tree(vmg->vmi);
	} else {
		pr_warn("vmg %px vmi: (NULL)\n", vmg);
	}
#endif
}
EXPORT_SYMBOL(dump_vmg);

static bool page_init_poisoning __read_mostly = true;

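/*
 * Parse the "vm_debug" kernel command line option. A bare "vm_debug" (or
 * "vm_debug=") enables every option we control, "vm_debug=-" disables them
 * all, and individual letters select options: currently only 'p' (page
 * struct init poisoning).
 */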
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

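/*
 * Poison the given range of struct pages with PAGE_POISON_PATTERN so that
 * use of uninitialised page structs stands out; gated by the "vm_debug"
 * boot option handled above.
 */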
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif		/* CONFIG_DEBUG_VM */
399