// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};
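
/*
 * For illustration: trace/events/migrate.h defines MIGRATE_REASON as a
 * list of EM(enum, string) pairs terminated by an EMe() pair, e.g.
 * EM(MR_COMPACTION, "compaction"). With the macros above, the initializer
 * therefore expands to a plain array of strings, roughly:
 *
 *	const char *migrate_reason_names[MR_TYPES] = {
 *		"compaction",
 *		"memory_failure",
 *		...
 *	};
 */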

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

#define DEF_PAGETYPE_NAME(_name) [PGTY_##_name - 0xf0] = __stringify(_name)
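/*
 * For illustration, DEF_PAGETYPE_NAME(slab) expands to
 *
 *	[PGTY_slab - 0xf0] = "slab"
 *
 * so each name lands at the index derived from its PGTY_* value, and the
 * table below stays correct regardless of the order of its entries.
 */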

static const char *page_type_names[] = {
	DEF_PAGETYPE_NAME(slab),
	DEF_PAGETYPE_NAME(hugetlb),
	DEF_PAGETYPE_NAME(offline),
	DEF_PAGETYPE_NAME(guard),
	DEF_PAGETYPE_NAME(table),
	DEF_PAGETYPE_NAME(buddy),
	DEF_PAGETYPE_NAME(unaccepted),
};

static const char *page_type_name(unsigned int page_type)
{
	unsigned int i = (page_type >> 24) - 0xf0;

	if (i >= ARRAY_SIZE(page_type_names))
		return "unknown";
	return page_type_names[i];
}
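
/*
 * For illustration: the page type lives in the top byte of
 * page->page_type, counting up from 0xf0. A page_type of 0xf0xxxxxx thus
 * selects slot 0 of page_type_names[]; any top byte beyond the end of the
 * table is reported as "unknown".
 */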

static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = atomic_read(&page->_mapcount);
	const char *type = "";

	mapcount = page_mapcount_is_type(mapcount) ? 0 : mapcount + 1;
	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_mapcount(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	if (page_has_type(&folio->page))
		pr_warn("page_type: %x(%s)\n", folio->page.page_type >> 24,
				page_type_name(folio->page.page_type));

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), folio,
			2 * sizeof(struct page), false);
}
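
/*
 * For illustration, with hypothetical values, the code above prints
 * something like the following for a mapped order-0 anonymous page:
 *
 *	page: refcount:1 mapcount:1 mapping:0000000000000000 index:0x1000 pfn:0x12345
 *	anon flags: 0x8000000000020014(uptodate|lru|swapbacked)
 *	raw: 8000000000020014 ...
 */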

static void __dump_page(const struct page *page)
{
	struct folio *foliop, folio;
	struct page precise;
	unsigned long head;
	unsigned long pfn = page_to_pfn(page);
	unsigned long idx, nr_pages = 1;
	int loops = 5;

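	/*
	 * The page may be freed, split or merged under us, so work on a
	 * local snapshot (&precise) and retry a few times if the snapshot
	 * looks inconsistent. Bit 0 of compound_head is set on tail pages;
	 * the remaining bits then point at the folio's head page, e.g. a
	 * (hypothetical) compound_head of 0xffff888012345001 marks a tail
	 * page of the folio at 0xffff888012345000.
	 */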
again:
	memcpy(&precise, page, sizeof(*page));
	head = precise.compound_head;
	if ((head & 1) == 0) {
		foliop = (struct folio *)&precise;
		idx = 0;
		if (!folio_test_large(foliop))
			goto dump;
		foliop = (struct folio *)page;
	} else {
		foliop = (struct folio *)(head - 1);
		idx = folio_page_idx(foliop, page);
	}

	if (idx < MAX_FOLIO_NR_PAGES) {
		memcpy(&folio, foliop, 2 * sizeof(struct page));
		nr_pages = folio_nr_pages(&folio);
		foliop = &folio;
	}

	if (idx > nr_pages) {
		if (loops-- > 0)
			goto again;
		pr_warn("page does not match folio\n");
		precise.compound_head &= ~1UL;
		foliop = (struct folio *)&precise;
		idx = 0;
	}

dump:
	__dump_folio(foliop, &precise, pfn, idx);
}

void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned\n", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
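
/*
 * For illustration, a (hypothetical) call site would look like:
 *
 *	if (unlikely(page_ref_count(page) < 0))
 *		dump_page(page, "refcount underflow");
 *
 * reason may be NULL, in which case only the page state is dumped.
 */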

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
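
/*
 * For illustration: dump_vma() backs VM_BUG_ON_VMA() in <linux/mmdebug.h>,
 * so an assertion such as the (hypothetical)
 *
 *	VM_BUG_ON_VMA(vma->vm_start >= vma->vm_end, vma);
 *
 * dumps the whole VMA before panicking.
 */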

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
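
/*
 * For illustration, the resulting boot parameter behaves as follows (see
 * also Documentation/admin-guide/kernel-parameters.txt):
 *
 *	vm_debug	enable all available debug options
 *	vm_debug=-	disable all debug options
 *	vm_debug=p	enable struct page init poisoning only
 *
 * Unknown option characters are reported and skipped.
 */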

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
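
/*
 * Note: PAGE_POISON_PATTERN (from <linux/poison.h>) is -1, i.e. every byte
 * of the struct page is set to 0xff; PagePoisoned(), used by dump_page()
 * above, recognises such pages by page->flags being all ones.
 */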

void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif		/* CONFIG_DEBUG_VM */