// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

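/*
 * Illustrative expansion, assuming the MR_COMPACTION entry from
 * trace/events/migrate.h: EM(MR_COMPACTION, "compaction") becomes the
 * string literal "compaction", so expanding MIGRATE_REASON below yields
 * a plain array of reason names indexed by enum migrate_reason.
 */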
const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags pagetype_names[] = {
	__def_pagetype_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

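/*
 * Dump the state of one page of a folio: the refcount/mapcount/mapping
 * summary line, extra head-page details for large folios, the memcg
 * pointer, the decoded page flags and page type, and a raw hex dump of
 * the struct page(s). Typically operates on a caller-provided snapshot,
 * so all values may be stale by the time they are printed.
 */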
static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = 0;
	const char *type = "";

	/*
	 * page->_mapcount space in struct page is used by slab pages to
	 * encode their own info, and we must avoid calling page_folio()
	 * again.
	 */
	if (!folio_test_slab(folio)) {
		mapcount = atomic_read(&page->_mapcount) + 1;
		if (folio_test_large(folio))
			mapcount += folio_entire_mapcount(folio);
	}

	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		pr_warn("head: order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	pr_warn("page_type: %pGt\n", &folio->page.page_type);

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), folio,
			2 * sizeof(struct page), false);
}

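/*
 * Snapshot the page and its folio before dumping. The page may be
 * concurrently freed, split or otherwise modified, so copy it (and up
 * to two struct pages of its folio) into local storage first, and retry
 * a few times if the snapshot looks internally inconsistent.
 */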
static void __dump_page(const struct page *page)
{
	struct folio *foliop, folio;
	struct page precise;
	unsigned long pfn = page_to_pfn(page);
	unsigned long idx, nr_pages = 1;
	int loops = 5;

again:
	memcpy(&precise, page, sizeof(*page));
	foliop = page_folio(&precise);
	if (foliop == (struct folio *)&precise) {
		idx = 0;
		if (!folio_test_large(foliop))
			goto dump;
		foliop = (struct folio *)page;
	} else {
		idx = folio_page_idx(foliop, page);
	}

	if (idx < MAX_FOLIO_NR_PAGES) {
		memcpy(&folio, foliop, 2 * sizeof(struct page));
		nr_pages = folio_nr_pages(&folio);
		foliop = &folio;
	}

	if (idx > nr_pages) {
		if (loops-- > 0)
			goto again;
		pr_warn("page does not match folio\n");
		precise.compound_head &= ~1UL;
		foliop = (struct folio *)&precise;
		idx = 0;
	}

dump:
	__dump_folio(foliop, &precise, pfn, idx);
}

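/**
 * dump_page() - Print the state of a page for debugging.
 * @page: The page of interest.
 * @reason: Printed after the dump to say why the page was dumped.
 *
 * Poisoned (uninitialized) pages are reported by pointer only rather
 * than being decoded. Also prints the page owner when page_owner is
 * enabled.
 */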
void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned\n", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

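/* Print the interesting fields of a VMA at emergency loglevel. */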
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

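/*
 * Print the fields of an mm_struct at emergency loglevel. The #ifdefs
 * in the argument list below must stay in step with those in the
 * format string above them.
 */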
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);

static bool page_init_poisoning __read_mostly = true;

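/*
 * Parse the "vm_debug" kernel command line parameter. For example (an
 * illustrative boot command line): "vm_debug=p" enables page struct
 * poisoning, "vm_debug" alone enables every option we control, and
 * "vm_debug=-" disables them all.
 */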
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Passing vm_debug with no arguments is equivalent to requesting
	 * that all debugging options we can control be enabled.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

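/*
 * Poison the memory of a freshly exposed struct page so that reads of
 * uninitialized fields are caught, unless poisoning was turned off on
 * the command line.
 */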
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

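/*
 * Dump the maple tree backing the VMA iterator; compiles to a no-op
 * unless CONFIG_DEBUG_VM_MAPLE_TREE is enabled.
 */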
void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif		/* CONFIG_DEBUG_VM */