xref: /linux/arch/powerpc/mm/ptdump/ptdump.c (revision c5d3cdad688ed75fb311a3a671eb30ba7106d7d3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2016, Rashmica Gupta, IBM Corp.
4  *
5  * This traverses the kernel page tables and dumps information
6  * about the used sections of memory to
7  * /sys/kernel/debug/kernel_page_tables.
8  *
9  * Derived from the arm64 implementation:
10  * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
11  * (C) Copyright 2008 Intel Corporation, Arjan van de Ven.
12  */
13 #include <linux/debugfs.h>
14 #include <linux/fs.h>
15 #include <linux/hugetlb.h>
16 #include <linux/io.h>
17 #include <linux/mm.h>
18 #include <linux/highmem.h>
19 #include <linux/sched.h>
20 #include <linux/seq_file.h>
21 #include <asm/fixmap.h>
22 #include <asm/pgtable.h>
23 #include <linux/const.h>
24 #include <asm/page.h>
25 #include <asm/pgalloc.h>
26 
27 #include <mm/mmu_decl.h>
28 
29 #include "ptdump.h"
30 
31 /*
32  * To visualise what is happening,
33  *
34  *  - PTRS_PER_P** = how many entries there are in the corresponding P**
35  *  - P**_SHIFT = how many bits of the address we use to index into the
36  * corresponding P**
37  *  - P**_SIZE is how much memory we can access through the table - not the
38  * size of the table itself.
39  * P**={PGD, PUD, PMD, PTE}
40  *
41  *
42  * Each entry of the PGD points to a PUD. Each entry of a PUD points to a
43  * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to
44  * a page.
45  *
46  * In the case where there are only 3 levels, the PUD is folded into the
47  * PGD: every PUD has only one entry which points to the PMD.
48  *
49  * The page dumper groups page table entries of the same type into a single
50  * description. It uses pg_state to track the range information while
51  * iterating over the PTE entries. When the continuity is broken it then
52  * dumps out a description of the range - i.e. PTEs that are virtually contiguous
53  * with the same PTE flags are chunked together. This is to make it clear how
54  * different areas of the kernel virtual memory are used.
55  *
56  */
57 struct pg_state {
58 	struct seq_file *seq;
59 	const struct addr_marker *marker;
60 	unsigned long start_address;
61 	unsigned long start_pa;
62 	unsigned long last_pa;
63 	unsigned int level;
64 	u64 current_flags;
65 	bool check_wx;
66 	unsigned long wx_pages;
67 };
68 
69 struct addr_marker {
70 	unsigned long start_address;
71 	const char *name;
72 };
73 
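/*
 * Boundaries between the named regions of kernel virtual address space.
 * The start_address fields are zero here and are filled in at init time
 * by populate_markers(); the entries must stay in the same order as the
 * assignments made there.
 */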
74 static struct addr_marker address_markers[] = {
75 	{ 0,	"Start of kernel VM" },
76 	{ 0,	"vmalloc() Area" },
77 	{ 0,	"vmalloc() End" },
78 #ifdef CONFIG_PPC64
79 	{ 0,	"isa I/O start" },
80 	{ 0,	"isa I/O end" },
81 	{ 0,	"phb I/O start" },
82 	{ 0,	"phb I/O end" },
83 	{ 0,	"I/O remap start" },
84 	{ 0,	"I/O remap end" },
85 	{ 0,	"vmemmap start" },
86 #else
87 	{ 0,	"Early I/O remap start" },
88 	{ 0,	"Early I/O remap end" },
89 #ifdef CONFIG_HIGHMEM
90 	{ 0,	"Highmem PTEs start" },
91 	{ 0,	"Highmem PTEs end" },
92 #endif
93 	{ 0,	"Fixmap start" },
94 	{ 0,	"Fixmap end" },
95 #endif
96 #ifdef CONFIG_KASAN
97 	{ 0,	"kasan shadow mem start" },
98 	{ 0,	"kasan shadow mem end" },
99 #endif
100 	{ -1,	NULL },
101 };
102 
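/*
 * Output helpers that tolerate a NULL seq_file: ptdump_check_wx() walks
 * the tables with st->seq == NULL so that only the W+X accounting runs
 * and nothing is printed.
 */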
103 #define pt_dump_seq_printf(m, fmt, args...)	\
104 ({						\
105 	if (m)					\
106 		seq_printf(m, fmt, ##args);	\
107 })
108 
109 #define pt_dump_seq_putc(m, c)		\
110 ({					\
111 	if (m)				\
112 		seq_putc(m, c);		\
113 })
114 
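/*
 * Print the decoded flags of one entry.  Each flag_info entry either
 * names a single flag (set/clear strings) or extracts a field (is_val,
 * with an optional shift).  Bits covered by the table are cleared from
 * st->current_flags so that any leftover bits are reported as unknown.
 */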
115 static void dump_flag_info(struct pg_state *st, const struct flag_info
116 		*flag, u64 pte, int num)
117 {
118 	unsigned int i;
119 
120 	for (i = 0; i < num; i++, flag++) {
121 		const char *s = NULL;
122 		u64 val;
123 
124 		/* flag not defined so don't check it */
125 		if (flag->mask == 0)
126 			continue;
127 		/* Some 'flags' are actually values */
128 		if (flag->is_val) {
129 			val = pte & flag->val;
130 			if (flag->shift)
131 				val = val >> flag->shift;
132 			pt_dump_seq_printf(st->seq, "  %s:%llx", flag->set, val);
133 		} else {
134 			if ((pte & flag->mask) == flag->val)
135 				s = flag->set;
136 			else
137 				s = flag->clear;
138 			if (s)
139 				pt_dump_seq_printf(st->seq, "  %s", s);
140 		}
141 		st->current_flags &= ~flag->mask;
142 	}
143 	if (st->current_flags != 0)
144 		pt_dump_seq_printf(st->seq, "  unknown flags:%llx", st->current_flags);
145 }
146 
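/*
 * Print one range: the virtual start and end, then either "[pa]" when
 * several virtual pages map to the same physical page, or the physical
 * start followed by the range size scaled to the largest exact unit.
 * An illustrative line (made-up addresses; flags are appended later by
 * dump_flag_info()):
 *
 *   0xc000000000000000-0xc0000000003fffff  0x0000000000000000         4M
 */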
147 static void dump_addr(struct pg_state *st, unsigned long addr)
148 {
149 	static const char units[] = "KMGTPE";
150 	const char *unit = units;
151 	unsigned long delta;
152 
153 #ifdef CONFIG_PPC64
154 #define REG		"0x%016lx"
155 #else
156 #define REG		"0x%08lx"
157 #endif
158 
159 	pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
160 	if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) {
161 		pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
162 		delta = PAGE_SIZE >> 10;
163 	} else {
164 		pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
165 		delta = (addr - st->start_address) >> 10;
166 	}
167 	/* Work out the appropriate unit to use */
168 	while (!(delta & 1023) && unit[1]) {
169 		delta >>= 10;
170 		unit++;
171 	}
172 	pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
173 
174 }
175 
176 static void note_prot_wx(struct pg_state *st, unsigned long addr)
177 {
178 	pte_t pte = __pte(st->current_flags);
179 
180 	if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
181 		return;
182 
183 	if (!pte_write(pte) || !pte_exec(pte))
184 		return;
185 
186 	WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
187 		  (void *)st->start_address, (void *)st->start_address);
188 
189 	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
190 }
191 
192 static void note_page(struct pg_state *st, unsigned long addr,
193 	       unsigned int level, u64 val)
194 {
195 	u64 flag = val & pg_level[level].mask;
196 	u64 pa = val & PTE_RPN_MASK;
197 
198 	/* At first no level is set */
199 	if (!st->level) {
200 		st->level = level;
201 		st->current_flags = flag;
202 		st->start_address = addr;
203 		st->start_pa = pa;
204 		st->last_pa = pa;
205 		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
206 	/*
207 	 * Dump the section of virtual memory when:
208 	 *   - the PTE flags from one entry to the next differ.
209 	 *   - we change levels in the tree.
210 	 *   - the address is in a different section of memory and is thus
211 	 *   used for a different purpose, regardless of the flags.
212 	 *   - the pa of this page is not adjacent to the last inspected page
213 	 */
214 	} else if (flag != st->current_flags || level != st->level ||
215 		   addr >= st->marker[1].start_address ||
216 		   (pa != st->last_pa + PAGE_SIZE &&
217 		    (pa != st->start_pa || st->start_pa != st->last_pa))) {
218 
219 		/* Check the PTE flags */
220 		if (st->current_flags) {
221 			note_prot_wx(st, addr);
222 			dump_addr(st, addr);
223 
224 			/* Dump all the flags */
225 			if (pg_level[st->level].flag)
226 				dump_flag_info(st, pg_level[st->level].flag,
227 					  st->current_flags,
228 					  pg_level[st->level].num);
229 
230 			pt_dump_seq_putc(st->seq, '\n');
231 		}
232 
233 		/*
234 		 * Address indicates we have passed the end of the
235 		 * current section of virtual memory
236 		 */
237 		while (addr >= st->marker[1].start_address) {
238 			st->marker++;
239 			pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
240 		}
241 		st->start_address = addr;
242 		st->start_pa = pa;
243 		st->last_pa = pa;
244 		st->current_flags = flag;
245 		st->level = level;
246 	} else {
247 		st->last_pa = pa;
248 	}
249 }
250 
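/*
 * The level passed to note_page() indexes pg_level[]: 1 = PGD, 2 = PUD,
 * 3 = PMD, 4 = PTE.  Leaf entries at the upper levels (huge mappings)
 * and empty entries are reported at their own level rather than being
 * walked further.
 */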
251 static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
252 {
253 	pte_t *pte = pte_offset_kernel(pmd, 0);
254 	unsigned long addr;
255 	unsigned int i;
256 
257 	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
258 		addr = start + i * PAGE_SIZE;
259 		note_page(st, addr, 4, pte_val(*pte));
260 
261 	}
262 }
263 
264 static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
265 {
266 	pmd_t *pmd = pmd_offset(pud, 0);
267 	unsigned long addr;
268 	unsigned int i;
269 
270 	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
271 		addr = start + i * PMD_SIZE;
272 		if (!pmd_none(*pmd) && !pmd_is_leaf(*pmd))
273 			/* pmd exists */
274 			walk_pte(st, pmd, addr);
275 		else
276 			note_page(st, addr, 3, pmd_val(*pmd));
277 	}
278 }
279 
280 static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
281 {
282 	pud_t *pud = pud_offset(pgd, 0);
283 	unsigned long addr;
284 	unsigned int i;
285 
286 	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
287 		addr = start + i * PUD_SIZE;
288 		if (!pud_none(*pud) && !pud_is_leaf(*pud))
289 			/* pud exists */
290 			walk_pmd(st, pud, addr);
291 		else
292 			note_page(st, addr, 2, pud_val(*pud));
293 	}
294 }
295 
296 static void walk_pagetables(struct pg_state *st)
297 {
298 	unsigned int i;
299 	unsigned long addr = st->start_address & PGDIR_MASK;
300 	pgd_t *pgd = pgd_offset_k(addr);
301 
302 	/*
303 	 * Traverse the Linux page table structure and report the mapped
304 	 * ranges via note_page().
305 	 */
306 	for (i = pgd_index(addr); i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
307 		if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd))
308 			/* pgd exists */
309 			walk_pud(st, pgd, addr);
310 		else
311 			note_page(st, addr, 1, pgd_val(*pgd));
312 	}
313 }
314 
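/*
 * Fill in the start addresses of address_markers[]; the assignments must
 * remain in the same order as the marker names declared above.
 */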
315 static void populate_markers(void)
316 {
317 	int i = 0;
318 
319 	address_markers[i++].start_address = PAGE_OFFSET;
320 	address_markers[i++].start_address = VMALLOC_START;
321 	address_markers[i++].start_address = VMALLOC_END;
322 #ifdef CONFIG_PPC64
323 	address_markers[i++].start_address = ISA_IO_BASE;
324 	address_markers[i++].start_address = ISA_IO_END;
325 	address_markers[i++].start_address = PHB_IO_BASE;
326 	address_markers[i++].start_address = PHB_IO_END;
327 	address_markers[i++].start_address = IOREMAP_BASE;
328 	address_markers[i++].start_address = IOREMAP_END;
329 	/* Book3S/64 uses H_VMEMMAP_START; other 64-bit uses VMEMMAP_BASE */
330 #ifdef CONFIG_PPC_BOOK3S_64
331 	address_markers[i++].start_address =  H_VMEMMAP_START;
332 #else
333 	address_markers[i++].start_address =  VMEMMAP_BASE;
334 #endif
335 #else /* !CONFIG_PPC64 */
336 	address_markers[i++].start_address = ioremap_bot;
337 	address_markers[i++].start_address = IOREMAP_TOP;
338 #ifdef CONFIG_HIGHMEM
339 	address_markers[i++].start_address = PKMAP_BASE;
340 	address_markers[i++].start_address = PKMAP_ADDR(LAST_PKMAP);
341 #endif
342 	address_markers[i++].start_address = FIXADDR_START;
343 	address_markers[i++].start_address = FIXADDR_TOP;
344 #ifdef CONFIG_KASAN
345 	address_markers[i++].start_address = KASAN_SHADOW_START;
346 	address_markers[i++].start_address = KASAN_SHADOW_END;
347 #endif
348 #endif /* CONFIG_PPC64 */
349 }
350 
351 static int ptdump_show(struct seq_file *m, void *v)
352 {
353 	struct pg_state st = {
354 		.seq = m,
355 		.marker = address_markers,
356 		.start_address = PAGE_OFFSET,
357 	};
358 
359 #ifdef CONFIG_PPC64
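	/*
	 * Under the hash MMU the linear mapping is not tracked in the
	 * Linux page tables, so start the walk at KERN_VIRT_START
	 * rather than PAGE_OFFSET.
	 */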
360 	if (!radix_enabled())
361 		st.start_address = KERN_VIRT_START;
362 #endif
363 
364 	/* Traverse kernel page tables */
365 	walk_pagetables(&st);
366 	note_page(&st, 0, 0, 0);
367 	return 0;
368 }
369 
370 
371 static int ptdump_open(struct inode *inode, struct file *file)
372 {
373 	return single_open(file, ptdump_show, NULL);
374 }
375 
376 static const struct file_operations ptdump_fops = {
377 	.open		= ptdump_open,
378 	.read		= seq_read,
379 	.llseek		= seq_lseek,
380 	.release	= single_release,
381 };
382 
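/*
 * Pre-compute, for each level, the union of all described flag masks so
 * that note_page() compares entries on just those bits when deciding
 * whether to group them into one range.
 */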
383 static void build_pgtable_complete_mask(void)
384 {
385 	unsigned int i, j;
386 
387 	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
388 		if (pg_level[i].flag)
389 			for (j = 0; j < pg_level[i].num; j++)
390 				pg_level[i].mask |= pg_level[i].flag[j].mask;
391 }
392 
393 #ifdef CONFIG_PPC_DEBUG_WX
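/*
 * Walk the kernel page tables with no seq_file attached and count pages
 * mapped both writable and executable (see note_prot_wx()).  A sketch of
 * how a caller might invoke it (the real call site may differ):
 *
 *	if (IS_ENABLED(CONFIG_PPC_DEBUG_WX))
 *		ptdump_check_wx();
 */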
394 void ptdump_check_wx(void)
395 {
396 	struct pg_state st = {
397 		.seq = NULL,
398 		.marker = address_markers,
399 		.check_wx = true,
400 		.start_address = PAGE_OFFSET,
401 	};
402 
403 #ifdef CONFIG_PPC64
404 	if (!radix_enabled())
405 		st.start_address = KERN_VIRT_START;
406 #endif
407 
408 	walk_pagetables(&st);
409 
410 	if (st.wx_pages)
411 		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
412 			st.wx_pages);
413 	else
414 		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
415 }
416 #endif
417 
418 static int ptdump_init(void)
419 {
420 	populate_markers();
421 	build_pgtable_complete_mask();
422 	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
423 			    &ptdump_fops);
424 	return 0;
425 }
426 device_initcall(ptdump_init);
427