// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016, Rashmica Gupta, IBM Corp.
 *
 * This traverses the kernel page tables and dumps information about the
 * used sections of memory to /sys/kernel/debug/kernel_page_tables.
 *
 * Derived from the arm64 implementation:
 * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
 * (C) Copyright 2008 Intel Corporation, Arjan van de Ven.
 */
#include <linux/const.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ptdump.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <asm/hugetlb.h>

#include <mm/mmu_decl.h>

#include "ptdump.h"

/*
 * To visualise what is happening,
 *
 *  - PTRS_PER_P** = how many entries there are in the corresponding P**
 *  - P**_SHIFT = how many bits of the address we use to index into the
 *    corresponding P**
 *  - P**_SIZE = how much memory we can access through the table - not the
 *    size of the table itself.
 *  - P** = {PGD, PUD, PMD, PTE}
 *
 * Each entry of the PGD points to a PUD. Each entry of a PUD points to a
 * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to
 * a page.
 *
 * In the case where there are only 3 levels, the PUD is folded into the
 * PGD: every PUD has only one entry which points to the PMD.
 *
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the PTE entries. When the continuity is broken it dumps
 * out a description of the range - i.e. PTEs that are virtually contiguous
 * with the same PTE flags are chunked together. This makes it clear how
 * different areas of the kernel virtual memory are used.
 */
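/*
 * Each region is printed as one line: virtual range, starting physical
 * address, size and decoded flags. Illustrative example only - real
 * addresses, sizes and flags depend on the platform and configuration:
 *
 * 0xc000000000000000-0xc000000003ffffff  0x0000000000000000       64M  <flags>
 */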
struct pg_state {
	struct ptdump_state ptdump;
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned long start_pa;
	int level;
	u64 current_flags;
	bool check_wx;
	unsigned long wx_pages;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

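/*
 * The start addresses below are filled in at boot by populate_markers();
 * the order of the entries here must match the order of the assignments
 * there.
 */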
static struct addr_marker address_markers[] = {
	{ 0,	"Start of kernel VM" },
#ifdef MODULES_VADDR
	{ 0,	"modules start" },
	{ 0,	"modules end" },
#endif
	{ 0,	"vmalloc() Area" },
	{ 0,	"vmalloc() End" },
#ifdef CONFIG_PPC64
	{ 0,	"isa I/O start" },
	{ 0,	"isa I/O end" },
	{ 0,	"phb I/O start" },
	{ 0,	"phb I/O end" },
	{ 0,	"I/O remap start" },
	{ 0,	"I/O remap end" },
	{ 0,	"vmemmap start" },
#else
	{ 0,	"Early I/O remap start" },
	{ 0,	"Early I/O remap end" },
#ifdef CONFIG_HIGHMEM
	{ 0,	"Highmem PTEs start" },
	{ 0,	"Highmem PTEs end" },
#endif
	{ 0,	"Fixmap start" },
	{ 0,	"Fixmap end" },
#endif
#ifdef CONFIG_KASAN
	{ 0,	"kasan shadow mem start" },
	{ 0,	"kasan shadow mem end" },
#endif
	{ -1,	NULL },
};

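/*
 * Only the kernel half of the address space is walked. On PPC64 the
 * start and end are narrowed in ptdump_init() once the MMU type is
 * known.
 */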
static struct ptdump_range ptdump_range[] __ro_after_init = {
	{TASK_SIZE_MAX, ~0UL},
	{0, 0}
};

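/*
 * A NULL seq_file means we are only checking mappings, not printing
 * (see ptdump_check_wx()).
 */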
#define pt_dump_seq_printf(m, fmt, args...)	\
({						\
	if (m)					\
		seq_printf(m, fmt, ##args);	\
})

#define pt_dump_seq_putc(m, c)		\
({					\
	if (m)				\
		seq_putc(m, c);		\
})

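/*
 * Print a size using the largest unit that divides it exactly. The
 * caller in this file passes the size in KiB (dump_addr() shifts the
 * byte count right by 10), so the units start at 'K': for example a
 * 16 MiB region arrives here as 16384 and is printed as "16M".
 */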
void pt_dump_size(struct seq_file *m, unsigned long size)
{
	static const char units[] = "KMGTPE";
	const char *unit = units;

	/* Work out what appropriate unit to use */
	while (!(size & 1023) && unit[1]) {
		size >>= 10;
		unit++;
	}
	pt_dump_seq_printf(m, "%9lu%c ", size, *unit);
}

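/*
 * Print the decoded flags for one entry. Each flag's bits are cleared
 * from st->current_flags as they are printed, so that any bits not
 * covered by the flag table can be reported as unknown afterwards.
 */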
static void dump_flag_info(struct pg_state *st, const struct flag_info *flag,
			   u64 pte, int num)
{
	unsigned int i;

	for (i = 0; i < num; i++, flag++) {
		const char *s = NULL;
		u64 val;

		/* flag not defined so don't check it */
		if (flag->mask == 0)
			continue;
		/* Some 'flags' are actually values */
		if (flag->is_val) {
			val = pte & flag->val;
			if (flag->shift)
				val = val >> flag->shift;
			pt_dump_seq_printf(st->seq, "  %s:%llx", flag->set, val);
		} else {
			if ((pte & flag->mask) == flag->val)
				s = flag->set;
			else
				s = flag->clear;
			if (s)
				pt_dump_seq_printf(st->seq, "  %s", s);
		}
		st->current_flags &= ~flag->mask;
	}
	if (st->current_flags != 0)
		pt_dump_seq_printf(st->seq, "  unknown flags:%llx", st->current_flags);
}

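/* Print the virtual range, start physical address and size of a region. */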
static void dump_addr(struct pg_state *st, unsigned long addr)
{
#ifdef CONFIG_PPC64
#define REG		"0x%016lx"
#else
#define REG		"0x%08lx"
#endif

	pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
	pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
	pt_dump_size(st->seq, (addr - st->start_address) >> 10);
}

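/*
 * If W+X checking is enabled, warn once about any region that is both
 * writable and executable and add its pages to the running total.
 */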
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	pte_t pte = __pte(st->current_flags);

	if (!IS_ENABLED(CONFIG_DEBUG_WX) || !st->check_wx)
		return;

	if (!pte_write(pte) || !pte_exec(pte))
		return;

	WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
		  (void *)st->start_address, (void *)st->start_address);

	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

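/*
 * Start tracking a new region at @addr, printing a marker banner for
 * each address-space section boundary that has just been crossed.
 */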
static void note_page_update_state(struct pg_state *st, unsigned long addr, int level, u64 val)
{
	u64 flag = level >= 0 ? val & pg_level[level].mask : 0;
	u64 pa = val & PTE_RPN_MASK;

	st->level = level;
	st->current_flags = flag;
	st->start_address = addr;
	st->start_pa = pa;

	while (addr >= st->marker[1].start_address) {
		st->marker++;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	}
}

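/*
 * Callback from the generic page table walker, invoked for every entry.
 * The current region is extended while the flags, level and section stay
 * the same; otherwise the completed region is dumped and a new one begun.
 */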
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val)
{
	u64 flag = level >= 0 ? val & pg_level[level].mask : 0;
	struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);

	/* At first no level is set */
	if (st->level == -1) {
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
		note_page_update_state(st, addr, level, val);
	/*
	 * Dump the section of virtual memory when:
	 *   - the PTE flags from one entry to the next differ.
	 *   - we change levels in the tree.
	 *   - the address is in a different section of memory and is thus
	 *     used for a different purpose, regardless of the flags.
	 */
	} else if (flag != st->current_flags || level != st->level ||
		   addr >= st->marker[1].start_address) {

		/* Check the PTE flags */
		if (st->current_flags) {
			note_prot_wx(st, addr);
			dump_addr(st, addr);

			/* Dump all the flags */
			if (pg_level[st->level].flag)
				dump_flag_info(st, pg_level[st->level].flag,
					  st->current_flags,
					  pg_level[st->level].num);

			pt_dump_seq_putc(st->seq, '\n');
		}

		/*
		 * Address indicates we have passed the end of the
		 * current section of virtual memory
		 */
		note_page_update_state(st, addr, level, val);
	}
}

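/*
 * Some marker addresses (e.g. ioremap_bot) are only known at runtime,
 * so the table is filled in at init time rather than statically.
 */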
static void populate_markers(void)
{
	int i = 0;

#ifdef CONFIG_PPC64
	address_markers[i++].start_address = PAGE_OFFSET;
#else
	address_markers[i++].start_address = TASK_SIZE;
#endif
#ifdef MODULES_VADDR
	address_markers[i++].start_address = MODULES_VADDR;
	address_markers[i++].start_address = MODULES_END;
#endif
	address_markers[i++].start_address = VMALLOC_START;
	address_markers[i++].start_address = VMALLOC_END;
#ifdef CONFIG_PPC64
	address_markers[i++].start_address = ISA_IO_BASE;
	address_markers[i++].start_address = ISA_IO_END;
	address_markers[i++].start_address = PHB_IO_BASE;
	address_markers[i++].start_address = PHB_IO_END;
	address_markers[i++].start_address = IOREMAP_BASE;
	address_markers[i++].start_address = IOREMAP_END;
	/* Book3S_64 can run hash or radix; the marker uses the hash vmemmap start */
#ifdef CONFIG_PPC_BOOK3S_64
	address_markers[i++].start_address = H_VMEMMAP_START;
#else
	address_markers[i++].start_address = VMEMMAP_BASE;
#endif
#else /* !CONFIG_PPC64 */
	address_markers[i++].start_address = ioremap_bot;
	address_markers[i++].start_address = IOREMAP_TOP;
#ifdef CONFIG_HIGHMEM
	address_markers[i++].start_address = PKMAP_BASE;
	address_markers[i++].start_address = PKMAP_ADDR(LAST_PKMAP);
#endif
	address_markers[i++].start_address = FIXADDR_START;
	address_markers[i++].start_address = FIXADDR_TOP;
#ifdef CONFIG_KASAN
	address_markers[i++].start_address = KASAN_SHADOW_START;
	address_markers[i++].start_address = KASAN_SHADOW_END;
#endif
#endif /* CONFIG_PPC64 */
}

static int ptdump_show(struct seq_file *m, void *v)
{
	struct pg_state st = {
		.seq = m,
		.marker = address_markers,
		.level = -1,
		.ptdump = {
			.note_page = note_page,
			.range = ptdump_range,
		}
	};

	/* Traverse kernel page tables */
	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
	return 0;
}

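/* Generates ptdump_fops, used for the debugfs file created in ptdump_init(). */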
DEFINE_SHOW_ATTRIBUTE(ptdump);

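/*
 * Compute, for each level, the union of all the flag masks, so that
 * note_page() can extract just the bits the flag table knows about.
 */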
static void build_pgtable_complete_mask(void)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].flag)
			for (j = 0; j < pg_level[i].num; j++)
				pg_level[i].mask |= pg_level[i].flag[j].mask;
}

#ifdef CONFIG_DEBUG_WX
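/*
 * Walk the kernel page tables without printing (seq is NULL) and report
 * a summary of how many W+X pages, if any, were found.
 */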
void ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = (struct addr_marker[]) {
			{ 0, NULL},
			{ -1, NULL},
		},
		.level = -1,
		.check_wx = true,
		.ptdump = {
			.note_page = note_page,
			.range = ptdump_range,
		}
	};

	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);

	if (st.wx_pages)
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
			st.wx_pages);
	else
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
#endif

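/*
 * On PPC64 the walk range depends on the MMU: hash kernels place the
 * kernel mapping at KERN_VIRT_START, radix kernels at PAGE_OFFSET. The
 * 32-bit defaults in ptdump_range[] are already correct.
 */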
static int __init ptdump_init(void)
{
#ifdef CONFIG_PPC64
	if (!radix_enabled())
		ptdump_range[0].start = KERN_VIRT_START;
	else
		ptdump_range[0].start = PAGE_OFFSET;

	ptdump_range[0].end = PAGE_OFFSET + (PGDIR_SIZE * PTRS_PER_PGD);
#endif

	populate_markers();
	build_pgtable_complete_mask();

	if (IS_ENABLED(CONFIG_PTDUMP_DEBUGFS))
		debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);

	return 0;
}
device_initcall(ptdump_init);