// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016, Rashmica Gupta, IBM Corp.
 *
 * This traverses the kernel pagetables and dumps information about the
 * used sections of memory to /sys/kernel/debug/kernel_page_tables.
 *
 * Derived from the arm64 implementation:
 * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
 * (C) Copyright 2008 Intel Corporation, Arjan van de Ven.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
#include <linux/const.h>
#include <asm/page.h>
#include <asm/hugetlb.h>

#include <mm/mmu_decl.h>

#include "ptdump.h"

/*
 * To visualise what is happening,
 *
 * - PTRS_PER_P** = how many entries there are in the corresponding P**
 * - P**_SHIFT = how many bits of the address we use to index into the
 *   corresponding P**
 * - P**_SIZE is how much memory we can access through the table - not the
 *   size of the table itself.
 * P** = {PGD, PUD, PMD, PTE}
 *
 * Each entry of the PGD points to a PUD. Each entry of a PUD points to a
 * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to
 * a page.
 *
 * In the case where there are only 3 levels, the PUD is folded into the
 * PGD: every PUD has only one entry, which points to the PMD.
 *
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the PTE entries. When the continuity is broken it then
 * dumps out a description of the range - i.e. PTEs that are virtually
 * contiguous with the same PTE flags are chunked together. This makes it
 * clear how different areas of the kernel virtual memory are used.
 */
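/*
 * To illustrate the grouping, a dump looks roughly like the hypothetical
 * 64-bit excerpt below. The flag strings come from the pg_level tables
 * defined outside this file, so the "r w pte" column here is an
 * illustrative assumption rather than verbatim output, and spacing is
 * approximate:
 *
 *   ---[ Start of kernel VM ]---
 *   0xc000000000000000-0xc000000000ffffff  0x0000000000000000   16M  r  w  pte
 *   ---[ vmalloc() Area ]---
 *   ...
 *
 * Each line covers one run of virtually contiguous entries sharing a level
 * and flags; the bracketed section headers come from address_markers[]
 * below.
 */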
struct pg_state {
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned long start_pa;
	unsigned long last_pa;
	unsigned long page_size;
	unsigned int level;
	u64 current_flags;
	bool check_wx;
	unsigned long wx_pages;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

static struct addr_marker address_markers[] = {
	{ 0,	"Start of kernel VM" },
	{ 0,	"vmalloc() Area" },
	{ 0,	"vmalloc() End" },
#ifdef CONFIG_PPC64
	{ 0,	"isa I/O start" },
	{ 0,	"isa I/O end" },
	{ 0,	"phb I/O start" },
	{ 0,	"phb I/O end" },
	{ 0,	"I/O remap start" },
	{ 0,	"I/O remap end" },
	{ 0,	"vmemmap start" },
#else
	{ 0,	"Early I/O remap start" },
	{ 0,	"Early I/O remap end" },
#ifdef CONFIG_HIGHMEM
	{ 0,	"Highmem PTEs start" },
	{ 0,	"Highmem PTEs end" },
#endif
	{ 0,	"Fixmap start" },
	{ 0,	"Fixmap end" },
#endif
#ifdef CONFIG_KASAN
	{ 0,	"kasan shadow mem start" },
	{ 0,	"kasan shadow mem end" },
#endif
	{ -1,	NULL },
};

#define pt_dump_seq_printf(m, fmt, args...)	\
({						\
	if (m)					\
		seq_printf(m, fmt, ##args);	\
})

#define pt_dump_seq_putc(m, c)	\
({				\
	if (m)			\
		seq_putc(m, c);	\
})

void pt_dump_size(struct seq_file *m, unsigned long size)
{
	static const char units[] = "KMGTPE";
	const char *unit = units;

	/* Work out the appropriate unit to use */
	while (!(size & 1023) && unit[1]) {
		size >>= 10;
		unit++;
	}
	pt_dump_seq_printf(m, "%9lu%c ", size, *unit);
}
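/*
 * Worked example for pt_dump_size(): dump_addr() below passes sizes in
 * KiB, hence the unit table starts at 'K'. A 2 MiB range arrives as
 * size = 2048; 2048 is divisible by 1024, so the loop shifts it down to 2
 * and advances the unit from 'K' to 'M', printing "        2M ". An odd
 * size such as 12 KiB stays put, since (12 & 1023) != 0, and is printed
 * as "       12K ".
 */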
static void dump_flag_info(struct pg_state *st, const struct flag_info
		*flag, u64 pte, int num)
{
	unsigned int i;

	for (i = 0; i < num; i++, flag++) {
		const char *s = NULL;
		u64 val;

		/* flag not defined so don't check it */
		if (flag->mask == 0)
			continue;
		/* Some 'flags' are actually values */
		if (flag->is_val) {
			val = pte & flag->val;
			if (flag->shift)
				val = val >> flag->shift;
			pt_dump_seq_printf(st->seq, " %s:%llx", flag->set, val);
		} else {
			if ((pte & flag->mask) == flag->val)
				s = flag->set;
			else
				s = flag->clear;
			if (s)
				pt_dump_seq_printf(st->seq, " %s", s);
		}
		st->current_flags &= ~flag->mask;
	}
	if (st->current_flags != 0)
		pt_dump_seq_printf(st->seq, " unknown flags:%llx", st->current_flags);
}

static void dump_addr(struct pg_state *st, unsigned long addr)
{
	unsigned long delta;

#ifdef CONFIG_PPC64
#define REG "0x%016lx"
#else
#define REG "0x%08lx"
#endif

	pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
	if (st->start_pa == st->last_pa && st->start_address + st->page_size != addr) {
		pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
		delta = st->page_size >> 10;
	} else {
		pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
		delta = (addr - st->start_address) >> 10;
	}
	pt_dump_size(st->seq, delta);
}

static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	pte_t pte = __pte(st->current_flags);

	if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
		return;

	if (!pte_write(pte) || !pte_exec(pte))
		return;

	WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
		  (void *)st->start_address, (void *)st->start_address);

	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

static void note_page(struct pg_state *st, unsigned long addr,
		      unsigned int level, u64 val, unsigned long page_size)
{
	u64 flag = val & pg_level[level].mask;
	u64 pa = val & PTE_RPN_MASK;

	/* At first no level is set */
	if (!st->level) {
		st->level = level;
		st->current_flags = flag;
		st->start_address = addr;
		st->start_pa = pa;
		st->last_pa = pa;
		st->page_size = page_size;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	/*
	 * Dump the section of virtual memory when:
	 *   - the PTE flags from one entry to the next differ.
	 *   - we change levels in the tree.
	 *   - the address is in a different section of memory and is thus
	 *     used for a different purpose, regardless of the flags.
	 *   - the pa of this page is not adjacent to the last inspected page
	 */
	} else if (flag != st->current_flags || level != st->level ||
		   addr >= st->marker[1].start_address ||
		   (pa != st->last_pa + st->page_size &&
		    (pa != st->start_pa || st->start_pa != st->last_pa))) {

		/* Check the PTE flags */
		if (st->current_flags) {
			note_prot_wx(st, addr);
			dump_addr(st, addr);

			/* Dump all the flags */
			if (pg_level[st->level].flag)
				dump_flag_info(st, pg_level[st->level].flag,
					       st->current_flags,
					       pg_level[st->level].num);

			pt_dump_seq_putc(st->seq, '\n');
		}

		/*
		 * Address indicates we have passed the end of the
		 * current section of virtual memory
		 */
		while (addr >= st->marker[1].start_address) {
			st->marker++;
			pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
		}
		st->start_address = addr;
		st->start_pa = pa;
		st->last_pa = pa;
		st->page_size = page_size;
		st->current_flags = flag;
		st->level = level;
	} else {
		st->last_pa = pa;
	}
}
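/*
 * Worked example of the coalescing in note_page() (addresses are
 * illustrative): with 4K pages, a PTE mapping va X -> pa 0x1000 followed
 * by a PTE mapping va X+0x1000 -> pa 0x2000 with identical flags extends
 * the current range, because pa == last_pa + page_size. A third PTE
 * mapping pa 0x8000 breaks physical contiguity, so the first range is
 * printed and a new one starts there. The (pa == start_pa &&
 * start_pa == last_pa) exception keeps a run of entries that all alias
 * the same physical page in one range, which dump_addr() renders with a
 * bracketed "[pa]" instead of a plain physical address.
 */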
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		addr = start + i * PAGE_SIZE;
		note_page(st, addr, 4, pte_val(*pte), PAGE_SIZE);
	}
}

static void walk_hugepd(struct pg_state *st, hugepd_t *phpd, unsigned long start,
			int pdshift, int level)
{
#ifdef CONFIG_ARCH_HAS_HUGEPD
	unsigned int i;
	int shift = hugepd_shift(*phpd);
	int ptrs_per_hpd = pdshift - shift > 0 ? 1 << (pdshift - shift) : 1;

	if (start & ((1 << shift) - 1))
		return;

	for (i = 0; i < ptrs_per_hpd; i++) {
		unsigned long addr = start + (i << shift);
		pte_t *pte = hugepte_offset(*phpd, addr, pdshift);

		note_page(st, addr, level + 1, pte_val(*pte), 1 << shift);
	}
#endif
}

static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
		if (!pmd_none(*pmd) && !pmd_is_leaf(*pmd))
			/* pmd exists */
			walk_pte(st, pmd, addr);
		else
			note_page(st, addr, 3, pmd_val(*pmd), PMD_SIZE);
	}
}

static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
{
	pud_t *pud = pud_offset(p4d, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
		if (!pud_none(*pud) && !pud_is_leaf(*pud))
			/* pud exists */
			walk_pmd(st, pud, addr);
		else
			note_page(st, addr, 2, pud_val(*pud), PUD_SIZE);
	}
}

static void walk_pagetables(struct pg_state *st)
{
	unsigned int i;
	unsigned long addr = st->start_address & PGDIR_MASK;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * Traverse the Linux pagetable structure and dump the mapped pages.
	 */
	for (i = pgd_index(addr); i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
		p4d_t *p4d = p4d_offset(pgd, 0);

		if (p4d_none(*p4d) || p4d_is_leaf(*p4d))
			note_page(st, addr, 1, p4d_val(*p4d), PGDIR_SIZE);
		else if (is_hugepd(__hugepd(p4d_val(*p4d))))
			walk_hugepd(st, (hugepd_t *)p4d, addr, PGDIR_SHIFT, 1);
		else
			/* p4d exists */
			walk_pud(st, p4d, addr);
	}
}

static void populate_markers(void)
{
	int i = 0;

	address_markers[i++].start_address = PAGE_OFFSET;
	address_markers[i++].start_address = VMALLOC_START;
	address_markers[i++].start_address = VMALLOC_END;
#ifdef CONFIG_PPC64
	address_markers[i++].start_address = ISA_IO_BASE;
	address_markers[i++].start_address = ISA_IO_END;
	address_markers[i++].start_address = PHB_IO_BASE;
	address_markers[i++].start_address = PHB_IO_END;
	address_markers[i++].start_address = IOREMAP_BASE;
	address_markers[i++].start_address = IOREMAP_END;
	/* Book3S 64 selects the vmemmap base by MMU at runtime; the hash
	 * MMU value is used for this marker.
	 */
#ifdef CONFIG_PPC_BOOK3S_64
	address_markers[i++].start_address = H_VMEMMAP_START;
#else
	address_markers[i++].start_address = VMEMMAP_BASE;
#endif
#else /* !CONFIG_PPC64 */
	address_markers[i++].start_address = ioremap_bot;
	address_markers[i++].start_address = IOREMAP_TOP;
#ifdef CONFIG_HIGHMEM
	address_markers[i++].start_address = PKMAP_BASE;
	address_markers[i++].start_address = PKMAP_ADDR(LAST_PKMAP);
#endif
	address_markers[i++].start_address = FIXADDR_START;
	address_markers[i++].start_address = FIXADDR_TOP;
#ifdef CONFIG_KASAN
	address_markers[i++].start_address = KASAN_SHADOW_START;
	address_markers[i++].start_address = KASAN_SHADOW_END;
#endif
#endif /* CONFIG_PPC64 */
}

static int ptdump_show(struct seq_file *m, void *v)
{
	struct pg_state st = {
		.seq = m,
		.marker = address_markers,
		.start_address = PAGE_OFFSET,
	};

#ifdef CONFIG_PPC64
	if (!radix_enabled())
		st.start_address = KERN_VIRT_START;
#endif

	/* Traverse kernel page tables */
	walk_pagetables(&st);
	note_page(&st, 0, 0, 0, 0);
	return 0;
}

static int ptdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
	.open		= ptdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void build_pgtable_complete_mask(void)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].flag)
			for (j = 0; j < pg_level[i].num; j++)
				pg_level[i].mask |= pg_level[i].flag[j].mask;
}

#ifdef CONFIG_PPC_DEBUG_WX
void ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = address_markers,
		.check_wx = true,
		.start_address = PAGE_OFFSET,
	};

#ifdef CONFIG_PPC64
	if (!radix_enabled())
		st.start_address = KERN_VIRT_START;
#endif

	walk_pagetables(&st);

	if (st.wx_pages)
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
			st.wx_pages);
	else
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
#endif

static int ptdump_init(void)
{
	populate_markers();
	build_pgtable_complete_mask();
	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
			    &ptdump_fops);
	return 0;
}
device_initcall(ptdump_init);
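/*
 * Usage sketch, assuming debugfs is mounted at the conventional
 * /sys/kernel/debug:
 *
 *   # cat /sys/kernel/debug/kernel_page_tables
 *
 * prints the grouped ranges between the section markers. With
 * CONFIG_PPC_DEBUG_WX enabled, ptdump_check_wx() performs the same walk
 * silently (st.seq == NULL, so the pt_dump_seq_* helpers emit nothing)
 * and reports only pages that are both writable and executable.
 */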