/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * Derived from x86 implementation:
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/seq_file.h>

#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/pgtable.h>

struct addr_marker {
        unsigned long start_address;
        const char *name;
};

static struct addr_marker address_markers[] = {
        { MODULES_VADDR,                "Modules" },
        { PAGE_OFFSET,                  "Kernel Mapping" },
        { 0,                            "vmalloc() Area" },
        { VMALLOC_END,                  "vmalloc() End" },
        { FIXADDR_START,                "Fixmap Area" },
        { VECTORS_BASE,                 "Vectors" },
        { VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
        { -1,                           NULL },
};

struct pg_state {
        struct seq_file *seq;
        const struct addr_marker *marker;
        unsigned long start_address;
        unsigned level;
        u64 current_prot;
};

struct prot_bits {
        u64 mask;
        u64 val;
        const char *set;
        const char *clear;
};

static const struct prot_bits pte_bits[] = {
        {
                .mask = L_PTE_USER,
                .val = L_PTE_USER,
                .set = "USR",
                .clear = "   ",
        }, {
                .mask = L_PTE_RDONLY,
                .val = L_PTE_RDONLY,
                .set = "ro",
                .clear = "RW",
        }, {
                .mask = L_PTE_XN,
                .val = L_PTE_XN,
                .set = "NX",
                .clear = "x ",
        }, {
                .mask = L_PTE_SHARED,
                .val = L_PTE_SHARED,
                .set = "SHD",
                .clear = "   ",
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_UNCACHED,
                .set = "SO/UNCACHED",
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_BUFFERABLE,
                .set = "MEM/BUFFERABLE/WC",
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_WRITETHROUGH,
                .set = "MEM/CACHED/WT",
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_WRITEBACK,
                .set = "MEM/CACHED/WBRA",
#ifndef CONFIG_ARM_LPAE
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_MINICACHE,
                .set = "MEM/MINICACHE",
#endif
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_WRITEALLOC,
                .set = "MEM/CACHED/WBWA",
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_DEV_SHARED,
                .set = "DEV/SHARED",
#ifndef CONFIG_ARM_LPAE
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_DEV_NONSHARED,
                .set = "DEV/NONSHARED",
#endif
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_DEV_WC,
                .set = "DEV/WC",
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_DEV_CACHED,
                .set = "DEV/CACHED",
        },
};

static const struct prot_bits section_bits[] = {
#ifdef CONFIG_ARM_LPAE
        {
                .mask = PMD_SECT_USER,
                .val = PMD_SECT_USER,
                .set = "USR",
        }, {
                .mask = L_PMD_SECT_RDONLY,
                .val = L_PMD_SECT_RDONLY,
                .set = "ro",
                .clear = "RW",
#elif __LINUX_ARM_ARCH__ >= 6
        {
                .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_APX | PMD_SECT_AP_WRITE,
                .set = "    ro",
        }, {
                .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_AP_WRITE,
                .set = "    RW",
        }, {
                .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_AP_READ,
                .set = "USR ro",
        }, {
                .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .set = "USR RW",
#else /* ARMv4/ARMv5 */
        /* These are approximate */
        {
                .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = 0,
                .set = "    ro",
        }, {
                .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_AP_WRITE,
                .set = "    RW",
        }, {
                .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_AP_READ,
                .set = "USR ro",
        }, {
                .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .set = "USR RW",
#endif
        }, {
                .mask = PMD_SECT_XN,
                .val = PMD_SECT_XN,
                .set = "NX",
                .clear = "x ",
        }, {
                .mask = PMD_SECT_S,
                .val = PMD_SECT_S,
                .set = "SHD",
                .clear = "   ",
        },
};

struct pg_level {
        const struct prot_bits *bits;
        size_t num;
        u64 mask;
};

static struct pg_level pg_level[] = {
        {
        }, { /* pgd */
        }, { /* pud */
        }, { /* pmd */
                .bits = section_bits,
                .num = ARRAY_SIZE(section_bits),
        }, { /* pte */
                .bits = pte_bits,
                .num = ARRAY_SIZE(pte_bits),
        },
};

static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t num)
{
        unsigned i;

        for (i = 0; i < num; i++, bits++) {
                const char *s;

                if ((st->current_prot & bits->mask) == bits->val)
                        s = bits->set;
                else
                        s = bits->clear;

                if (s)
                        seq_printf(st->seq, " %s", s);
        }
}

/*
 * Record one entry.  A contiguous run of entries with identical protection
 * bits is accumulated and only printed, with its size and decoded
 * attributes, once the protections, the level or the marker region change.
 */
static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u64 val)
{
        static const char units[] = "KMGTPE";
        u64 prot = val & pg_level[level].mask;

        if (!st->level) {
                st->level = level;
                st->current_prot = prot;
                seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
        } else if (prot != st->current_prot || level != st->level ||
                   addr >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;

                if (st->current_prot) {
                        seq_printf(st->seq, "0x%08lx-0x%08lx   ",
                                   st->start_address, addr);

                        delta = (addr - st->start_address) >> 10;
                        while (!(delta & 1023) && unit[1]) {
                                delta >>= 10;
                                unit++;
                        }
                        seq_printf(st->seq, "%9lu%c", delta, *unit);
                        if (pg_level[st->level].bits)
                                dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
                        seq_printf(st->seq, "\n");
                }

                if (addr >= st->marker[1].start_address) {
                        st->marker++;
                        seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
                }
                st->start_address = addr;
                st->current_prot = prot;
                st->level = level;
        }
}

static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
{
        pte_t *pte = pte_offset_kernel(pmd, 0);
        unsigned long addr;
        unsigned i;

        for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
                addr = start + i * PAGE_SIZE;
                note_page(st, addr, 4, pte_val(*pte));
        }
}

static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long addr;
        unsigned i;

        for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
                addr = start + i * PMD_SIZE;
                if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
                        note_page(st, addr, 3, pmd_val(*pmd));
                else
                        walk_pte(st, pmd, addr);

                /*
                 * On classic ARM a Linux PMD folds two hardware sections;
                 * if the second one is a section mapping, note it too.
                 */
                if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))
                        note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1]));
        }
}

static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
        pud_t *pud = pud_offset(pgd, 0);
        unsigned long addr;
        unsigned i;

        for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
                addr = start + i * PUD_SIZE;
                if (!pud_none(*pud)) {
                        walk_pmd(st, pud, addr);
                } else {
                        note_page(st, addr, 2, pud_val(*pud));
                }
        }
}

/* Walk the kernel's master page table (swapper_pg_dir) and dump every range. */
static void walk_pgd(struct seq_file *m)
{
        pgd_t *pgd = swapper_pg_dir;
        struct pg_state st;
        unsigned long addr;
        unsigned i;

        memset(&st, 0, sizeof(st));
        st.seq = m;
        st.marker = address_markers;

        for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
                addr = i * PGDIR_SIZE;
                if (!pgd_none(*pgd)) {
                        walk_pud(&st, pgd, addr);
                } else {
                        note_page(&st, addr, 1, pgd_val(*pgd));
                }
        }

        /* Flush the final accumulated range. */
        note_page(&st, 0, 0, 0);
}

static int ptdump_show(struct seq_file *m, void *v)
{
        walk_pgd(m);
        return 0;
}

static int ptdump_open(struct inode *inode, struct file *file)
{
        return single_open(file, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
        .open = ptdump_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int ptdump_init(void)
{
        struct dentry *pe;
        unsigned i, j;

        /* Build the per-level union of all protection bits we can decode. */
        for (i = 0; i < ARRAY_SIZE(pg_level); i++)
                if (pg_level[i].bits)
                        for (j = 0; j < pg_level[i].num; j++)
                                pg_level[i].mask |= pg_level[i].bits[j].mask;

        /* VMALLOC_START is not a compile-time constant; fill in its marker now. */
        address_markers[2].start_address = VMALLOC_START;

        pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
                                 &ptdump_fops);
        return pe ? 0 : -ENOMEM;
}
__initcall(ptdump_init);