// SPDX-License-Identifier: GPL-2.0

#include <linux/cpufeature.h>
#include <linux/set_memory.h>
#include <linux/ptdump.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/mm.h>
#include <linux/kfence.h>
#include <linux/kasan.h>
#include <asm/kasan.h>
#include <asm/abs_lowcore.h>
#include <asm/nospec-branch.h>
#include <asm/sections.h>
#include <asm/maccess.h>

static unsigned long max_addr;

struct addr_marker {
	int is_start;
	unsigned long start_address;
	unsigned long size;
	const char *name;
};

static struct addr_marker *markers;
static unsigned int markers_cnt;

struct pg_state {
	struct ptdump_state ptdump;
	struct seq_file *seq;
	int level;
	unsigned int current_prot;
	bool check_wx;
	unsigned long wx_pages;
	unsigned long start_address;
	const struct addr_marker *marker;
};

#define pt_dump_seq_printf(m, fmt, args...)	\
({						\
	struct seq_file *__m = (m);		\
						\
	if (__m)				\
		seq_printf(__m, fmt, ##args);	\
})

#define pt_dump_seq_puts(m, fmt)		\
({						\
	struct seq_file *__m = (m);		\
						\
	if (__m)				\
		seq_printf(__m, fmt);		\
})

static void print_prot(struct seq_file *m, unsigned int pr, int level)
{
	static const char * const level_name[] =
		{ "ASCE", "PGD", "PUD", "PMD", "PTE" };

	pt_dump_seq_printf(m, "%s ", level_name[level]);
	if (pr & _PAGE_INVALID) {
		pt_dump_seq_printf(m, "I\n");
		return;
	}
	pt_dump_seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW ");
	pt_dump_seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n");
}

static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	if (!st->check_wx)
		return;
	if (st->current_prot & _PAGE_INVALID)
		return;
	if (st->current_prot & _PAGE_PROTECT)
		return;
	if (st->current_prot & _PAGE_NOEXEC)
		return;
	/*
	 * The first lowcore page is W+X if spectre mitigations are using
	 * trampolines or the BEAR enhancements facility is not installed,
	 * in which case we have two lpswe instructions in lowcore that need
	 * to be executable.
	 */
	if (addr == PAGE_SIZE && (nospec_uses_trampoline() || !cpu_has_bear()))
		return;
	WARN_ONCE(IS_ENABLED(CONFIG_DEBUG_WX),
		  "s390/mm: Found insecure W+X mapping at address %pS\n",
		  (void *)st->start_address);
	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

static void note_page_update_state(struct pg_state *st, unsigned long addr, unsigned int prot, int level)
{
	struct seq_file *m = st->seq;

	while (addr >= st->marker[1].start_address) {
		st->marker++;
		pt_dump_seq_printf(m, "---[ %s %s ]---\n", st->marker->name,
				   st->marker->is_start ? "Start" : "End");
	}
	st->start_address = addr;
	st->current_prot = prot;
	st->level = level;
}
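
/*
 * note_page() is fed every entry the generic walker visits via the level
 * specific callbacks below (level 0..4 for the pgd..pte callbacks, -1 for
 * the final flush). A range is emitted once the protection bits or the
 * paging level change, or a marker boundary is crossed; its size is
 * printed scaled to the largest unit in "KMGTPE" that divides it evenly.
 */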
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val)
{
	int width = sizeof(unsigned long) * 2;
	static const char units[] = "KMGTPE";
	const char *unit = units;
	unsigned long delta;
	struct pg_state *st;
	struct seq_file *m;
	unsigned int prot;

	st = container_of(pt_st, struct pg_state, ptdump);
	m = st->seq;
	prot = val & (_PAGE_PROTECT | _PAGE_NOEXEC);
	if (level == 4 && (val & _PAGE_INVALID))
		prot = _PAGE_INVALID;
	/* For pmd_none() & friends val gets passed as zero. */
	if (level != 4 && !val)
		prot = _PAGE_INVALID;
	/* Final flush from generic code. */
	if (level == -1)
		addr = max_addr;
	if (st->level == -1) {
		pt_dump_seq_puts(m, "---[ Kernel Virtual Address Space ]---\n");
		note_page_update_state(st, addr, prot, level);
	} else if (prot != st->current_prot || level != st->level ||
		   addr >= st->marker[1].start_address) {
		note_prot_wx(st, addr);
		pt_dump_seq_printf(m, "0x%0*lx-0x%0*lx ",
				   width, st->start_address,
				   width, addr);
		delta = (addr - st->start_address) >> 10;
		while (!(delta & 0x3ff) && unit[1]) {
			delta >>= 10;
			unit++;
		}
		pt_dump_seq_printf(m, "%9lu%c ", delta, *unit);
		print_prot(m, st->current_prot, st->level);
		note_page_update_state(st, addr, prot, level);
	}
}

static void note_page_pte(struct ptdump_state *pt_st, unsigned long addr, pte_t pte)
{
	note_page(pt_st, addr, 4, pte_val(pte));
}

static void note_page_pmd(struct ptdump_state *pt_st, unsigned long addr, pmd_t pmd)
{
	note_page(pt_st, addr, 3, pmd_val(pmd));
}

static void note_page_pud(struct ptdump_state *pt_st, unsigned long addr, pud_t pud)
{
	note_page(pt_st, addr, 2, pud_val(pud));
}

static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d)
{
	note_page(pt_st, addr, 1, p4d_val(p4d));
}

static void note_page_pgd(struct ptdump_state *pt_st, unsigned long addr, pgd_t pgd)
{
	note_page(pt_st, addr, 0, pgd_val(pgd));
}

static void note_page_flush(struct ptdump_state *pt_st)
{
	pte_t pte_zero = {0};

	note_page(pt_st, 0, -1, pte_val(pte_zero));
}

bool ptdump_check_wx(void)
{
	struct pg_state st = {
		.ptdump = {
			.note_page_pte = note_page_pte,
			.note_page_pmd = note_page_pmd,
			.note_page_pud = note_page_pud,
			.note_page_p4d = note_page_p4d,
			.note_page_pgd = note_page_pgd,
			.note_page_flush = note_page_flush,
			.range = (struct ptdump_range[]) {
				{.start = 0, .end = max_addr},
				{.start = 0, .end = 0},
			}
		},
		.seq = NULL,
		.level = -1,
		.current_prot = 0,
		.check_wx = true,
		.wx_pages = 0,
		.start_address = 0,
		.marker = (struct addr_marker[]) {
			{ .start_address = 0, .name = NULL},
			{ .start_address = -1, .name = NULL},
		},
	};

	if (!cpu_has_nx())
		return true;
	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
	if (st.wx_pages) {
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n", st.wx_pages);

		return false;
	} else {
		pr_info("Checked W+X mappings: passed, no %sW+X pages found\n",
			(nospec_uses_trampoline() || !cpu_has_bear()) ?
			"unexpected " : "");

		return true;
	}
}

#ifdef CONFIG_PTDUMP_DEBUGFS
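/*
 * The debugfs dump walks live kernel page tables: cpa_mutex serializes
 * the walk against concurrent set_memory_*() changes, while
 * get_online_mems() keeps memory hotplug from removing and freeing page
 * tables underneath the walker.
 */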
219 "unexpected " : ""); 220 221 return true; 222 } 223 } 224 225 #ifdef CONFIG_PTDUMP_DEBUGFS 226 static int ptdump_show(struct seq_file *m, void *v) 227 { 228 struct pg_state st = { 229 .ptdump = { 230 .note_page_pte = note_page_pte, 231 .note_page_pmd = note_page_pmd, 232 .note_page_pud = note_page_pud, 233 .note_page_p4d = note_page_p4d, 234 .note_page_pgd = note_page_pgd, 235 .note_page_flush = note_page_flush, 236 .range = (struct ptdump_range[]) { 237 {.start = 0, .end = max_addr}, 238 {.start = 0, .end = 0}, 239 } 240 }, 241 .seq = m, 242 .level = -1, 243 .current_prot = 0, 244 .check_wx = false, 245 .wx_pages = 0, 246 .start_address = 0, 247 .marker = markers, 248 }; 249 250 get_online_mems(); 251 mutex_lock(&cpa_mutex); 252 ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); 253 mutex_unlock(&cpa_mutex); 254 put_online_mems(); 255 return 0; 256 } 257 DEFINE_SHOW_ATTRIBUTE(ptdump); 258 #endif /* CONFIG_PTDUMP_DEBUGFS */ 259 260 static int ptdump_cmp(const void *a, const void *b) 261 { 262 const struct addr_marker *ama = a; 263 const struct addr_marker *amb = b; 264 265 if (ama->start_address > amb->start_address) 266 return 1; 267 if (ama->start_address < amb->start_address) 268 return -1; 269 /* 270 * If the start addresses of two markers are identical sort markers in an 271 * order that considers areas contained within other areas correctly. 272 */ 273 if (ama->is_start && amb->is_start) { 274 if (ama->size > amb->size) 275 return -1; 276 if (ama->size < amb->size) 277 return 1; 278 return 0; 279 } 280 if (!ama->is_start && !amb->is_start) { 281 if (ama->size > amb->size) 282 return 1; 283 if (ama->size < amb->size) 284 return -1; 285 return 0; 286 } 287 if (ama->is_start) 288 return 1; 289 if (amb->is_start) 290 return -1; 291 return 0; 292 } 293 294 static int add_marker(unsigned long start, unsigned long end, const char *name) 295 { 296 size_t oldsize, newsize; 297 298 oldsize = markers_cnt * sizeof(*markers); 299 newsize = oldsize + 2 * sizeof(*markers); 300 if (!oldsize) 301 markers = kvmalloc(newsize, GFP_KERNEL); 302 else 303 markers = kvrealloc(markers, newsize, GFP_KERNEL); 304 if (!markers) 305 goto error; 306 markers[markers_cnt].is_start = 1; 307 markers[markers_cnt].start_address = start; 308 markers[markers_cnt].size = end - start; 309 markers[markers_cnt].name = name; 310 markers_cnt++; 311 markers[markers_cnt].is_start = 0; 312 markers[markers_cnt].start_address = end; 313 markers[markers_cnt].size = end - start; 314 markers[markers_cnt].name = name; 315 markers_cnt++; 316 return 0; 317 error: 318 markers_cnt = 0; 319 return -ENOMEM; 320 } 321 322 static int pt_dump_init(void) 323 { 324 #ifdef CONFIG_KFENCE 325 unsigned long kfence_start = (unsigned long)__kfence_pool; 326 #endif 327 unsigned long lowcore = (unsigned long)get_lowcore(); 328 int rc; 329 330 /* 331 * Figure out the maximum virtual address being accessible with the 332 * kernel ASCE. We need this to keep the page table walker functions 333 * from accessing non-existent entries. 
334 */ 335 max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2; 336 max_addr = 1UL << (max_addr * 11 + 31); 337 /* start + end markers - must be added first */ 338 rc = add_marker(0, -1UL, NULL); 339 rc |= add_marker((unsigned long)_stext, (unsigned long)_end, "Kernel Image"); 340 rc |= add_marker(lowcore, lowcore + sizeof(struct lowcore), "Lowcore"); 341 rc |= add_marker(__identity_base, __identity_base + ident_map_size, "Identity Mapping"); 342 rc |= add_marker((unsigned long)__samode31, (unsigned long)__eamode31, "Amode31 Area"); 343 rc |= add_marker(MODULES_VADDR, MODULES_END, "Modules Area"); 344 rc |= add_marker(__abs_lowcore, __abs_lowcore + ABS_LOWCORE_MAP_SIZE, "Lowcore Area"); 345 rc |= add_marker(__memcpy_real_area, __memcpy_real_area + MEMCPY_REAL_SIZE, "Real Memory Copy Area"); 346 rc |= add_marker((unsigned long)vmemmap, (unsigned long)vmemmap + vmemmap_size, "vmemmap Area"); 347 rc |= add_marker(VMALLOC_START, VMALLOC_END, "vmalloc Area"); 348 #ifdef CONFIG_KFENCE 349 rc |= add_marker(kfence_start, kfence_start + KFENCE_POOL_SIZE, "KFence Pool"); 350 #endif 351 #ifdef CONFIG_KMSAN 352 rc |= add_marker(KMSAN_VMALLOC_SHADOW_START, KMSAN_VMALLOC_SHADOW_END, "Kmsan vmalloc Shadow"); 353 rc |= add_marker(KMSAN_VMALLOC_ORIGIN_START, KMSAN_VMALLOC_ORIGIN_END, "Kmsan vmalloc Origins"); 354 rc |= add_marker(KMSAN_MODULES_SHADOW_START, KMSAN_MODULES_SHADOW_END, "Kmsan Modules Shadow"); 355 rc |= add_marker(KMSAN_MODULES_ORIGIN_START, KMSAN_MODULES_ORIGIN_END, "Kmsan Modules Origins"); 356 #endif 357 #ifdef CONFIG_KASAN 358 rc |= add_marker(KASAN_SHADOW_START, KASAN_SHADOW_END, "Kasan Shadow"); 359 #endif 360 if (rc) 361 goto error; 362 sort(&markers[1], markers_cnt - 1, sizeof(*markers), ptdump_cmp, NULL); 363 #ifdef CONFIG_PTDUMP_DEBUGFS 364 debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops); 365 #endif /* CONFIG_PTDUMP_DEBUGFS */ 366 return 0; 367 error: 368 kvfree(markers); 369 return -ENOMEM; 370 } 371 device_initcall(pt_dump_init); 372