// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pci.h>

#include <asm/e820/types.h>
#include <asm/pgtable.h>

/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
	int level;
	pgprot_t current_prot;
	pgprotval_t effective_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
	unsigned long lines;
	bool to_dmesg;
	bool check_wx;
	unsigned long wx_pages;
	struct seq_file *seq;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
	unsigned long max_lines;
};

/* Address space marker hints */

#ifdef CONFIG_X86_64

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	LDT_NR,
#endif
	LOW_KERNEL_NR,
	VMALLOC_START_NR,
	VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
	CPU_ENTRY_AREA_NR,
#ifdef CONFIG_X86_ESPFIX64
	ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
	EFI_END_NR,
#endif
	HIGH_KERNEL_NR,
	MODULES_VADDR_NR,
	MODULES_END_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR] = { 0, "User Space" },
	[KERNEL_SPACE_NR] = { (1UL << 63), "Kernel Space" },
	[LOW_KERNEL_NR] = { 0UL, "Low Kernel Mapping" },
	[VMALLOC_START_NR] = { 0UL, "vmalloc() Area" },
	[VMEMMAP_START_NR] = { 0UL, "Vmemmap" },
#ifdef CONFIG_KASAN
	/*
	 * These fields get initialized with the (dynamic)
	 * KASAN_SHADOW_{START,END} values in pt_dump_init().
	 */
	[KASAN_SHADOW_START_NR] = { 0UL, "KASAN shadow" },
	[KASAN_SHADOW_END_NR] = { 0UL, "KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR] = { 0UL, "LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE, "CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
	[ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
	[EFI_END_NR] = { EFI_VA_END, "EFI Runtime Services" },
#endif
	[HIGH_KERNEL_NR] = { __START_KERNEL_map, "High Kernel Mapping" },
	[MODULES_VADDR_NR] = { MODULES_VADDR, "Modules" },
	[MODULES_END_NR] = { MODULES_END, "End Modules" },
	[FIXADDR_START_NR] = { FIXADDR_START, "Fixmap Area" },
	[END_OF_SPACE_NR] = { -1, NULL }
};

#define INIT_PGD	((pgd_t *) &init_top_pgt)

#else /* CONFIG_X86_64 */

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	VMALLOC_START_NR,
	VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
	PKMAP_BASE_NR,
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	LDT_NR,
#endif
	CPU_ENTRY_AREA_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR] = { 0, "User Space" },
	[KERNEL_SPACE_NR] = { PAGE_OFFSET, "Kernel Mapping" },
	[VMALLOC_START_NR] = { 0UL, "vmalloc() Area" },
	[VMALLOC_END_NR] = { 0UL, "vmalloc() End" },
#ifdef CONFIG_HIGHMEM
	[PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR] = { 0UL, "LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" },
	[FIXADDR_START_NR] = { 0UL, "Fixmap area" },
	[END_OF_SPACE_NR] = { -1, NULL }
};

#define INIT_PGD	(swapper_pg_dir)

#endif /* !CONFIG_X86_64 */

/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)
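
/*
 * Illustrative arithmetic (assuming 4 KiB pages and 512 entries per table,
 * as on x86-64): one pte spans 4 KiB, so a pmd entry spans 512 * 4 KiB =
 * 2 MiB, a pud entry 1 GiB and a p4d entry 512 GiB. PGD_LEVEL_MULT is
 * 512 GiB with 4-level paging (PTRS_PER_P4D == 1) and 256 TiB with
 * 5-level paging.
 */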

#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_INFO fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_CONT fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
	pgprotval_t pr = pgprot_val(prot);
	static const char * const level_name[] =
		{ "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

	if (!(pr & _PAGE_PRESENT)) {
		/* Not present */
		pt_dump_cont_printf(m, dmsg, "                              ");
	} else {
		if (pr & _PAGE_USER)
			pt_dump_cont_printf(m, dmsg, "USR ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_RW)
			pt_dump_cont_printf(m, dmsg, "RW ");
		else
			pt_dump_cont_printf(m, dmsg, "ro ");
		if (pr & _PAGE_PWT)
			pt_dump_cont_printf(m, dmsg, "PWT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_PCD)
			pt_dump_cont_printf(m, dmsg, "PCD ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");

		/* Bit 7 is PSE on the pud/pmd levels and PAT on the pte level */
		if (level <= 4 && pr & _PAGE_PSE)
			pt_dump_cont_printf(m, dmsg, "PSE ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if ((level == 5 && pr & _PAGE_PAT) ||
		    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
			pt_dump_cont_printf(m, dmsg, "PAT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_GLOBAL)
			pt_dump_cont_printf(m, dmsg, "GLB ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_NX)
			pt_dump_cont_printf(m, dmsg, "NX ");
		else
			pt_dump_cont_printf(m, dmsg, "x  ");
	}
	pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}

/*
 * On 64 bits, sign-extend the 48-bit (57-bit with 5-level paging) address
 * to 64 bits.
 */
static unsigned long normalize_addr(unsigned long u)
{
	int shift;

	if (!IS_ENABLED(CONFIG_X86_64))
		return u;

	shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	return (signed long)(u << shift) >> shift;
}
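
/*
 * Example (4-level paging, __VIRTUAL_MASK_SHIFT == 47): bit 47 is copied
 * into bits 48..63, so normalize_addr(0x0000800000000000), the raw address
 * computed for pgd index 256 (256 * PGD_LEVEL_MULT), returns
 * 0xffff800000000000, the lowest canonical kernel-space address.
 */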

static void note_wx(struct pg_state *st)
{
	unsigned long npages;

	npages = (st->current_address - st->start_address) / PAGE_SIZE;

#ifdef CONFIG_PCI_BIOS
	/*
	 * If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
	 * Inform about it, but avoid the warning.
	 */
	if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
	    st->current_address <= PAGE_OFFSET + BIOS_END) {
		pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
		return;
	}
#endif
	/* Account the WX pages */
	st->wx_pages += npages;
	WARN_ONCE(__supported_pte_mask & _PAGE_NX,
		  "x86/mm: Found insecure W+X mapping at address %pS\n",
		  (void *)st->start_address);
}

/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct pg_state *st, pgprot_t new_prot,
		      pgprotval_t new_eff, int level)
{
	pgprotval_t prot, cur, eff;
	static const char units[] = "BKMGTPE";
	struct seq_file *m = st->seq;

	/*
	 * If we have a "break" in the series, we need to flush the state
	 * that we have now. A "break" is a change in permissions, a change
	 * in level, or a crossed address space marker.
	 */
	prot = pgprot_val(new_prot);
	cur = pgprot_val(st->current_prot);
	eff = st->effective_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
		st->marker = address_markers;
		st->lines = 0;
		pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
				   st->marker->name);
	} else if (prot != cur || new_eff != eff || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;
		int width = sizeof(unsigned long) * 2;

		if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
			note_wx(st);

		/*
		 * Now print the actual finished series
		 */
		if (!st->marker->max_lines ||
		    st->lines < st->marker->max_lines) {
			pt_dump_seq_printf(m, st->to_dmesg,
					   "0x%0*lx-0x%0*lx   ",
					   width, st->start_address,
					   width, st->current_address);

			delta = st->current_address - st->start_address;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
					    delta, *unit);
			printk_prot(m, st->current_prot, st->level,
				    st->to_dmesg);
		}
		st->lines++;

		/*
		 * We print markers for special areas of the address space,
		 * such as the start of vmalloc space etc., to help interpret
		 * the dump.
		 */
		if (st->current_address >= st->marker[1].start_address) {
			if (st->marker->max_lines &&
			    st->lines > st->marker->max_lines) {
				unsigned long nskip =
					st->lines - st->marker->max_lines;
				pt_dump_seq_printf(m, st->to_dmesg,
						   "... %lu entr%s skipped ... \n",
						   nskip,
						   nskip == 1 ? "y" : "ies");
			}
			st->marker++;
			st->lines = 0;
			pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
					   st->marker->name);
		}

		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
	}
}

static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
	return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
	       ((prot1 | prot2) & _PAGE_NX);
}
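
/*
 * Example (illustrative): combining a PGD entry with USER|RW and a PTE with
 * RW|NX yields an effective RW|NX: USER and RW must be granted at every
 * level to take effect, while NX set at any single level suffices.
 */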

static void walk_pte_level(struct pg_state *st, pmd_t addr, pgprotval_t eff_in,
			   unsigned long P)
{
	int i;
	pte_t *pte;
	pgprotval_t prot, eff;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
		pte = pte_offset_map(&addr, st->current_address);
		prot = pte_flags(*pte);
		eff = effective_prot(eff_in, prot);
		note_page(st, __pgprot(prot), eff, 5);
		pte_unmap(pte);
	}
}
#ifdef CONFIG_KASAN

/*
 * This is an optimization for the KASAN=y case. Since all KASAN page tables
 * eventually point to the kasan_early_shadow_page, we can call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for a 5-level config) while checking for
 * W+X mappings or reading the kernel_page_tables debugfs file.
 */
static inline bool kasan_page_table(struct pg_state *st, void *pt)
{
	if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
	    (pgtable_l5_enabled() &&
	     __pa(pt) == __pa(kasan_early_shadow_p4d)) ||
	    __pa(pt) == __pa(kasan_early_shadow_pud)) {
		pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);

		note_page(st, __pgprot(prot), 0, 5);
		return true;
	}
	return false;
}
#else
static inline bool kasan_page_table(struct pg_state *st, void *pt)
{
	return false;
}
#endif

#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct pg_state *st, pud_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pmd_t *start, *pmd_start;
	pgprotval_t prot, eff;

	pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
		if (!pmd_none(*start)) {
			prot = pmd_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pmd_large(*start) || !pmd_present(*start)) {
				note_page(st, __pgprot(prot), eff, 4);
			} else if (!kasan_page_table(st, pmd_start)) {
				walk_pte_level(st, *start, eff,
					       P + i * PMD_LEVEL_MULT);
			}
		} else
			note_page(st, __pgprot(0), 0, 4);
		start++;
	}
}

#else
#define walk_pmd_level(s,a,e,p) walk_pte_level(s,__pmd(pud_val(a)),e,p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a) pmd_none(__pmd(pud_val(a)))
#endif

#if PTRS_PER_PUD > 1

static void walk_pud_level(struct pg_state *st, p4d_t addr, pgprotval_t eff_in,
			   unsigned long P)
{
	int i;
	pud_t *start, *pud_start;
	pgprotval_t prot, eff;

	pud_start = start = (pud_t *)p4d_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_PUD; i++) {
		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
		if (!pud_none(*start)) {
			prot = pud_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pud_large(*start) || !pud_present(*start)) {
				note_page(st, __pgprot(prot), eff, 3);
			} else if (!kasan_page_table(st, pud_start)) {
				walk_pmd_level(st, *start, eff,
					       P + i * PUD_LEVEL_MULT);
			}
		} else
			note_page(st, __pgprot(0), 0, 3);

		start++;
	}
}

#else
#define walk_pud_level(s,a,e,p) walk_pmd_level(s,__pud(p4d_val(a)),e,p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a) pud_none(__pud(p4d_val(a)))
#endif

static void walk_p4d_level(struct pg_state *st, pgd_t addr, pgprotval_t eff_in,
			   unsigned long P)
{
	int i;
	p4d_t *start, *p4d_start;
	pgprotval_t prot, eff;

	if (PTRS_PER_P4D == 1)
		return walk_pud_level(st, __p4d(pgd_val(addr)), eff_in, P);

	p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_P4D; i++) {
		st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
		if (!p4d_none(*start)) {
			prot = p4d_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (p4d_large(*start) || !p4d_present(*start)) {
				note_page(st, __pgprot(prot), eff, 2);
			} else if (!kasan_page_table(st, p4d_start)) {
				walk_pud_level(st, *start, eff,
					       P + i * P4D_LEVEL_MULT);
			}
		} else
			note_page(st, __pgprot(0), 0, 2);

		start++;
	}
}
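
/*
 * With 4-level paging the p4d level is folded into the pgd, so the p4d
 * helpers must be used for pgd entries; with 5-level paging the regular
 * pgd helpers apply. The self-reference in the macros below is safe: a
 * macro name is not expanded recursively inside its own replacement list,
 * so pgd_large()/pgd_none() on the true branch resolve to the original
 * helpers.
 */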

#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))

static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
	/*
	 * A hole at the beginning of the kernel address space, reserved
	 * for a hypervisor.
	 */
	return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
		(idx < pgd_index(GUARD_HOLE_END_ADDR));
#else
	return false;
#endif
}

static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
				       bool checkwx, bool dmesg)
{
	pgd_t *start = pgd;
	pgprotval_t prot, eff;
	int i;
	struct pg_state st = {};

	st.to_dmesg = dmesg;
	st.check_wx = checkwx;
	st.seq = m;
	if (checkwx)
		st.wx_pages = 0;

	for (i = 0; i < PTRS_PER_PGD; i++) {
		st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
		if (!pgd_none(*start) && !is_hypervisor_range(i)) {
			prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
			eff = _PAGE_USER | _PAGE_RW;
#else
			eff = prot;
#endif
			if (pgd_large(*start) || !pgd_present(*start)) {
				note_page(&st, __pgprot(prot), eff, 1);
			} else {
				walk_p4d_level(&st, *start, eff,
					       i * PGD_LEVEL_MULT);
			}
		} else
			note_page(&st, __pgprot(0), 0, 1);

		cond_resched();
		start++;
	}

	/* Flush out the last page */
	st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
	note_page(&st, __pgprot(0), 0, 0);
	if (!checkwx)
		return;
	if (st.wx_pages)
		pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
			st.wx_pages);
	else
		pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}

void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm)
{
	ptdump_walk_pgd_level_core(m, mm->pgd, false, true);
}

void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
				   bool user)
{
	pgd_t *pgd = mm->pgd;
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (user && boot_cpu_has(X86_FEATURE_PTI))
		pgd = kernel_to_user_pgdp(pgd);
#endif
	ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);

void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pgd_t *pgd = INIT_PGD;

	if (!(__supported_pte_mask & _PAGE_NX) ||
	    !boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("x86/mm: Checking user space page tables\n");
	pgd = kernel_to_user_pgdp(pgd);
	ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}

void ptdump_walk_pgd_level_checkwx(void)
{
	ptdump_walk_pgd_level_core(NULL, INIT_PGD, true, false);
}
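
/*
 * Note on callers (illustrative; the actual call sites live outside this
 * file): ptdump_walk_pgd_level() is meant for dumping a pgd to dmesg,
 * ptdump_walk_pgd_level_debugfs() backs the debugfs interface, and the
 * *_checkwx() variants are typically run via debug_checkwx() and
 * debug_checkwx_user() once the kernel and user mappings have been
 * finalized, when CONFIG_DEBUG_WX is enabled.
 */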

static int __init pt_dump_init(void)
{
	/*
	 * Various markers are not compile-time constants, so assign them
	 * here.
	 */
#ifdef CONFIG_X86_64
	address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
	address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
# ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
# endif
#endif
	return 0;
}
__initcall(pt_dump_init);
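
/*
 * Illustrative output (addresses, sizes and column spacing depend on the
 * configuration and running kernel; this only sketches the format produced
 * by note_page()/printk_prot()):
 *
 *   ---[ High Kernel Mapping ]---
 *   0xffffffff81000000-0xffffffff81e00000       14M  ro  PSE  GLB  x   pmd
 *   0xffffffff81e00000-0xffffffff82200000        4M  RW  PSE  GLB  NX  pmd
 */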