1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * mm_init.c - Memory initialisation verification and debugging 4 * 5 * Copyright 2008 IBM Corporation, 2008 6 * Author Mel Gorman <mel@csn.ul.ie> 7 * 8 */ 9 #include <linux/kernel.h> 10 #include <linux/init.h> 11 #include <linux/kobject.h> 12 #include <linux/export.h> 13 #include <linux/memory.h> 14 #include <linux/notifier.h> 15 #include <linux/sched.h> 16 #include <linux/mman.h> 17 #include <linux/memblock.h> 18 #include <linux/page-isolation.h> 19 #include <linux/padata.h> 20 #include <linux/nmi.h> 21 #include <linux/buffer_head.h> 22 #include <linux/kmemleak.h> 23 #include <linux/kfence.h> 24 #include <linux/page_ext.h> 25 #include <linux/pti.h> 26 #include <linux/pgtable.h> 27 #include <linux/stackdepot.h> 28 #include <linux/swap.h> 29 #include <linux/cma.h> 30 #include <linux/crash_dump.h> 31 #include <linux/execmem.h> 32 #include <linux/vmstat.h> 33 #include <linux/kexec_handover.h> 34 #include <linux/hugetlb.h> 35 #include "internal.h" 36 #include "slab.h" 37 #include "shuffle.h" 38 39 #include <asm/setup.h> 40 41 #ifndef CONFIG_NUMA 42 unsigned long max_mapnr; 43 EXPORT_SYMBOL(max_mapnr); 44 45 struct page *mem_map; 46 EXPORT_SYMBOL(mem_map); 47 #endif 48 49 /* 50 * high_memory defines the upper bound on direct map memory, then end 51 * of ZONE_NORMAL. 52 */ 53 void *high_memory; 54 EXPORT_SYMBOL(high_memory); 55 56 unsigned long zero_page_pfn __ro_after_init; 57 EXPORT_SYMBOL(zero_page_pfn); 58 59 #ifndef __HAVE_COLOR_ZERO_PAGE 60 uint8_t empty_zero_page[PAGE_SIZE] __page_aligned_bss; 61 EXPORT_SYMBOL(empty_zero_page); 62 63 struct page *__zero_page __ro_after_init; 64 EXPORT_SYMBOL(__zero_page); 65 #endif /* __HAVE_COLOR_ZERO_PAGE */ 66 67 #ifdef CONFIG_DEBUG_MEMORY_INIT 68 int __meminitdata mminit_loglevel; 69 70 /* The zonelists are simply reported, validation is manual. */ 71 void __init mminit_verify_zonelist(void) 72 { 73 int nid; 74 75 if (mminit_loglevel < MMINIT_VERIFY) 76 return; 77 78 for_each_online_node(nid) { 79 pg_data_t *pgdat = NODE_DATA(nid); 80 struct zone *zone; 81 struct zoneref *z; 82 struct zonelist *zonelist; 83 int i, listid, zoneid; 84 85 for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) { 86 87 /* Identify the zone and nodelist */ 88 zoneid = i % MAX_NR_ZONES; 89 listid = i / MAX_NR_ZONES; 90 zonelist = &pgdat->node_zonelists[listid]; 91 zone = &pgdat->node_zones[zoneid]; 92 if (!populated_zone(zone)) 93 continue; 94 95 /* Print information about the zonelist */ 96 printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ", 97 listid > 0 ? 
"thisnode" : "general", nid, 98 zone->name); 99 100 /* Iterate the zonelist */ 101 for_each_zone_zonelist(zone, z, zonelist, zoneid) 102 pr_cont("%d:%s ", zone_to_nid(zone), zone->name); 103 pr_cont("\n"); 104 } 105 } 106 } 107 108 void __init mminit_verify_pageflags_layout(void) 109 { 110 int shift, width; 111 unsigned long or_mask, add_mask; 112 113 shift = BITS_PER_LONG; 114 width = shift - NR_NON_PAGEFLAG_BITS; 115 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths", 116 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n", 117 SECTIONS_WIDTH, 118 NODES_WIDTH, 119 ZONES_WIDTH, 120 LAST_CPUPID_WIDTH, 121 KASAN_TAG_WIDTH, 122 LRU_GEN_WIDTH, 123 LRU_REFS_WIDTH, 124 NR_PAGEFLAGS); 125 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts", 126 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n", 127 SECTIONS_SHIFT, 128 NODES_SHIFT, 129 ZONES_SHIFT, 130 LAST_CPUPID_SHIFT, 131 KASAN_TAG_WIDTH); 132 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts", 133 "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n", 134 (unsigned long)SECTIONS_PGSHIFT, 135 (unsigned long)NODES_PGSHIFT, 136 (unsigned long)ZONES_PGSHIFT, 137 (unsigned long)LAST_CPUPID_PGSHIFT, 138 (unsigned long)KASAN_TAG_PGSHIFT); 139 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid", 140 "Node/Zone ID: %lu -> %lu\n", 141 (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT), 142 (unsigned long)ZONEID_PGOFF); 143 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage", 144 "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n", 145 shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0); 146 #ifdef NODE_NOT_IN_PAGE_FLAGS 147 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags", 148 "Node not in page flags"); 149 #endif 150 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS 151 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags", 152 "Last cpupid not in page flags"); 153 #endif 154 155 if (SECTIONS_WIDTH) { 156 shift -= SECTIONS_WIDTH; 157 BUG_ON(shift != SECTIONS_PGSHIFT); 158 } 159 if (NODES_WIDTH) { 160 shift -= NODES_WIDTH; 161 BUG_ON(shift != NODES_PGSHIFT); 162 } 163 if (ZONES_WIDTH) { 164 shift -= ZONES_WIDTH; 165 BUG_ON(shift != ZONES_PGSHIFT); 166 } 167 168 /* Check for bitmask overlaps */ 169 or_mask = (ZONES_MASK << ZONES_PGSHIFT) | 170 (NODES_MASK << NODES_PGSHIFT) | 171 (SECTIONS_MASK << SECTIONS_PGSHIFT); 172 add_mask = (ZONES_MASK << ZONES_PGSHIFT) + 173 (NODES_MASK << NODES_PGSHIFT) + 174 (SECTIONS_MASK << SECTIONS_PGSHIFT); 175 BUG_ON(or_mask != add_mask); 176 } 177 178 static __init int set_mminit_loglevel(char *str) 179 { 180 get_option(&str, &mminit_loglevel); 181 return 0; 182 } 183 early_param("mminit_loglevel", set_mminit_loglevel); 184 #endif /* CONFIG_DEBUG_MEMORY_INIT */ 185 186 struct kobject *mm_kobj; 187 188 #ifdef CONFIG_SMP 189 s32 vm_committed_as_batch = 32; 190 191 void mm_compute_batch(int overcommit_policy) 192 { 193 u64 memsized_batch; 194 s32 nr = num_present_cpus(); 195 s32 batch = max_t(s32, nr*2, 32); 196 unsigned long ram_pages = totalram_pages(); 197 198 /* 199 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of 200 * (total memory/#cpus), and lift it to 25% for other policies 201 * to ease the possible lock contention for percpu_counter 202 * vm_committed_as, while the max limit is INT_MAX 203 */ 204 if (overcommit_policy == OVERCOMMIT_NEVER) 205 memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX); 206 else 207 memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX); 208 209 vm_committed_as_batch = max_t(s32, memsized_batch, batch); 210 
} 211 212 static int __meminit mm_compute_batch_notifier(struct notifier_block *self, 213 unsigned long action, void *arg) 214 { 215 switch (action) { 216 case MEM_ONLINE: 217 case MEM_OFFLINE: 218 mm_compute_batch(sysctl_overcommit_memory); 219 break; 220 default: 221 break; 222 } 223 return NOTIFY_OK; 224 } 225 226 static int __init mm_compute_batch_init(void) 227 { 228 mm_compute_batch(sysctl_overcommit_memory); 229 hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI); 230 return 0; 231 } 232 233 __initcall(mm_compute_batch_init); 234 235 #endif 236 237 static int __init mm_sysfs_init(void) 238 { 239 mm_kobj = kobject_create_and_add("mm", kernel_kobj); 240 if (!mm_kobj) 241 return -ENOMEM; 242 243 return 0; 244 } 245 postcore_initcall(mm_sysfs_init); 246 247 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata; 248 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata; 249 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata; 250 251 static unsigned long required_kernelcore __initdata; 252 static unsigned long required_kernelcore_percent __initdata; 253 static unsigned long required_movablecore __initdata; 254 static unsigned long required_movablecore_percent __initdata; 255 256 static unsigned long nr_kernel_pages __initdata; 257 static unsigned long nr_all_pages __initdata; 258 259 static bool deferred_struct_pages __meminitdata; 260 261 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); 262 263 static int __init cmdline_parse_core(char *p, unsigned long *core, 264 unsigned long *percent) 265 { 266 unsigned long long coremem; 267 char *endptr; 268 269 if (!p) 270 return -EINVAL; 271 272 /* Value may be a percentage of total memory, otherwise bytes */ 273 coremem = simple_strtoull(p, &endptr, 0); 274 if (*endptr == '%') { 275 /* Paranoid check for percent values greater than 100 */ 276 WARN_ON(coremem > 100); 277 278 *percent = coremem; 279 } else { 280 coremem = memparse(p, &p); 281 /* Paranoid check that UL is enough for the coremem value */ 282 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 283 284 *core = coremem >> PAGE_SHIFT; 285 *percent = 0UL; 286 } 287 return 0; 288 } 289 290 bool mirrored_kernelcore __initdata_memblock; 291 292 /* 293 * kernelcore=size sets the amount of memory for use for allocations that 294 * cannot be reclaimed or migrated. 295 */ 296 static int __init cmdline_parse_kernelcore(char *p) 297 { 298 /* parse kernelcore=mirror */ 299 if (parse_option_str(p, "mirror")) { 300 mirrored_kernelcore = true; 301 return 0; 302 } 303 304 return cmdline_parse_core(p, &required_kernelcore, 305 &required_kernelcore_percent); 306 } 307 early_param("kernelcore", cmdline_parse_kernelcore); 308 309 /* 310 * movablecore=size sets the amount of memory for use for allocations that 311 * can be reclaimed or migrated. 312 */ 313 static int __init cmdline_parse_movablecore(char *p) 314 { 315 return cmdline_parse_core(p, &required_movablecore, 316 &required_movablecore_percent); 317 } 318 early_param("movablecore", cmdline_parse_movablecore); 319 320 /* 321 * early_calculate_totalpages() 322 * Sum pages in active regions for movable zone. 323 * Populate N_MEMORY for calculating usable_nodes. 
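 *
 * Illustrative layout (made up, not from this file): with memblock ranges
 * [0x1000, 0x8000) on node 0 and [0x10000, 0x20000) on node 1, the result
 * is (0x8000 - 0x1000) + (0x20000 - 0x10000) pfns, and both nodes are
 * marked N_MEMORY.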
324 */ 325 static unsigned long __init early_calculate_totalpages(void) 326 { 327 unsigned long totalpages = 0; 328 unsigned long start_pfn, end_pfn; 329 int i, nid; 330 331 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 332 unsigned long pages = end_pfn - start_pfn; 333 334 totalpages += pages; 335 if (pages) 336 node_set_state(nid, N_MEMORY); 337 } 338 return totalpages; 339 } 340 341 /* 342 * This finds a zone that can be used for ZONE_MOVABLE pages. The 343 * assumption is made that zones within a node are ordered in monotonic 344 * increasing memory addresses so that the "highest" populated zone is used 345 */ 346 static void __init find_usable_zone_for_movable(void) 347 { 348 int zone_index; 349 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 350 if (zone_index == ZONE_MOVABLE) 351 continue; 352 353 if (arch_zone_highest_possible_pfn[zone_index] > 354 arch_zone_lowest_possible_pfn[zone_index]) 355 break; 356 } 357 358 VM_BUG_ON(zone_index == -1); 359 movable_zone = zone_index; 360 } 361 362 /* 363 * Find the PFN the Movable zone begins in each node. Kernel memory 364 * is spread evenly between nodes as long as the nodes have enough 365 * memory. When they don't, some nodes will have more kernelcore than 366 * others 367 */ 368 static void __init find_zone_movable_pfns_for_nodes(void) 369 { 370 int i, nid; 371 unsigned long usable_startpfn; 372 unsigned long kernelcore_node, kernelcore_remaining; 373 /* save the state before borrow the nodemask */ 374 nodemask_t saved_node_state = node_states[N_MEMORY]; 375 unsigned long totalpages = early_calculate_totalpages(); 376 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 377 struct memblock_region *r; 378 379 /* Need to find movable_zone earlier when movable_node is specified. */ 380 find_usable_zone_for_movable(); 381 382 /* 383 * If movable_node is specified, ignore kernelcore and movablecore 384 * options. 385 */ 386 if (movable_node_is_enabled()) { 387 for_each_mem_region(r) { 388 if (!memblock_is_hotpluggable(r)) 389 continue; 390 391 nid = memblock_get_region_node(r); 392 393 usable_startpfn = memblock_region_memory_base_pfn(r); 394 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 395 min(usable_startpfn, zone_movable_pfn[nid]) : 396 usable_startpfn; 397 } 398 399 goto out2; 400 } 401 402 /* 403 * If kernelcore=mirror is specified, ignore movablecore option 404 */ 405 if (mirrored_kernelcore) { 406 bool mem_below_4gb_not_mirrored = false; 407 408 if (!memblock_has_mirror()) { 409 pr_warn("The system has no mirror memory, ignore kernelcore=mirror.\n"); 410 goto out; 411 } 412 413 if (is_kdump_kernel()) { 414 pr_warn("The system is under kdump, ignore kernelcore=mirror.\n"); 415 goto out; 416 } 417 418 for_each_mem_region(r) { 419 if (memblock_is_mirror(r)) 420 continue; 421 422 nid = memblock_get_region_node(r); 423 424 usable_startpfn = memblock_region_memory_base_pfn(r); 425 426 if (usable_startpfn < PHYS_PFN(SZ_4G)) { 427 mem_below_4gb_not_mirrored = true; 428 continue; 429 } 430 431 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 432 min(usable_startpfn, zone_movable_pfn[nid]) : 433 usable_startpfn; 434 } 435 436 if (mem_below_4gb_not_mirrored) 437 pr_warn("This configuration results in unmirrored kernel memory.\n"); 438 439 goto out2; 440 } 441 442 /* 443 * If kernelcore=nn% or movablecore=nn% was specified, calculate the 444 * amount of necessary memory. 
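 *
 * The expression below is equivalent to totalpages * percent / 100. As a
 * made-up example, kernelcore=25% on a machine with 1,000,000 usable pages
 * yields required_kernelcore = 250,000 pages.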
445 */ 446 if (required_kernelcore_percent) 447 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) / 448 10000UL; 449 if (required_movablecore_percent) 450 required_movablecore = (totalpages * 100 * required_movablecore_percent) / 451 10000UL; 452 453 /* 454 * If movablecore= was specified, calculate what size of 455 * kernelcore that corresponds so that memory usable for 456 * any allocation type is evenly spread. If both kernelcore 457 * and movablecore are specified, then the value of kernelcore 458 * will be used for required_kernelcore if it's greater than 459 * what movablecore would have allowed. 460 */ 461 if (required_movablecore) { 462 unsigned long corepages; 463 464 /* 465 * Round-up so that ZONE_MOVABLE is at least as large as what 466 * was requested by the user 467 */ 468 required_movablecore = 469 round_up(required_movablecore, MAX_ORDER_NR_PAGES); 470 required_movablecore = min(totalpages, required_movablecore); 471 corepages = totalpages - required_movablecore; 472 473 required_kernelcore = max(required_kernelcore, corepages); 474 } 475 476 /* 477 * If kernelcore was not specified or kernelcore size is larger 478 * than totalpages, there is no ZONE_MOVABLE. 479 */ 480 if (!required_kernelcore || required_kernelcore >= totalpages) 481 goto out; 482 483 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 484 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 485 486 restart: 487 /* Spread kernelcore memory as evenly as possible throughout nodes */ 488 kernelcore_node = required_kernelcore / usable_nodes; 489 for_each_node_state(nid, N_MEMORY) { 490 unsigned long start_pfn, end_pfn; 491 492 /* 493 * Recalculate kernelcore_node if the division per node 494 * now exceeds what is necessary to satisfy the requested 495 * amount of memory for the kernel 496 */ 497 if (required_kernelcore < kernelcore_node) 498 kernelcore_node = required_kernelcore / usable_nodes; 499 500 /* 501 * As the map is walked, we track how much memory is usable 502 * by the kernel using kernelcore_remaining. When it is 503 * 0, the rest of the node is usable by ZONE_MOVABLE 504 */ 505 kernelcore_remaining = kernelcore_node; 506 507 /* Go through each range of PFNs within this node */ 508 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 509 unsigned long size_pages; 510 511 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 512 if (start_pfn >= end_pfn) 513 continue; 514 515 /* Account for what is only usable for kernelcore */ 516 if (start_pfn < usable_startpfn) { 517 unsigned long kernel_pages; 518 kernel_pages = min(end_pfn, usable_startpfn) 519 - start_pfn; 520 521 kernelcore_remaining -= min(kernel_pages, 522 kernelcore_remaining); 523 required_kernelcore -= min(kernel_pages, 524 required_kernelcore); 525 526 /* Continue if range is now fully accounted */ 527 if (end_pfn <= usable_startpfn) { 528 529 /* 530 * Push zone_movable_pfn to the end so 531 * that if we have to rebalance 532 * kernelcore across nodes, we will 533 * not double account here 534 */ 535 zone_movable_pfn[nid] = end_pfn; 536 continue; 537 } 538 start_pfn = usable_startpfn; 539 } 540 541 /* 542 * The usable PFN range for ZONE_MOVABLE is from 543 * start_pfn->end_pfn. 
Calculate size_pages as the 544 * number of pages used as kernelcore 545 */ 546 size_pages = end_pfn - start_pfn; 547 if (size_pages > kernelcore_remaining) 548 size_pages = kernelcore_remaining; 549 zone_movable_pfn[nid] = start_pfn + size_pages; 550 551 /* 552 * Some kernelcore has been met, update counts and 553 * break if the kernelcore for this node has been 554 * satisfied 555 */ 556 required_kernelcore -= min(required_kernelcore, 557 size_pages); 558 kernelcore_remaining -= size_pages; 559 if (!kernelcore_remaining) 560 break; 561 } 562 } 563 564 /* 565 * If there is still required_kernelcore, we do another pass with one 566 * less node in the count. This will push zone_movable_pfn[nid] further 567 * along on the nodes that still have memory until kernelcore is 568 * satisfied 569 */ 570 usable_nodes--; 571 if (usable_nodes && required_kernelcore > usable_nodes) 572 goto restart; 573 574 out2: 575 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 576 for_each_node_state(nid, N_MEMORY) { 577 unsigned long start_pfn, end_pfn; 578 579 zone_movable_pfn[nid] = 580 round_up(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 581 582 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 583 if (zone_movable_pfn[nid] >= end_pfn) 584 zone_movable_pfn[nid] = 0; 585 } 586 587 out: 588 /* restore the node_state */ 589 node_states[N_MEMORY] = saved_node_state; 590 } 591 592 void __meminit __init_single_page(struct page *page, unsigned long pfn, 593 unsigned long zone, int nid) 594 { 595 mm_zero_struct_page(page); 596 set_page_links(page, zone, nid, pfn); 597 init_page_count(page); 598 atomic_set(&page->_mapcount, -1); 599 page_cpupid_reset_last(page); 600 page_kasan_tag_reset(page); 601 602 INIT_LIST_HEAD(&page->lru); 603 #ifdef WANT_PAGE_VIRTUAL 604 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 605 if (!is_highmem_idx(zone)) 606 set_page_address(page, __va(pfn << PAGE_SHIFT)); 607 #endif 608 } 609 610 #ifdef CONFIG_NUMA 611 /* 612 * During memory init memblocks map pfns to nids. The search is expensive and 613 * this caches recent lookups. The implementation of __early_pfn_to_nid 614 * treats start/end as pfns. 615 */ 616 struct mminit_pfnnid_cache { 617 unsigned long last_start; 618 unsigned long last_end; 619 int last_nid; 620 }; 621 622 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; 623 624 /* 625 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 
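 *
 * The last successful lookup is cached, so an illustrative (hypothetical)
 * caller sweeping a contiguous range such as
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++)
 *		nid = early_pfn_to_nid(pfn);
 *
 * pays for roughly one memblock search per memblock region rather than one
 * per pfn.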
626 */ 627 static int __meminit __early_pfn_to_nid(unsigned long pfn, 628 struct mminit_pfnnid_cache *state) 629 { 630 unsigned long start_pfn, end_pfn; 631 int nid; 632 633 if (state->last_start <= pfn && pfn < state->last_end) 634 return state->last_nid; 635 636 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 637 if (nid != NUMA_NO_NODE) { 638 state->last_start = start_pfn; 639 state->last_end = end_pfn; 640 state->last_nid = nid; 641 } 642 643 return nid; 644 } 645 646 int __meminit early_pfn_to_nid(unsigned long pfn) 647 { 648 static DEFINE_SPINLOCK(early_pfn_lock); 649 int nid; 650 651 spin_lock(&early_pfn_lock); 652 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); 653 if (nid < 0) 654 nid = first_online_node; 655 spin_unlock(&early_pfn_lock); 656 657 return nid; 658 } 659 660 bool hashdist = HASHDIST_DEFAULT; 661 662 static int __init set_hashdist(char *str) 663 { 664 return kstrtobool(str, &hashdist) == 0; 665 } 666 __setup("hashdist=", set_hashdist); 667 668 static inline void fixup_hashdist(void) 669 { 670 if (num_node_state(N_MEMORY) == 1) 671 hashdist = false; 672 } 673 #else 674 static inline void fixup_hashdist(void) {} 675 #endif /* CONFIG_NUMA */ 676 677 /* 678 * Initialize a reserved page unconditionally, finding its zone first. 679 */ 680 void __meminit __init_page_from_nid(unsigned long pfn, int nid) 681 { 682 pg_data_t *pgdat; 683 int zid; 684 685 pgdat = NODE_DATA(nid); 686 687 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 688 struct zone *zone = &pgdat->node_zones[zid]; 689 690 if (zone_spans_pfn(zone, pfn)) 691 break; 692 } 693 __init_single_page(pfn_to_page(pfn), pfn, zid, nid); 694 695 if (pageblock_aligned(pfn)) 696 init_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_MOVABLE, 697 false); 698 } 699 700 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 701 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) 702 { 703 pgdat->first_deferred_pfn = ULONG_MAX; 704 } 705 706 /* Returns true if the struct page for the pfn is initialised */ 707 static inline bool __meminit early_page_initialised(unsigned long pfn, int nid) 708 { 709 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) 710 return false; 711 712 return true; 713 } 714 715 /* 716 * Returns true when the remaining initialisation should be deferred until 717 * later in the boot cycle when it can be parallelised. 718 */ 719 static bool __meminit 720 defer_init(int nid, unsigned long pfn, unsigned long end_pfn) 721 { 722 static unsigned long prev_end_pfn, nr_initialised; 723 724 if (early_page_ext_enabled()) 725 return false; 726 727 /* Always populate low zones for address-constrained allocations */ 728 if (end_pfn < pgdat_end_pfn(NODE_DATA(nid))) 729 return false; 730 731 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX) 732 return true; 733 734 /* 735 * prev_end_pfn static that contains the end of previous zone 736 * No need to protect because called very early in boot before smp_init. 737 */ 738 if (prev_end_pfn != end_pfn) { 739 prev_end_pfn = end_pfn; 740 nr_initialised = 0; 741 } 742 743 /* 744 * We start only with one section of pages, more pages are added as 745 * needed until the rest of deferred pages are initialized. 
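 *
 * As a rough example (assuming the common 128 MiB sections, i.e.
 * PAGES_PER_SECTION == 32768 with 4 KiB pages): only the node's highest
 * zone is ever deferred, and even there roughly the first section's worth
 * of pages is initialised eagerly before the next section-aligned pfn is
 * recorded as first_deferred_pfn for deferred_init_memmap() to pick up.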
746 */ 747 nr_initialised++; 748 if ((nr_initialised > PAGES_PER_SECTION) && 749 (pfn & (PAGES_PER_SECTION - 1)) == 0) { 750 NODE_DATA(nid)->first_deferred_pfn = pfn; 751 return true; 752 } 753 return false; 754 } 755 756 static void __meminit __init_deferred_page(unsigned long pfn, int nid) 757 { 758 if (early_page_initialised(pfn, nid)) 759 return; 760 761 __init_page_from_nid(pfn, nid); 762 } 763 #else 764 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} 765 766 static inline bool early_page_initialised(unsigned long pfn, int nid) 767 { 768 return true; 769 } 770 771 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) 772 { 773 return false; 774 } 775 776 static inline void __init_deferred_page(unsigned long pfn, int nid) 777 { 778 } 779 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 780 781 void __meminit init_deferred_page(unsigned long pfn, int nid) 782 { 783 __init_deferred_page(pfn, nid); 784 } 785 786 /* 787 * Initialised pages do not have PageReserved set. This function is 788 * called for each range allocated by the bootmem allocator and 789 * marks the pages PageReserved. The remaining valid pages are later 790 * sent to the buddy page allocator. 791 */ 792 void __meminit reserve_bootmem_region(phys_addr_t start, 793 phys_addr_t end, int nid) 794 { 795 unsigned long pfn; 796 797 for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) { 798 struct page *page = pfn_to_page(pfn); 799 800 __init_deferred_page(pfn, nid); 801 802 /* 803 * no need for atomic set_bit because the struct 804 * page is not visible yet so nobody should 805 * access it yet. 806 */ 807 __SetPageReserved(page); 808 } 809 } 810 811 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */ 812 static bool __meminit 813 overlap_memmap_init(unsigned long zone, unsigned long *pfn) 814 { 815 static struct memblock_region *r __meminitdata; 816 817 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { 818 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) { 819 for_each_mem_region(r) { 820 if (*pfn < memblock_region_memory_end_pfn(r)) 821 break; 822 } 823 } 824 if (*pfn >= memblock_region_memory_base_pfn(r) && 825 memblock_is_mirror(r)) { 826 *pfn = memblock_region_memory_end_pfn(r); 827 return true; 828 } 829 } 830 return false; 831 } 832 833 /* 834 * Only struct pages that correspond to ranges defined by memblock.memory 835 * are zeroed and initialized by going through __init_single_page() during 836 * memmap_init_zone_range(). 837 * 838 * But, there could be struct pages that correspond to holes in 839 * memblock.memory. This can happen because of the following reasons: 840 * - physical memory bank size is not necessarily the exact multiple of the 841 * arbitrary section size 842 * - early reserved memory may not be listed in memblock.memory 843 * - non-memory regions covered by the contiguous flatmem mapping 844 * - memory layouts defined with memmap= kernel parameter may not align 845 * nicely with memmap sections 846 * 847 * Explicitly initialize those struct pages so that: 848 * - PG_Reserved is set 849 * - zone and node links point to zone and node that span the page if the 850 * hole is in the middle of a zone 851 * - zone and node links point to adjacent zone/node if the hole falls on 852 * the zone boundary; the pages in such holes will be prepended to the 853 * zone/node above the hole except for the trailing pages in the last 854 * section that will be appended to the zone/node below. 
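 *
 * Illustration (made-up layout): if memblock.memory ends at pfn 0x7fff0 but
 * the section containing it spans up to pfn 0x80000, the struct pages for
 * the trailing hole are still initialised here, linked to the last
 * zone/node and marked PG_reserved.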
855 */ 856 static void __init init_unavailable_range(unsigned long spfn, 857 unsigned long epfn, 858 int zone, int node) 859 { 860 unsigned long pfn; 861 u64 pgcnt = 0; 862 863 for_each_valid_pfn(pfn, spfn, epfn) { 864 __init_single_page(pfn_to_page(pfn), pfn, zone, node); 865 __SetPageReserved(pfn_to_page(pfn)); 866 pgcnt++; 867 } 868 869 if (pgcnt) 870 pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n", 871 node, zone_names[zone], pgcnt); 872 } 873 874 /* 875 * Initially all pages are reserved - free ones are freed 876 * up by memblock_free_all() once the early boot process is 877 * done. Non-atomic initialization, single-pass. 878 * 879 * All aligned pageblocks are initialized to the specified migratetype 880 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related 881 * zone stats (e.g., nr_isolate_pageblock) are touched. 882 */ 883 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, 884 unsigned long start_pfn, unsigned long zone_end_pfn, 885 enum meminit_context context, 886 struct vmem_altmap *altmap, int migratetype, 887 bool isolate_pageblock) 888 { 889 unsigned long pfn, end_pfn = start_pfn + size; 890 struct page *page; 891 892 if (highest_memmap_pfn < end_pfn - 1) 893 highest_memmap_pfn = end_pfn - 1; 894 895 #ifdef CONFIG_ZONE_DEVICE 896 /* 897 * Honor reservation requested by the driver for this ZONE_DEVICE 898 * memory. We limit the total number of pages to initialize to just 899 * those that might contain the memory mapping. We will defer the 900 * ZONE_DEVICE page initialization until after we have released 901 * the hotplug lock. 902 */ 903 if (zone == ZONE_DEVICE) { 904 if (!altmap) 905 return; 906 907 if (start_pfn == altmap->base_pfn) 908 start_pfn += altmap->reserve; 909 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 910 } 911 #endif 912 913 for (pfn = start_pfn; pfn < end_pfn; ) { 914 /* 915 * There can be holes in boot-time mem_map[]s handed to this 916 * function. They do not exist on hotplugged memory. 917 */ 918 if (context == MEMINIT_EARLY) { 919 if (overlap_memmap_init(zone, &pfn)) 920 continue; 921 if (defer_init(nid, pfn, zone_end_pfn)) { 922 deferred_struct_pages = true; 923 break; 924 } 925 } 926 927 page = pfn_to_page(pfn); 928 __init_single_page(page, pfn, zone, nid); 929 if (context == MEMINIT_HOTPLUG) { 930 #ifdef CONFIG_ZONE_DEVICE 931 if (zone == ZONE_DEVICE) 932 __SetPageReserved(page); 933 else 934 #endif 935 __SetPageOffline(page); 936 } 937 938 /* 939 * Usually, we want to mark the pageblock MIGRATE_MOVABLE, 940 * such that unmovable allocations won't be scattered all 941 * over the place during system boot. 
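 *
 * For instance, with the common 2 MiB pageblocks (pageblock_order == 9 and
 * 4 KiB pages), the migratetype below is written once every 512 pfns, at
 * each pageblock_aligned() boundary.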
942 */ 943 if (pageblock_aligned(pfn)) { 944 init_pageblock_migratetype(page, migratetype, 945 isolate_pageblock); 946 cond_resched(); 947 } 948 pfn++; 949 } 950 } 951 952 static void __init memmap_init_zone_range(struct zone *zone, 953 unsigned long start_pfn, 954 unsigned long end_pfn, 955 unsigned long *hole_pfn) 956 { 957 unsigned long zone_start_pfn = zone->zone_start_pfn; 958 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; 959 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); 960 961 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn); 962 end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn); 963 964 if (start_pfn >= end_pfn) 965 return; 966 967 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, 968 zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE, 969 false); 970 971 if (*hole_pfn < start_pfn) 972 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid); 973 974 *hole_pfn = end_pfn; 975 } 976 977 static void __init memmap_init(void) 978 { 979 unsigned long start_pfn, end_pfn; 980 unsigned long hole_pfn = 0; 981 int i, j, zone_id = 0, nid; 982 983 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 984 struct pglist_data *node = NODE_DATA(nid); 985 986 for (j = 0; j < MAX_NR_ZONES; j++) { 987 struct zone *zone = node->node_zones + j; 988 989 if (!populated_zone(zone)) 990 continue; 991 992 memmap_init_zone_range(zone, start_pfn, end_pfn, 993 &hole_pfn); 994 zone_id = j; 995 } 996 } 997 998 /* 999 * Initialize the memory map for hole in the range [memory_end, 1000 * section_end] for SPARSEMEM and in the range [memory_end, memmap_end] 1001 * for FLATMEM. 1002 * Append the pages in this hole to the highest zone in the last 1003 * node. 1004 */ 1005 #ifdef CONFIG_SPARSEMEM 1006 end_pfn = round_up(end_pfn, PAGES_PER_SECTION); 1007 #else 1008 end_pfn = round_up(end_pfn, MAX_ORDER_NR_PAGES); 1009 #endif 1010 if (hole_pfn < end_pfn) 1011 init_unavailable_range(hole_pfn, end_pfn, zone_id, nid); 1012 } 1013 1014 #ifdef CONFIG_ZONE_DEVICE 1015 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, 1016 unsigned long zone_idx, int nid, 1017 struct dev_pagemap *pgmap) 1018 { 1019 1020 __init_single_page(page, pfn, zone_idx, nid); 1021 1022 /* 1023 * Mark page reserved as it will need to wait for onlining 1024 * phase for it to be fully associated with a zone. 1025 * 1026 * We can use the non-atomic __set_bit operation for setting 1027 * the flag as we are still initializing the pages. 1028 */ 1029 __SetPageReserved(page); 1030 1031 /* 1032 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer 1033 * and zone_device_data. It is a bug if a ZONE_DEVICE page is 1034 * ever freed or placed on a driver-private list. 1035 */ 1036 page_folio(page)->pgmap = pgmap; 1037 page->zone_device_data = NULL; 1038 1039 /* 1040 * Mark the block movable so that blocks are reserved for 1041 * movable at startup. This will force kernel allocations 1042 * to reserve their blocks rather than leaking throughout 1043 * the address space during boot when many long-lived 1044 * kernel allocations are made. 
1045 * 1046 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap 1047 * because this is done early in section_activate() 1048 */ 1049 if (pageblock_aligned(pfn)) { 1050 init_pageblock_migratetype(page, MIGRATE_MOVABLE, false); 1051 cond_resched(); 1052 } 1053 1054 /* 1055 * ZONE_DEVICE pages other than MEMORY_TYPE_GENERIC are released 1056 * directly to the driver page allocator which will set the page count 1057 * to 1 when allocating the page. 1058 * 1059 * MEMORY_TYPE_GENERIC and MEMORY_TYPE_FS_DAX pages automatically have 1060 * their refcount reset to one whenever they are freed (ie. after 1061 * their refcount drops to 0). 1062 */ 1063 switch (pgmap->type) { 1064 case MEMORY_DEVICE_FS_DAX: 1065 case MEMORY_DEVICE_PRIVATE: 1066 case MEMORY_DEVICE_COHERENT: 1067 case MEMORY_DEVICE_PCI_P2PDMA: 1068 set_page_count(page, 0); 1069 break; 1070 1071 case MEMORY_DEVICE_GENERIC: 1072 break; 1073 } 1074 } 1075 1076 /* 1077 * With compound page geometry and when struct pages are stored in ram most 1078 * tail pages are reused. Consequently, the amount of unique struct pages to 1079 * initialize is a lot smaller that the total amount of struct pages being 1080 * mapped. This is a paired / mild layering violation with explicit knowledge 1081 * of how the sparse_vmemmap internals handle compound pages in the lack 1082 * of an altmap. See vmemmap_populate_compound_pages(). 1083 */ 1084 static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap, 1085 struct dev_pagemap *pgmap) 1086 { 1087 if (!vmemmap_can_optimize(altmap, pgmap)) 1088 return pgmap_vmemmap_nr(pgmap); 1089 1090 return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page)); 1091 } 1092 1093 static void __ref memmap_init_compound(struct page *head, 1094 unsigned long head_pfn, 1095 unsigned long zone_idx, int nid, 1096 struct dev_pagemap *pgmap, 1097 unsigned long nr_pages) 1098 { 1099 unsigned long pfn, end_pfn = head_pfn + nr_pages; 1100 unsigned int order = pgmap->vmemmap_shift; 1101 1102 /* 1103 * We have to initialize the pages, including setting up page links. 1104 * prep_compound_page() does not take care of that, so instead we 1105 * open-code prep_compound_page() so we can take care of initializing 1106 * the pages in the same go. 1107 */ 1108 __SetPageHead(head); 1109 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) { 1110 struct page *page = pfn_to_page(pfn); 1111 1112 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 1113 prep_compound_tail(page, head, order); 1114 set_page_count(page, 0); 1115 } 1116 prep_compound_head(head, order); 1117 } 1118 1119 void __ref memmap_init_zone_device(struct zone *zone, 1120 unsigned long start_pfn, 1121 unsigned long nr_pages, 1122 struct dev_pagemap *pgmap) 1123 { 1124 unsigned long pfn, end_pfn = start_pfn + nr_pages; 1125 struct pglist_data *pgdat = zone->zone_pgdat; 1126 struct vmem_altmap *altmap = pgmap_altmap(pgmap); 1127 unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap); 1128 unsigned long zone_idx = zone_idx(zone); 1129 unsigned long start = jiffies; 1130 int nid = pgdat->node_id; 1131 1132 if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE)) 1133 return; 1134 1135 /* 1136 * The call to memmap_init should have already taken care 1137 * of the pages reserved for the memmap, so we can just jump to 1138 * the end of that region and start processing the device pages. 
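 *
 * Sketch of a hypothetical layout (not taken from this file): for a pmem
 * namespace whose altmap stores the memmap in the device itself, the pfns
 * from base_pfn up to base_pfn + vmem_altmap_offset() hold the driver
 * reservation plus that memmap, so start_pfn is advanced past them below
 * and only the remaining pfns are initialised as ZONE_DEVICE pages.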
1139 */ 1140 if (altmap) { 1141 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 1142 nr_pages = end_pfn - start_pfn; 1143 } 1144 1145 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) { 1146 struct page *page = pfn_to_page(pfn); 1147 1148 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 1149 1150 if (pfns_per_compound == 1) 1151 continue; 1152 1153 memmap_init_compound(page, pfn, zone_idx, nid, pgmap, 1154 compound_nr_pages(altmap, pgmap)); 1155 } 1156 1157 pr_debug("%s initialised %lu pages in %ums\n", __func__, 1158 nr_pages, jiffies_to_msecs(jiffies - start)); 1159 } 1160 #endif 1161 1162 /* 1163 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 1164 * because it is sized independent of architecture. Unlike the other zones, 1165 * the starting point for ZONE_MOVABLE is not fixed. It may be different 1166 * in each node depending on the size of each node and how evenly kernelcore 1167 * is distributed. This helper function adjusts the zone ranges 1168 * provided by the architecture for a given node by using the end of the 1169 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that 1170 * zones within a node are in order of monotonic increases memory addresses 1171 */ 1172 static void __init adjust_zone_range_for_zone_movable(int nid, 1173 unsigned long zone_type, 1174 unsigned long node_end_pfn, 1175 unsigned long *zone_start_pfn, 1176 unsigned long *zone_end_pfn) 1177 { 1178 /* Only adjust if ZONE_MOVABLE is on this node */ 1179 if (zone_movable_pfn[nid]) { 1180 /* Size ZONE_MOVABLE */ 1181 if (zone_type == ZONE_MOVABLE) { 1182 *zone_start_pfn = zone_movable_pfn[nid]; 1183 *zone_end_pfn = min(node_end_pfn, 1184 arch_zone_highest_possible_pfn[movable_zone]); 1185 1186 /* Adjust for ZONE_MOVABLE starting within this range */ 1187 } else if (!mirrored_kernelcore && 1188 *zone_start_pfn < zone_movable_pfn[nid] && 1189 *zone_end_pfn > zone_movable_pfn[nid]) { 1190 *zone_end_pfn = zone_movable_pfn[nid]; 1191 1192 /* Check if this whole range is within ZONE_MOVABLE */ 1193 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 1194 *zone_start_pfn = *zone_end_pfn; 1195 } 1196 } 1197 1198 /* 1199 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 1200 * then all holes in the requested range will be accounted for. 1201 */ 1202 static unsigned long __init __absent_pages_in_range(int nid, 1203 unsigned long range_start_pfn, 1204 unsigned long range_end_pfn) 1205 { 1206 unsigned long nr_absent = range_end_pfn - range_start_pfn; 1207 unsigned long start_pfn, end_pfn; 1208 int i; 1209 1210 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 1211 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 1212 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 1213 nr_absent -= end_pfn - start_pfn; 1214 } 1215 return nr_absent; 1216 } 1217 1218 /** 1219 * absent_pages_in_range - Return number of page frames in holes within a range 1220 * @start_pfn: The start PFN to start searching for holes 1221 * @end_pfn: The end PFN to stop searching for holes 1222 * 1223 * Return: the number of pages frames in memory holes within a range. 
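 *
 * Example (made-up ranges): with memory at [0x0, 0x100) and [0x200, 0x300),
 * absent_pages_in_range(0x0, 0x300) returns 0x100 pages for the hole in
 * between.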
1224 */ 1225 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 1226 unsigned long end_pfn) 1227 { 1228 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 1229 } 1230 1231 /* Return the number of page frames in holes in a zone on a node */ 1232 static unsigned long __init zone_absent_pages_in_node(int nid, 1233 unsigned long zone_type, 1234 unsigned long zone_start_pfn, 1235 unsigned long zone_end_pfn) 1236 { 1237 unsigned long nr_absent; 1238 1239 /* zone is empty, we don't have any absent pages */ 1240 if (zone_start_pfn == zone_end_pfn) 1241 return 0; 1242 1243 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 1244 1245 /* 1246 * ZONE_MOVABLE handling. 1247 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages 1248 * and vice versa. 1249 */ 1250 if (mirrored_kernelcore && zone_movable_pfn[nid]) { 1251 unsigned long start_pfn, end_pfn; 1252 struct memblock_region *r; 1253 1254 for_each_mem_region(r) { 1255 start_pfn = clamp(memblock_region_memory_base_pfn(r), 1256 zone_start_pfn, zone_end_pfn); 1257 end_pfn = clamp(memblock_region_memory_end_pfn(r), 1258 zone_start_pfn, zone_end_pfn); 1259 1260 if (zone_type == ZONE_MOVABLE && 1261 memblock_is_mirror(r)) 1262 nr_absent += end_pfn - start_pfn; 1263 1264 if (zone_type == ZONE_NORMAL && 1265 !memblock_is_mirror(r)) 1266 nr_absent += end_pfn - start_pfn; 1267 } 1268 } 1269 1270 return nr_absent; 1271 } 1272 1273 /* 1274 * Return the number of pages a zone spans in a node, including holes 1275 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 1276 */ 1277 static unsigned long __init zone_spanned_pages_in_node(int nid, 1278 unsigned long zone_type, 1279 unsigned long node_start_pfn, 1280 unsigned long node_end_pfn, 1281 unsigned long *zone_start_pfn, 1282 unsigned long *zone_end_pfn) 1283 { 1284 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 1285 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 1286 1287 /* Get the start and end of the zone */ 1288 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 1289 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 1290 adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn, 1291 zone_start_pfn, zone_end_pfn); 1292 1293 /* Check that this node has pages within the zone's required range */ 1294 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn) 1295 return 0; 1296 1297 /* Move the zone boundaries inside the node if necessary */ 1298 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn); 1299 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); 1300 1301 /* Return the spanned pages */ 1302 return *zone_end_pfn - *zone_start_pfn; 1303 } 1304 1305 static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat) 1306 { 1307 struct zone *z; 1308 1309 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) { 1310 z->zone_start_pfn = 0; 1311 z->spanned_pages = 0; 1312 z->present_pages = 0; 1313 #if defined(CONFIG_MEMORY_HOTPLUG) 1314 z->present_early_pages = 0; 1315 #endif 1316 } 1317 1318 pgdat->node_spanned_pages = 0; 1319 pgdat->node_present_pages = 0; 1320 pr_debug("On node %d totalpages: 0\n", pgdat->node_id); 1321 } 1322 1323 static void __init calc_nr_kernel_pages(void) 1324 { 1325 unsigned long start_pfn, end_pfn; 1326 phys_addr_t start_addr, end_addr; 1327 u64 u; 1328 #ifdef CONFIG_HIGHMEM 1329 unsigned long high_zone_low = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]; 1330 #endif 1331 1332 
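	/*
	 * Walk every free memblock range: all of it counts towards
	 * nr_all_pages, while nr_kernel_pages only counts the part below
	 * the start of ZONE_HIGHMEM (without CONFIG_HIGHMEM the two end up
	 * equal).
	 */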
for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) { 1333 start_pfn = PFN_UP(start_addr); 1334 end_pfn = PFN_DOWN(end_addr); 1335 1336 if (start_pfn < end_pfn) { 1337 nr_all_pages += end_pfn - start_pfn; 1338 #ifdef CONFIG_HIGHMEM 1339 start_pfn = clamp(start_pfn, 0, high_zone_low); 1340 end_pfn = clamp(end_pfn, 0, high_zone_low); 1341 #endif 1342 nr_kernel_pages += end_pfn - start_pfn; 1343 } 1344 } 1345 } 1346 1347 static void __init calculate_node_totalpages(struct pglist_data *pgdat, 1348 unsigned long node_start_pfn, 1349 unsigned long node_end_pfn) 1350 { 1351 unsigned long realtotalpages = 0, totalpages = 0; 1352 enum zone_type i; 1353 1354 for (i = 0; i < MAX_NR_ZONES; i++) { 1355 struct zone *zone = pgdat->node_zones + i; 1356 unsigned long zone_start_pfn, zone_end_pfn; 1357 unsigned long spanned, absent; 1358 unsigned long real_size; 1359 1360 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, 1361 node_start_pfn, 1362 node_end_pfn, 1363 &zone_start_pfn, 1364 &zone_end_pfn); 1365 absent = zone_absent_pages_in_node(pgdat->node_id, i, 1366 zone_start_pfn, 1367 zone_end_pfn); 1368 1369 real_size = spanned - absent; 1370 1371 if (spanned) 1372 zone->zone_start_pfn = zone_start_pfn; 1373 else 1374 zone->zone_start_pfn = 0; 1375 zone->spanned_pages = spanned; 1376 zone->present_pages = real_size; 1377 #if defined(CONFIG_MEMORY_HOTPLUG) 1378 zone->present_early_pages = real_size; 1379 #endif 1380 1381 totalpages += spanned; 1382 realtotalpages += real_size; 1383 } 1384 1385 pgdat->node_spanned_pages = totalpages; 1386 pgdat->node_present_pages = realtotalpages; 1387 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); 1388 } 1389 1390 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1391 static void pgdat_init_split_queue(struct pglist_data *pgdat) 1392 { 1393 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; 1394 1395 spin_lock_init(&ds_queue->split_queue_lock); 1396 INIT_LIST_HEAD(&ds_queue->split_queue); 1397 ds_queue->split_queue_len = 0; 1398 } 1399 #else 1400 static void pgdat_init_split_queue(struct pglist_data *pgdat) {} 1401 #endif 1402 1403 #ifdef CONFIG_COMPACTION 1404 static void pgdat_init_kcompactd(struct pglist_data *pgdat) 1405 { 1406 init_waitqueue_head(&pgdat->kcompactd_wait); 1407 } 1408 #else 1409 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {} 1410 #endif 1411 1412 static void __meminit pgdat_init_internals(struct pglist_data *pgdat) 1413 { 1414 int i; 1415 1416 pgdat_resize_init(pgdat); 1417 pgdat_kswapd_lock_init(pgdat); 1418 1419 pgdat_init_split_queue(pgdat); 1420 pgdat_init_kcompactd(pgdat); 1421 1422 init_waitqueue_head(&pgdat->kswapd_wait); 1423 init_waitqueue_head(&pgdat->pfmemalloc_wait); 1424 1425 for (i = 0; i < NR_VMSCAN_THROTTLE; i++) 1426 init_waitqueue_head(&pgdat->reclaim_wait[i]); 1427 1428 pgdat_page_ext_init(pgdat); 1429 lruvec_init(&pgdat->__lruvec); 1430 } 1431 1432 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, 1433 unsigned long remaining_pages) 1434 { 1435 atomic_long_set(&zone->managed_pages, remaining_pages); 1436 zone_set_nid(zone, nid); 1437 zone->name = zone_names[idx]; 1438 zone->zone_pgdat = NODE_DATA(nid); 1439 spin_lock_init(&zone->lock); 1440 zone_seqlock_init(zone); 1441 zone_pcp_init(zone); 1442 } 1443 1444 static void __meminit zone_init_free_lists(struct zone *zone) 1445 { 1446 unsigned int order, t; 1447 for_each_migratetype_order(order, t) { 1448 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 1449 
zone->free_area[order].nr_free = 0; 1450 } 1451 1452 #ifdef CONFIG_UNACCEPTED_MEMORY 1453 INIT_LIST_HEAD(&zone->unaccepted_pages); 1454 #endif 1455 } 1456 1457 void __meminit init_currently_empty_zone(struct zone *zone, 1458 unsigned long zone_start_pfn, 1459 unsigned long size) 1460 { 1461 struct pglist_data *pgdat = zone->zone_pgdat; 1462 int zone_idx = zone_idx(zone) + 1; 1463 1464 if (zone_idx > pgdat->nr_zones) 1465 pgdat->nr_zones = zone_idx; 1466 1467 zone->zone_start_pfn = zone_start_pfn; 1468 1469 mminit_dprintk(MMINIT_TRACE, "memmap_init", 1470 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 1471 pgdat->node_id, 1472 (unsigned long)zone_idx(zone), 1473 zone_start_pfn, (zone_start_pfn + size)); 1474 1475 zone_init_free_lists(zone); 1476 zone->initialized = 1; 1477 } 1478 1479 #ifndef CONFIG_SPARSEMEM 1480 /* 1481 * Calculate the size of the zone->pageblock_flags rounded to an unsigned long 1482 * Start by making sure zonesize is a multiple of pageblock_order by rounding 1483 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 1484 * round what is now in bits to nearest long in bits, then return it in 1485 * bytes. 1486 */ 1487 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 1488 { 1489 unsigned long usemapsize; 1490 1491 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 1492 usemapsize = round_up(zonesize, pageblock_nr_pages); 1493 usemapsize = usemapsize >> pageblock_order; 1494 usemapsize *= NR_PAGEBLOCK_BITS; 1495 usemapsize = round_up(usemapsize, BITS_PER_LONG); 1496 1497 return usemapsize / BITS_PER_BYTE; 1498 } 1499 1500 static void __ref setup_usemap(struct zone *zone) 1501 { 1502 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, 1503 zone->spanned_pages); 1504 zone->pageblock_flags = NULL; 1505 if (usemapsize) { 1506 zone->pageblock_flags = 1507 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES, 1508 zone_to_nid(zone)); 1509 if (!zone->pageblock_flags) 1510 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", 1511 usemapsize, zone->name, zone_to_nid(zone)); 1512 } 1513 } 1514 #else 1515 static inline void setup_usemap(struct zone *zone) {} 1516 #endif /* CONFIG_SPARSEMEM */ 1517 1518 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 1519 1520 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 1521 void __init set_pageblock_order(void) 1522 { 1523 unsigned int order = PAGE_BLOCK_MAX_ORDER; 1524 1525 /* Check that pageblock_nr_pages has not already been setup */ 1526 if (pageblock_order) 1527 return; 1528 1529 /* Don't let pageblocks exceed the maximum allocation granularity. */ 1530 if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order) 1531 order = HUGETLB_PAGE_ORDER; 1532 1533 /* 1534 * Assume the largest contiguous order of interest is a huge page. 1535 * This value may be variable depending on boot parameters on powerpc. 1536 */ 1537 pageblock_order = order; 1538 } 1539 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 1540 1541 /* 1542 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 1543 * is unused as pageblock_order is set at compile-time. 
See 1544 * include/linux/pageblock-flags.h for the values of pageblock_order based on 1545 * the kernel config 1546 */ 1547 void __init set_pageblock_order(void) 1548 { 1549 } 1550 1551 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 1552 1553 /* 1554 * Set up the zone data structures 1555 * - init pgdat internals 1556 * - init all zones belonging to this node 1557 * 1558 * NOTE: this function is only called during memory hotplug 1559 */ 1560 #ifdef CONFIG_MEMORY_HOTPLUG 1561 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) 1562 { 1563 int nid = pgdat->node_id; 1564 enum zone_type z; 1565 int cpu; 1566 1567 pgdat_init_internals(pgdat); 1568 1569 if (pgdat->per_cpu_nodestats == &boot_nodestats) 1570 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); 1571 1572 /* 1573 * Reset the nr_zones, order and highest_zoneidx before reuse. 1574 * Note that kswapd will init kswapd_highest_zoneidx properly 1575 * when it starts in the near future. 1576 */ 1577 pgdat->nr_zones = 0; 1578 pgdat->kswapd_order = 0; 1579 pgdat->kswapd_highest_zoneidx = 0; 1580 pgdat->node_start_pfn = 0; 1581 pgdat->node_present_pages = 0; 1582 1583 for_each_online_cpu(cpu) { 1584 struct per_cpu_nodestat *p; 1585 1586 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); 1587 memset(p, 0, sizeof(*p)); 1588 } 1589 1590 /* 1591 * When memory is hot-added, all the memory is in offline state. So 1592 * clear all zones' present_pages and managed_pages because they will 1593 * be updated in online_pages() and offline_pages(). 1594 */ 1595 for (z = 0; z < MAX_NR_ZONES; z++) { 1596 struct zone *zone = pgdat->node_zones + z; 1597 1598 zone->present_pages = 0; 1599 zone_init_internals(zone, z, nid, 0); 1600 } 1601 } 1602 #endif 1603 1604 static void __init free_area_init_core(struct pglist_data *pgdat) 1605 { 1606 enum zone_type j; 1607 int nid = pgdat->node_id; 1608 1609 pgdat_init_internals(pgdat); 1610 pgdat->per_cpu_nodestats = &boot_nodestats; 1611 1612 for (j = 0; j < MAX_NR_ZONES; j++) { 1613 struct zone *zone = pgdat->node_zones + j; 1614 unsigned long size = zone->spanned_pages; 1615 1616 /* 1617 * Initialize zone->managed_pages as 0 , it will be reset 1618 * when memblock allocator frees pages into buddy system. 1619 */ 1620 zone_init_internals(zone, j, nid, zone->present_pages); 1621 1622 if (!size) 1623 continue; 1624 1625 setup_usemap(zone); 1626 init_currently_empty_zone(zone, zone->zone_start_pfn, size); 1627 } 1628 } 1629 1630 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align, 1631 phys_addr_t min_addr, int nid, bool exact_nid) 1632 { 1633 void *ptr; 1634 1635 /* 1636 * Kmemleak will explicitly scan mem_map by traversing all valid 1637 * `struct *page`,so memblock does not need to be added to the scan list. 
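 *
 * Note on the two branches below: the exact_nid variant insists on memory
 * from the requested node, while the try_nid variant may fall back to
 * another node if the requested one cannot satisfy the allocation.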
1638 */ 1639 if (exact_nid) 1640 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr, 1641 MEMBLOCK_ALLOC_NOLEAKTRACE, 1642 nid); 1643 else 1644 ptr = memblock_alloc_try_nid_raw(size, align, min_addr, 1645 MEMBLOCK_ALLOC_NOLEAKTRACE, 1646 nid); 1647 1648 if (ptr && size > 0) 1649 page_init_poison(ptr, size); 1650 1651 return ptr; 1652 } 1653 1654 #ifdef CONFIG_FLATMEM 1655 static void __init alloc_node_mem_map(struct pglist_data *pgdat) 1656 { 1657 unsigned long start, offset, size, end; 1658 struct page *map; 1659 1660 /* Skip empty nodes */ 1661 if (!pgdat->node_spanned_pages) 1662 return; 1663 1664 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 1665 offset = pgdat->node_start_pfn - start; 1666 /* 1667 * The zone's endpoints aren't required to be MAX_PAGE_ORDER 1668 * aligned but the node_mem_map endpoints must be in order 1669 * for the buddy allocator to function correctly. 1670 */ 1671 end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES); 1672 size = (end - start) * sizeof(struct page); 1673 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, 1674 pgdat->node_id, false); 1675 if (!map) 1676 panic("Failed to allocate %ld bytes for node %d memory map\n", 1677 size, pgdat->node_id); 1678 pgdat->node_mem_map = map + offset; 1679 memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE)); 1680 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", 1681 __func__, pgdat->node_id, (unsigned long)pgdat, 1682 (unsigned long)pgdat->node_mem_map); 1683 1684 /* the global mem_map is just set as node 0's */ 1685 WARN_ON(pgdat != NODE_DATA(0)); 1686 1687 mem_map = pgdat->node_mem_map; 1688 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 1689 mem_map -= offset; 1690 1691 max_mapnr = end - start; 1692 } 1693 #else 1694 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } 1695 #endif /* CONFIG_FLATMEM */ 1696 1697 /** 1698 * get_pfn_range_for_nid - Return the start and end page frames for a node 1699 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 1700 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 1701 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 1702 * 1703 * It returns the start and end page frame of a node based on information 1704 * provided by memblock_set_node(). If called for a node 1705 * with no available memory, the start and end PFNs will be 0. 1706 */ 1707 void __init get_pfn_range_for_nid(unsigned int nid, 1708 unsigned long *start_pfn, unsigned long *end_pfn) 1709 { 1710 unsigned long this_start_pfn, this_end_pfn; 1711 int i; 1712 1713 *start_pfn = -1UL; 1714 *end_pfn = 0; 1715 1716 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 1717 *start_pfn = min(*start_pfn, this_start_pfn); 1718 *end_pfn = max(*end_pfn, this_end_pfn); 1719 } 1720 1721 if (*start_pfn == -1UL) 1722 *start_pfn = 0; 1723 } 1724 1725 static void __init free_area_init_node(int nid) 1726 { 1727 pg_data_t *pgdat = NODE_DATA(nid); 1728 unsigned long start_pfn = 0; 1729 unsigned long end_pfn = 0; 1730 1731 /* pg_data_t should be reset to zero when it's allocated */ 1732 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); 1733 1734 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 1735 1736 pgdat->node_id = nid; 1737 pgdat->node_start_pfn = start_pfn; 1738 pgdat->per_cpu_nodestats = NULL; 1739 1740 if (start_pfn != end_pfn) { 1741 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 1742 (u64)start_pfn << PAGE_SHIFT, 1743 end_pfn ? 
((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 1744 1745 calculate_node_totalpages(pgdat, start_pfn, end_pfn); 1746 } else { 1747 pr_info("Initmem setup node %d as memoryless\n", nid); 1748 1749 reset_memoryless_node_totalpages(pgdat); 1750 } 1751 1752 alloc_node_mem_map(pgdat); 1753 pgdat_set_deferred_range(pgdat); 1754 1755 free_area_init_core(pgdat); 1756 lru_gen_init_pgdat(pgdat); 1757 } 1758 1759 /* Any regular or high memory on that node? */ 1760 static void __init check_for_memory(pg_data_t *pgdat) 1761 { 1762 enum zone_type zone_type; 1763 1764 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 1765 struct zone *zone = &pgdat->node_zones[zone_type]; 1766 if (populated_zone(zone)) { 1767 if (IS_ENABLED(CONFIG_HIGHMEM)) 1768 node_set_state(pgdat->node_id, N_HIGH_MEMORY); 1769 if (zone_type <= ZONE_NORMAL) 1770 node_set_state(pgdat->node_id, N_NORMAL_MEMORY); 1771 break; 1772 } 1773 } 1774 } 1775 1776 #if MAX_NUMNODES > 1 1777 /* 1778 * Figure out the number of possible node ids. 1779 */ 1780 void __init setup_nr_node_ids(void) 1781 { 1782 unsigned int highest; 1783 1784 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 1785 nr_node_ids = highest + 1; 1786 } 1787 #endif 1788 1789 /* 1790 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For 1791 * such cases we allow max_zone_pfn sorted in the descending order 1792 */ 1793 static bool arch_has_descending_max_zone_pfns(void) 1794 { 1795 return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40); 1796 } 1797 1798 static void __init set_high_memory(void) 1799 { 1800 phys_addr_t highmem = memblock_end_of_DRAM(); 1801 1802 /* 1803 * Some architectures (e.g. ARM) set high_memory very early and 1804 * use it in arch setup code. 1805 * If an architecture already set high_memory don't overwrite it 1806 */ 1807 if (high_memory) 1808 return; 1809 1810 #ifdef CONFIG_HIGHMEM 1811 if (arch_has_descending_max_zone_pfns() || 1812 highmem > PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM])) 1813 highmem = PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]); 1814 #endif 1815 1816 high_memory = phys_to_virt(highmem - 1) + 1; 1817 } 1818 1819 /** 1820 * free_area_init - Initialise all pg_data_t and zone data 1821 * 1822 * This will call free_area_init_node() for each active node in the system. 1823 * Using the page ranges provided by memblock_set_node(), the size of each 1824 * zone in each node and their holes is calculated. If the maximum PFN 1825 * between two adjacent zones match, it is assumed that the zone is empty. 1826 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 1827 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 1828 * starts where the previous one ended. For example, ZONE_DMA32 starts 1829 * at arch_max_dma_pfn. 
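 *
 * Illustrative (made-up) example, assuming DRAM starts at pfn 0: zone
 * limits of DMA = 0x1000, DMA32 = 0x100000 and NORMAL = 0x400000 produce
 * the ranges DMA [0, 0x1000), DMA32 [0x1000, 0x100000) and NORMAL
 * [0x100000, 0x400000); if the DMA32 limit were also 0x1000, DMA32 would
 * be treated as empty.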
1830 */
1831 static void __init free_area_init(void)
1832 {
1833 unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
1834 unsigned long start_pfn, end_pfn;
1835 int i, nid, zone;
1836 bool descending;
1837
1838 arch_zone_limits_init(max_zone_pfn);
1839 sparse_init();
1840
1841 start_pfn = PHYS_PFN(memblock_start_of_DRAM());
1842 descending = arch_has_descending_max_zone_pfns();
1843
1844 for (i = 0; i < MAX_NR_ZONES; i++) {
1845 if (descending)
1846 zone = MAX_NR_ZONES - i - 1;
1847 else
1848 zone = i;
1849
1850 if (zone == ZONE_MOVABLE)
1851 continue;
1852
1853 end_pfn = max(max_zone_pfn[zone], start_pfn);
1854 arch_zone_lowest_possible_pfn[zone] = start_pfn;
1855 arch_zone_highest_possible_pfn[zone] = end_pfn;
1856
1857 start_pfn = end_pfn;
1858 }
1859
1860 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
1861 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
1862 find_zone_movable_pfns_for_nodes();
1863
1864 /* Print out the zone ranges */
1865 pr_info("Zone ranges:\n");
1866 for (i = 0; i < MAX_NR_ZONES; i++) {
1867 if (i == ZONE_MOVABLE)
1868 continue;
1869 pr_info(" %-8s ", zone_names[i]);
1870 if (arch_zone_lowest_possible_pfn[i] ==
1871 arch_zone_highest_possible_pfn[i])
1872 pr_cont("empty\n");
1873 else
1874 pr_cont("[mem %#018Lx-%#018Lx]\n",
1875 (u64)arch_zone_lowest_possible_pfn[i]
1876 << PAGE_SHIFT,
1877 ((u64)arch_zone_highest_possible_pfn[i]
1878 << PAGE_SHIFT) - 1);
1879 }
1880
1881 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
1882 pr_info("Movable zone start for each node\n");
1883 for (i = 0; i < MAX_NUMNODES; i++) {
1884 if (zone_movable_pfn[i])
1885 pr_info(" Node %d: %#018Lx\n", i,
1886 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
1887 }
1888
1889 /*
1890 * Print out the early node map, and initialize the
1891 * subsection-map relative to active online memory ranges to
1892 * enable future "sub-section" extensions of the memory map.
1893 */
1894 pr_info("Early memory node ranges\n");
1895 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
1896 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
1897 (u64)start_pfn << PAGE_SHIFT,
1898 ((u64)end_pfn << PAGE_SHIFT) - 1);
1899 sparse_init_subsection_map(start_pfn, end_pfn - start_pfn);
1900 }
1901
1902 /* Initialise every node */
1903 mminit_verify_pageflags_layout();
1904 setup_nr_node_ids();
1905 set_pageblock_order();
1906
1907 for_each_node(nid) {
1908 pg_data_t *pgdat;
1909
1910 /*
1911 * If an architecture has not allocated node data for
1912 * this node, presume the node is memoryless or offline.
1913 */
1914 if (!NODE_DATA(nid))
1915 alloc_offline_node_data(nid);
1916
1917 pgdat = NODE_DATA(nid);
1918 free_area_init_node(nid);
1919
1920 /*
1921 * No sysfs hierarchy will be created via register_node()
1922 * for a memory-less node because it is not marked as N_MEMORY
1923 * here and won't be set online later. The benefit is that userspace
1924 * programs won't be confused by sysfs files/directories of a
1925 * memory-less node. The pgdat will get fully initialized by
1926 * hotadd_init_pgdat() when memory is hotplugged into this node.
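 * (Only nodes with pgdat->node_present_pages set are marked N_MEMORY and
 * passed to check_for_memory() immediately below.)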
1927 */ 1928 if (pgdat->node_present_pages) { 1929 node_set_state(nid, N_MEMORY); 1930 check_for_memory(pgdat); 1931 } 1932 } 1933 1934 for_each_node_state(nid, N_MEMORY) 1935 sparse_vmemmap_init_nid_late(nid); 1936 1937 calc_nr_kernel_pages(); 1938 memmap_init(); 1939 1940 /* disable hash distribution for systems with a single node */ 1941 fixup_hashdist(); 1942 1943 set_high_memory(); 1944 } 1945 1946 /** 1947 * node_map_pfn_alignment - determine the maximum internode alignment 1948 * 1949 * This function should be called after node map is populated and sorted. 1950 * It calculates the maximum power of two alignment which can distinguish 1951 * all the nodes. 1952 * 1953 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 1954 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 1955 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 1956 * shifted, 1GiB is enough and this function will indicate so. 1957 * 1958 * This is used to test whether pfn -> nid mapping of the chosen memory 1959 * model has fine enough granularity to avoid incorrect mapping for the 1960 * populated node map. 1961 * 1962 * Return: the determined alignment in pfn's. 0 if there is no alignment 1963 * requirement (single node). 1964 */ 1965 unsigned long __init node_map_pfn_alignment(void) 1966 { 1967 unsigned long accl_mask = 0, last_end = 0; 1968 unsigned long start, end, mask; 1969 int last_nid = NUMA_NO_NODE; 1970 int i, nid; 1971 1972 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 1973 if (!start || last_nid < 0 || last_nid == nid) { 1974 last_nid = nid; 1975 last_end = end; 1976 continue; 1977 } 1978 1979 /* 1980 * Start with a mask granular enough to pin-point to the 1981 * start pfn and tick off bits one-by-one until it becomes 1982 * too coarse to separate the current node from the last. 1983 */ 1984 mask = ~((1 << __ffs(start)) - 1); 1985 while (mask && last_end <= (start & (mask << 1))) 1986 mask <<= 1; 1987 1988 /* accumulate all internode masks */ 1989 accl_mask |= mask; 1990 } 1991 1992 /* convert mask to number of pages */ 1993 return ~accl_mask + 1; 1994 } 1995 1996 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1997 static void __init deferred_free_pages(unsigned long pfn, 1998 unsigned long nr_pages) 1999 { 2000 struct page *page; 2001 unsigned long i; 2002 2003 if (!nr_pages) 2004 return; 2005 2006 page = pfn_to_page(pfn); 2007 2008 /* Free a large naturally-aligned chunk if possible */ 2009 if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) { 2010 for (i = 0; i < nr_pages; i += pageblock_nr_pages) 2011 init_pageblock_migratetype(page + i, MIGRATE_MOVABLE, 2012 false); 2013 __free_pages_core(page, MAX_PAGE_ORDER, MEMINIT_EARLY); 2014 return; 2015 } 2016 2017 /* Accept chunks smaller than MAX_PAGE_ORDER upfront */ 2018 accept_memory(PFN_PHYS(pfn), nr_pages * PAGE_SIZE); 2019 2020 for (i = 0; i < nr_pages; i++, page++, pfn++) { 2021 if (pageblock_aligned(pfn)) 2022 init_pageblock_migratetype(page, MIGRATE_MOVABLE, 2023 false); 2024 __free_pages_core(page, 0, MEMINIT_EARLY); 2025 } 2026 } 2027 2028 /* Completion tracking for deferred_init_memmap() threads */ 2029 static atomic_t pgdat_init_n_undone __initdata; 2030 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); 2031 2032 static inline void __init pgdat_init_report_one_done(void) 2033 { 2034 if (atomic_dec_and_test(&pgdat_init_n_undone)) 2035 complete(&pgdat_init_all_done_comp); 2036 } 2037 2038 /* 2039 * Initialize struct pages. 
We minimize pfn page lookups and scheduler checks 2040 * by performing it only once every MAX_ORDER_NR_PAGES. 2041 * Return number of pages initialized. 2042 */ 2043 static unsigned long __init deferred_init_pages(struct zone *zone, 2044 unsigned long pfn, unsigned long end_pfn) 2045 { 2046 int nid = zone_to_nid(zone); 2047 unsigned long nr_pages = end_pfn - pfn; 2048 int zid = zone_idx(zone); 2049 struct page *page = pfn_to_page(pfn); 2050 2051 for (; pfn < end_pfn; pfn++, page++) 2052 __init_single_page(page, pfn, zid, nid); 2053 return nr_pages; 2054 } 2055 2056 /* 2057 * Initialize and free pages. 2058 * 2059 * At this point reserved pages and struct pages that correspond to holes in 2060 * memblock.memory are already initialized so every free range has a valid 2061 * memory map around it. 2062 * This ensures that access of pages that are ahead of the range being 2063 * initialized (computing buddy page in __free_one_page()) always reads a valid 2064 * struct page. 2065 * 2066 * In order to try and improve CPU cache locality we have the loop broken along 2067 * max page order boundaries. 2068 */ 2069 static unsigned long __init 2070 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, 2071 struct zone *zone, bool can_resched) 2072 { 2073 int nid = zone_to_nid(zone); 2074 unsigned long nr_pages = 0; 2075 phys_addr_t start, end; 2076 u64 i = 0; 2077 2078 for_each_free_mem_range(i, nid, 0, &start, &end, NULL) { 2079 unsigned long spfn = PFN_UP(start); 2080 unsigned long epfn = PFN_DOWN(end); 2081 2082 if (spfn >= end_pfn) 2083 break; 2084 2085 spfn = max(spfn, start_pfn); 2086 epfn = min(epfn, end_pfn); 2087 2088 while (spfn < epfn) { 2089 unsigned long mo_pfn = ALIGN(spfn + 1, MAX_ORDER_NR_PAGES); 2090 unsigned long chunk_end = min(mo_pfn, epfn); 2091 2092 nr_pages += deferred_init_pages(zone, spfn, chunk_end); 2093 deferred_free_pages(spfn, chunk_end - spfn); 2094 2095 spfn = chunk_end; 2096 2097 if (can_resched) 2098 cond_resched(); 2099 else 2100 touch_nmi_watchdog(); 2101 } 2102 } 2103 2104 return nr_pages; 2105 } 2106 2107 static void __init 2108 deferred_init_memmap_job(unsigned long start_pfn, unsigned long end_pfn, 2109 void *arg) 2110 { 2111 struct zone *zone = arg; 2112 2113 deferred_init_memmap_chunk(start_pfn, end_pfn, zone, true); 2114 } 2115 2116 static unsigned int __init 2117 deferred_page_init_max_threads(const struct cpumask *node_cpumask) 2118 { 2119 return max(cpumask_weight(node_cpumask), 1U); 2120 } 2121 2122 /* Initialise remaining memory on a node */ 2123 static int __init deferred_init_memmap(void *data) 2124 { 2125 pg_data_t *pgdat = data; 2126 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2127 int max_threads = deferred_page_init_max_threads(cpumask); 2128 unsigned long first_init_pfn, last_pfn, flags; 2129 unsigned long start = jiffies; 2130 struct zone *zone; 2131 2132 /* Bind memory initialisation thread to a local node if possible */ 2133 if (!cpumask_empty(cpumask)) 2134 set_cpus_allowed_ptr(current, cpumask); 2135 2136 pgdat_resize_lock(pgdat, &flags); 2137 first_init_pfn = pgdat->first_deferred_pfn; 2138 if (first_init_pfn == ULONG_MAX) { 2139 pgdat_resize_unlock(pgdat, &flags); 2140 pgdat_init_report_one_done(); 2141 return 0; 2142 } 2143 2144 /* Sanity check boundaries */ 2145 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); 2146 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); 2147 pgdat->first_deferred_pfn = ULONG_MAX; 2148 2149 /* 2150 * Once we unlock here, the zone cannot be grown 
anymore, thus if an 2151 * interrupt thread must allocate this early in boot, zone must be 2152 * pre-grown prior to start of deferred page initialization. 2153 */ 2154 pgdat_resize_unlock(pgdat, &flags); 2155 2156 /* Only the highest zone is deferred */ 2157 zone = pgdat->node_zones + pgdat->nr_zones - 1; 2158 last_pfn = SECTION_ALIGN_UP(zone_end_pfn(zone)); 2159 2160 struct padata_mt_job job = { 2161 .thread_fn = deferred_init_memmap_job, 2162 .fn_arg = zone, 2163 .start = first_init_pfn, 2164 .size = last_pfn - first_init_pfn, 2165 .align = PAGES_PER_SECTION, 2166 .min_chunk = PAGES_PER_SECTION, 2167 .max_threads = max_threads, 2168 .numa_aware = false, 2169 }; 2170 2171 padata_do_multithreaded(&job); 2172 2173 /* Sanity check that the next zone really is unpopulated */ 2174 WARN_ON(pgdat->nr_zones < MAX_NR_ZONES && populated_zone(++zone)); 2175 2176 pr_info("node %d deferred pages initialised in %ums\n", 2177 pgdat->node_id, jiffies_to_msecs(jiffies - start)); 2178 2179 pgdat_init_report_one_done(); 2180 return 0; 2181 } 2182 2183 /* 2184 * If this zone has deferred pages, try to grow it by initializing enough 2185 * deferred pages to satisfy the allocation specified by order, rounded up to 2186 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments 2187 * of SECTION_SIZE bytes by initializing struct pages in increments of 2188 * PAGES_PER_SECTION * sizeof(struct page) bytes. 2189 * 2190 * Return true when zone was grown, otherwise return false. We return true even 2191 * when we grow less than requested, to let the caller decide if there are 2192 * enough pages to satisfy the allocation. 2193 */ 2194 bool __init deferred_grow_zone(struct zone *zone, unsigned int order) 2195 { 2196 unsigned long nr_pages_needed = SECTION_ALIGN_UP(1 << order); 2197 pg_data_t *pgdat = zone->zone_pgdat; 2198 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; 2199 unsigned long spfn, epfn, flags; 2200 unsigned long nr_pages = 0; 2201 2202 /* Only the last zone may have deferred pages */ 2203 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) 2204 return false; 2205 2206 pgdat_resize_lock(pgdat, &flags); 2207 2208 /* 2209 * If someone grew this zone while we were waiting for spinlock, return 2210 * true, as there might be enough pages already. 2211 */ 2212 if (first_deferred_pfn != pgdat->first_deferred_pfn) { 2213 pgdat_resize_unlock(pgdat, &flags); 2214 return true; 2215 } 2216 2217 /* 2218 * Initialize at least nr_pages_needed in section chunks. 2219 * If a section has less free memory than nr_pages_needed, the next 2220 * section will be also initialized. 2221 * Note, that it still does not guarantee that allocation of order can 2222 * be satisfied if the sections are fragmented because of memblock 2223 * allocations. 2224 */ 2225 for (spfn = first_deferred_pfn, epfn = SECTION_ALIGN_UP(spfn + 1); 2226 nr_pages < nr_pages_needed && spfn < zone_end_pfn(zone); 2227 spfn = epfn, epfn += PAGES_PER_SECTION) { 2228 nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone, false); 2229 } 2230 2231 /* 2232 * There were no pages to initialize and free which means the zone's 2233 * memory map is completely initialized. 2234 */ 2235 pgdat->first_deferred_pfn = nr_pages ? 
spfn : ULONG_MAX; 2236 2237 pgdat_resize_unlock(pgdat, &flags); 2238 2239 return nr_pages > 0; 2240 } 2241 2242 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 2243 2244 #ifdef CONFIG_CMA 2245 void __init init_cma_reserved_pageblock(struct page *page) 2246 { 2247 unsigned i = pageblock_nr_pages; 2248 struct page *p = page; 2249 2250 do { 2251 __ClearPageReserved(p); 2252 set_page_count(p, 0); 2253 } while (++p, --i); 2254 2255 init_pageblock_migratetype(page, MIGRATE_CMA, false); 2256 set_page_refcounted(page); 2257 /* pages were reserved and not allocated */ 2258 clear_page_tag_ref(page); 2259 __free_pages(page, pageblock_order); 2260 2261 adjust_managed_page_count(page, pageblock_nr_pages); 2262 page_zone(page)->cma_pages += pageblock_nr_pages; 2263 } 2264 /* 2265 * Similar to above, but only set the migrate type and stats. 2266 */ 2267 void __init init_cma_pageblock(struct page *page) 2268 { 2269 init_pageblock_migratetype(page, MIGRATE_CMA, false); 2270 adjust_managed_page_count(page, pageblock_nr_pages); 2271 page_zone(page)->cma_pages += pageblock_nr_pages; 2272 } 2273 #endif 2274 2275 void set_zone_contiguous(struct zone *zone) 2276 { 2277 unsigned long block_start_pfn = zone->zone_start_pfn; 2278 unsigned long block_end_pfn; 2279 2280 block_end_pfn = pageblock_end_pfn(block_start_pfn); 2281 for (; block_start_pfn < zone_end_pfn(zone); 2282 block_start_pfn = block_end_pfn, 2283 block_end_pfn += pageblock_nr_pages) { 2284 2285 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); 2286 2287 if (!__pageblock_pfn_to_page(block_start_pfn, 2288 block_end_pfn, zone)) 2289 return; 2290 cond_resched(); 2291 } 2292 2293 /* We confirm that there is no hole */ 2294 zone->contiguous = true; 2295 } 2296 2297 /* 2298 * Check if a PFN range intersects multiple zones on one or more 2299 * NUMA nodes. Specify the @nid argument if it is known that this 2300 * PFN range is on one node, NUMA_NO_NODE otherwise. 2301 */ 2302 bool pfn_range_intersects_zones(int nid, unsigned long start_pfn, 2303 unsigned long nr_pages) 2304 { 2305 struct zone *zone, *izone = NULL; 2306 2307 for_each_zone(zone) { 2308 if (nid != NUMA_NO_NODE && zone_to_nid(zone) != nid) 2309 continue; 2310 2311 if (zone_intersects(zone, start_pfn, nr_pages)) { 2312 if (izone != NULL) 2313 return true; 2314 izone = zone; 2315 } 2316 2317 } 2318 2319 return false; 2320 } 2321 2322 static void __init mem_init_print_info(void); 2323 void __init page_alloc_init_late(void) 2324 { 2325 struct zone *zone; 2326 int nid; 2327 2328 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 2329 2330 /* There will be num_node_state(N_MEMORY) threads */ 2331 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); 2332 for_each_node_state(nid, N_MEMORY) { 2333 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 2334 } 2335 2336 /* Block until all are initialised */ 2337 wait_for_completion(&pgdat_init_all_done_comp); 2338 2339 /* 2340 * We initialized the rest of the deferred pages. Permanently disable 2341 * on-demand struct page initialization. 2342 */ 2343 static_branch_disable(&deferred_pages); 2344 2345 /* Reinit limits that are based on free pages after the kernel is up */ 2346 files_maxfiles_init(); 2347 #endif 2348 2349 /* Accounting of total+free memory is stable at this point. 
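 * (Deferred struct page initialisation, if enabled, has completed above.)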
*/ 2350 mem_init_print_info(); 2351 buffer_init(); 2352 2353 /* Discard memblock private memory */ 2354 memblock_discard(); 2355 2356 for_each_node_state(nid, N_MEMORY) 2357 shuffle_free_memory(NODE_DATA(nid)); 2358 2359 for_each_populated_zone(zone) 2360 set_zone_contiguous(zone); 2361 2362 /* Initialize page ext after all struct pages are initialized. */ 2363 if (deferred_struct_pages) 2364 page_ext_init(); 2365 2366 page_alloc_sysctl_init(); 2367 } 2368 2369 /* 2370 * Adaptive scale is meant to reduce sizes of hash tables on large memory 2371 * machines. As memory size is increased the scale is also increased but at 2372 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory 2373 * quadruples the scale is increased by one, which means the size of hash table 2374 * only doubles, instead of quadrupling as well. 2375 * Because 32-bit systems cannot have large physical memory, where this scaling 2376 * makes sense, it is disabled on such platforms. 2377 */ 2378 #if __BITS_PER_LONG > 32 2379 #define ADAPT_SCALE_BASE (64ul << 30) 2380 #define ADAPT_SCALE_SHIFT 2 2381 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT) 2382 #endif 2383 2384 /* 2385 * allocate a large system hash table from bootmem 2386 * - it is assumed that the hash table must contain an exact power-of-2 2387 * quantity of entries 2388 * - limit is the number of hash buckets, not the total allocation size 2389 */ 2390 void *__init alloc_large_system_hash(const char *tablename, 2391 unsigned long bucketsize, 2392 unsigned long numentries, 2393 int scale, 2394 int flags, 2395 unsigned int *_hash_shift, 2396 unsigned int *_hash_mask, 2397 unsigned long low_limit, 2398 unsigned long high_limit) 2399 { 2400 unsigned long long max = high_limit; 2401 unsigned long log2qty, size; 2402 void *table; 2403 gfp_t gfp_flags; 2404 bool virt; 2405 bool huge; 2406 2407 /* allow the kernel cmdline to have a say */ 2408 if (!numentries) { 2409 /* round applicable memory size up to nearest megabyte */ 2410 numentries = nr_kernel_pages; 2411 2412 /* It isn't necessary when PAGE_SIZE >= 1MB */ 2413 if (PAGE_SIZE < SZ_1M) 2414 numentries = round_up(numentries, SZ_1M / PAGE_SIZE); 2415 2416 #if __BITS_PER_LONG > 32 2417 if (!high_limit) { 2418 unsigned long adapt; 2419 2420 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; 2421 adapt <<= ADAPT_SCALE_SHIFT) 2422 scale++; 2423 } 2424 #endif 2425 2426 /* limit to 1 bucket per 2^scale bytes of low memory */ 2427 if (scale > PAGE_SHIFT) 2428 numentries >>= (scale - PAGE_SHIFT); 2429 else 2430 numentries <<= (PAGE_SHIFT - scale); 2431 2432 if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 2433 numentries = PAGE_SIZE / bucketsize; 2434 } 2435 numentries = roundup_pow_of_two(numentries); 2436 2437 /* limit allocation size to 1/16 total memory by default */ 2438 if (max == 0) { 2439 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 2440 do_div(max, bucketsize); 2441 } 2442 max = min(max, 0x80000000ULL); 2443 2444 if (numentries < low_limit) 2445 numentries = low_limit; 2446 if (numentries > max) 2447 numentries = max; 2448 2449 log2qty = ilog2(numentries); 2450 2451 gfp_flags = (flags & HASH_ZERO) ? 
GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC; 2452 do { 2453 virt = false; 2454 size = bucketsize << log2qty; 2455 if (flags & HASH_EARLY) { 2456 if (flags & HASH_ZERO) 2457 table = memblock_alloc(size, SMP_CACHE_BYTES); 2458 else 2459 table = memblock_alloc_raw(size, 2460 SMP_CACHE_BYTES); 2461 } else if (get_order(size) > MAX_PAGE_ORDER || hashdist) { 2462 table = vmalloc_huge(size, gfp_flags); 2463 virt = true; 2464 if (table) 2465 huge = is_vm_area_hugepages(table); 2466 } else { 2467 /* 2468 * If bucketsize is not a power-of-two, we may free 2469 * some pages at the end of hash table which 2470 * alloc_pages_exact() automatically does 2471 */ 2472 table = alloc_pages_exact(size, gfp_flags); 2473 kmemleak_alloc(table, size, 1, gfp_flags); 2474 } 2475 } while (!table && size > PAGE_SIZE && --log2qty); 2476 2477 if (!table) 2478 panic("Failed to allocate %s hash table\n", tablename); 2479 2480 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", 2481 tablename, 1UL << log2qty, get_order(size), size, 2482 virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear"); 2483 2484 if (_hash_shift) 2485 *_hash_shift = log2qty; 2486 if (_hash_mask) 2487 *_hash_mask = (1 << log2qty) - 1; 2488 2489 return table; 2490 } 2491 2492 void __init memblock_free_pages(unsigned long pfn, unsigned int order) 2493 { 2494 struct page *page = pfn_to_page(pfn); 2495 2496 if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) { 2497 int nid = early_pfn_to_nid(pfn); 2498 2499 if (!early_page_initialised(pfn, nid)) 2500 return; 2501 } 2502 2503 if (!kmsan_memblock_free_pages(page, order)) { 2504 /* KMSAN will take care of these pages. */ 2505 return; 2506 } 2507 2508 /* pages were reserved and not allocated */ 2509 clear_page_tag_ref(page); 2510 __free_pages_core(page, order, MEMINIT_EARLY); 2511 } 2512 2513 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); 2514 EXPORT_SYMBOL(init_on_alloc); 2515 2516 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); 2517 EXPORT_SYMBOL(init_on_free); 2518 2519 static bool _init_on_alloc_enabled_early __read_mostly 2520 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); 2521 static int __init early_init_on_alloc(char *buf) 2522 { 2523 2524 return kstrtobool(buf, &_init_on_alloc_enabled_early); 2525 } 2526 early_param("init_on_alloc", early_init_on_alloc); 2527 2528 static bool _init_on_free_enabled_early __read_mostly 2529 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON); 2530 static int __init early_init_on_free(char *buf) 2531 { 2532 return kstrtobool(buf, &_init_on_free_enabled_early); 2533 } 2534 early_param("init_on_free", early_init_on_free); 2535 2536 DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled); 2537 2538 static bool check_pages_enabled_early __initdata; 2539 2540 static int __init early_check_pages(char *buf) 2541 { 2542 return kstrtobool(buf, &check_pages_enabled_early); 2543 } 2544 early_param("check_pages", early_check_pages); 2545 2546 /* 2547 * Enable static keys related to various memory debugging and hardening options. 2548 * Some override others, and depend on early params that are evaluated in the 2549 * order of appearance. So we need to first gather the full picture of what was 2550 * enabled, and then make decisions. 2551 */ 2552 static void __init mem_debugging_and_hardening_init(void) 2553 { 2554 bool page_poisoning_requested = false; 2555 bool want_check_pages = check_pages_enabled_early; 2556 2557 #ifdef CONFIG_PAGE_POISONING 2558 /* 2559 * Page poisoning is debug page alloc for some arches. 
If 2560 * either of those options are enabled, enable poisoning. 2561 */ 2562 if (page_poisoning_enabled() || 2563 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && 2564 debug_pagealloc_enabled())) { 2565 static_branch_enable(&_page_poisoning_enabled); 2566 page_poisoning_requested = true; 2567 want_check_pages = true; 2568 } 2569 #endif 2570 2571 if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && 2572 page_poisoning_requested) { 2573 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " 2574 "will take precedence over init_on_alloc and init_on_free\n"); 2575 _init_on_alloc_enabled_early = false; 2576 _init_on_free_enabled_early = false; 2577 } 2578 2579 if (_init_on_alloc_enabled_early) { 2580 want_check_pages = true; 2581 static_branch_enable(&init_on_alloc); 2582 } else { 2583 static_branch_disable(&init_on_alloc); 2584 } 2585 2586 if (_init_on_free_enabled_early) { 2587 want_check_pages = true; 2588 static_branch_enable(&init_on_free); 2589 } else { 2590 static_branch_disable(&init_on_free); 2591 } 2592 2593 if (IS_ENABLED(CONFIG_KMSAN) && 2594 (_init_on_alloc_enabled_early || _init_on_free_enabled_early)) 2595 pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n"); 2596 2597 #ifdef CONFIG_DEBUG_PAGEALLOC 2598 if (debug_pagealloc_enabled()) { 2599 want_check_pages = true; 2600 static_branch_enable(&_debug_pagealloc_enabled); 2601 2602 if (debug_guardpage_minorder()) 2603 static_branch_enable(&_debug_guardpage_enabled); 2604 } 2605 #endif 2606 2607 /* 2608 * Any page debugging or hardening option also enables sanity checking 2609 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's 2610 * enabled already. 2611 */ 2612 if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages) 2613 static_branch_enable(&check_pages_enabled); 2614 } 2615 2616 /* Report memory auto-initialization states for this boot. */ 2617 static void __init report_meminit(void) 2618 { 2619 const char *stack; 2620 2621 if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN)) 2622 stack = "all(pattern)"; 2623 else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO)) 2624 stack = "all(zero)"; 2625 else 2626 stack = "off"; 2627 2628 pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", 2629 stack, str_on_off(want_init_on_alloc(GFP_KERNEL)), 2630 str_on_off(want_init_on_free())); 2631 if (want_init_on_free()) 2632 pr_info("mem auto-init: clearing system memory may take some time...\n"); 2633 } 2634 2635 static void __init mem_init_print_info(void) 2636 { 2637 unsigned long physpages, codesize, datasize, rosize, bss_size; 2638 unsigned long init_code_size, init_data_size; 2639 2640 physpages = get_num_physpages(); 2641 codesize = _etext - _stext; 2642 datasize = _edata - _sdata; 2643 rosize = __end_rodata - __start_rodata; 2644 bss_size = __bss_stop - __bss_start; 2645 init_data_size = __init_end - __init_begin; 2646 init_code_size = _einittext - _sinittext; 2647 2648 /* 2649 * Detect special cases and adjust section sizes accordingly: 2650 * 1) .init.* may be embedded into .data sections 2651 * 2) .init.text.* may be out of [__init_begin, __init_end], 2652 * please refer to arch/tile/kernel/vmlinux.lds.S. 2653 * 3) .rodata.* may be embedded into .text or .data sections. 
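 * The adj_init_size() macro below compensates for this: it subtracts @adj
 * from @size whenever @pos lies inside [@start, @end) (and @size is still
 * larger than @adj), so a region embedded in another section is not
 * counted twice in the pr_info() summary.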
2654 */
2655 #define adj_init_size(start, end, size, pos, adj) \
2656 do { \
2657 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
2658 size -= adj; \
2659 } while (0)
2660
2661 adj_init_size(__init_begin, __init_end, init_data_size,
2662 _sinittext, init_code_size);
2663 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
2664 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
2665 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
2666 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
2667
2668 #undef adj_init_size
2669
2670 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
2671 #ifdef CONFIG_HIGHMEM
2672 ", %luK highmem"
2673 #endif
2674 ")\n",
2675 K(nr_free_pages()), K(physpages),
2676 codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
2677 (init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
2678 K(physpages - totalram_pages() - totalcma_pages),
2679 K(totalcma_pages)
2680 #ifdef CONFIG_HIGHMEM
2681 , K(totalhigh_pages())
2682 #endif
2683 );
2684 }
2685
2686 #ifndef __HAVE_COLOR_ZERO_PAGE
2687 /*
2688 * Architectures that define __HAVE_COLOR_ZERO_PAGE must provide their own arch_setup_zero_pages().
2689 */
2690 void __init __weak arch_setup_zero_pages(void)
2691 {
2692 __zero_page = virt_to_page(empty_zero_page);
2693 }
2694 #endif
2695
2696 static void __init init_zero_page_pfn(void)
2697 {
2698 arch_setup_zero_pages();
2699 zero_page_pfn = page_to_pfn(ZERO_PAGE(0));
2700 }
2701
2702 void __init __weak arch_mm_preinit(void)
2703 {
2704 }
2705
2706 void __init __weak mem_init(void)
2707 {
2708 }
2709
2710 void __init mm_core_init_early(void)
2711 {
2712 hugetlb_cma_reserve();
2713 hugetlb_bootmem_alloc();
2714
2715 free_area_init();
2716 }
2717
2718 /*
2719 * Set up kernel memory allocators
2720 */
2721 void __init mm_core_init(void)
2722 {
2723 arch_mm_preinit();
2724 init_zero_page_pfn();
2725
2726 /* Initializations relying on SMP setup */
2727 BUILD_BUG_ON(MAX_ZONELISTS > 2);
2728 build_all_zonelists(NULL);
2729 page_alloc_init_cpuhp();
2730 alloc_tag_sec_init();
2731 /*
2732 * page_ext requires contiguous pages
2733 * bigger than MAX_PAGE_ORDER, unless SPARSEMEM is used.
2734 */
2735 page_ext_init_flatmem();
2736 mem_debugging_and_hardening_init();
2737 kfence_alloc_pool_and_metadata();
2738 report_meminit();
2739 kmsan_init_shadow();
2740 stack_depot_early_init();
2741
2742 /*
2743 * KHO memory setup must happen while memblock is still active, but
2744 * as close as possible to buddy initialization
2745 */
2746 kho_memory_init();
2747
2748 memblock_free_all();
2749 mem_init();
2750 kmem_cache_init();
2751 /*
2752 * page_owner must be initialized after buddy is ready, and also after
2753 * slab is ready so that stack_depot_init() works properly
2754 */
2755 page_ext_init_flatmem_late();
2756 kmemleak_init();
2757 ptlock_cache_init();
2758 pgtable_cache_init();
2759 debug_objects_mem_init();
2760 vmalloc_init();
2761 /* If page_ext init was not deferred, do it now that vmalloc is fully initialized */
2762 if (!deferred_struct_pages)
2763 page_ext_init();
2764 /* Should be run before the first non-init thread is created */
2765 init_espfix_bsp();
2766 /* Should be run after espfix64 is set up. */
2767 pti_init();
2768 kmsan_init_runtime();
2769 mm_cache_init();
2770 execmem_init();
2771 }
2772