// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include <linux/memblock.h>
#include <linux/page-isolation.h>
#include <linux/padata.h>
#include <linux/nmi.h>
#include <linux/buffer_head.h>
#include <linux/kmemleak.h>
#include <linux/kfence.h>
#include <linux/page_ext.h>
#include <linux/pti.h>
#include <linux/pgtable.h>
#include <linux/stackdepot.h>
#include <linux/swap.h>
#include <linux/cma.h>
#include <linux/crash_dump.h>
#include <linux/execmem.h>
#include <linux/vmstat.h>
#include <linux/kexec_handover.h>
#include <linux/hugetlb.h>
#include "internal.h"
#include "slab.h"
#include "shuffle.h"

#include <asm/setup.h>

#ifndef CONFIG_NUMA
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

/*
 * high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}

void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = BITS_PER_LONG;
	width = shift - NR_NON_PAGEFLAG_BITS;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		LRU_GEN_WIDTH,
		LRU_REFS_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);
	unsigned long ram_pages = totalram_pages();

	/*
	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
	 * (total memory/#cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention for percpu_counter
	 * vm_committed_as, while the max limit is INT_MAX
	 */
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}

static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					       unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
	return 0;
}

__initcall(mm_compute_batch_init);

#endif

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);

static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;

static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;

static bool deferred_struct_pages __meminitdata;

static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);

static int __init cmdline_parse_core(char *p, unsigned long *core,
				     unsigned long *percent)
{
	unsigned long long coremem;
	char *endptr;

	if (!p)
		return -EINVAL;

	/* Value may be a percentage of total memory, otherwise bytes */
	coremem = simple_strtoull(p, &endptr, 0);
	if (*endptr == '%') {
		/* Paranoid check for percent values greater than 100 */
		WARN_ON(coremem > 100);

		*percent = coremem;
	} else {
		coremem = memparse(p, &p);
		/* Paranoid check that UL is enough for the coremem value */
		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

		*core = coremem >> PAGE_SHIFT;
		*percent = 0UL;
	}
	return 0;
}

bool mirrored_kernelcore __initdata_memblock;

/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	/* parse kernelcore=mirror */
	if (parse_option_str(p, "mirror")) {
		mirrored_kernelcore = true;
		return 0;
	}

	return cmdline_parse_core(p, &required_kernelcore,
				  &required_kernelcore_percent);
}
early_param("kernelcore", cmdline_parse_kernelcore);

/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore,
				  &required_movablecore_percent);
}
early_param("movablecore", cmdline_parse_movablecore);

/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	unsigned long totalpages = 0;
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		totalpages += pages;
		if (pages)
			node_set_state(nid, N_MEMORY);
	}
	return totalpages;
}

/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonic
 * increasing memory addresses so that the "highest" populated zone is used
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;
	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}

/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others
 */
static void __init find_zone_movable_pfns_for_nodes(void)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrow the nodemask */
	nodemask_t saved_node_state = node_states[N_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
	struct memblock_region *r;

	/* Need to find movable_zone earlier when movable_node is specified. */
	find_usable_zone_for_movable();

	/*
	 * If movable_node is specified, ignore kernelcore and movablecore
	 * options.
	 */
	if (movable_node_is_enabled()) {
		for_each_mem_region(r) {
			if (!memblock_is_hotpluggable(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = memblock_region_memory_base_pfn(r);
			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		goto out2;
	}

	/*
	 * If kernelcore=mirror is specified, ignore movablecore option
	 */
	if (mirrored_kernelcore) {
		bool mem_below_4gb_not_mirrored = false;

		if (!memblock_has_mirror()) {
			pr_warn("The system has no mirror memory, ignore kernelcore=mirror.\n");
			goto out;
		}

		if (is_kdump_kernel()) {
			pr_warn("The system is under kdump, ignore kernelcore=mirror.\n");
			goto out;
		}

		for_each_mem_region(r) {
			if (memblock_is_mirror(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = memblock_region_memory_base_pfn(r);

			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
				mem_below_4gb_not_mirrored = true;
				continue;
			}

			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		if (mem_below_4gb_not_mirrored)
			pr_warn("This configuration results in unmirrored kernel memory.\n");

		goto out2;
	}

	/*
	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
	 * amount of necessary memory.
	 */
	if (required_kernelcore_percent)
		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
				       10000UL;
	if (required_movablecore_percent)
		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
					10000UL;

	/*
	 * If movablecore= was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
		required_movablecore =
			round_up(required_movablecore, MAX_ORDER_NR_PAGES);
		required_movablecore = min(totalpages, required_movablecore);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/*
	 * If kernelcore was not specified or kernelcore size is larger
	 * than totalpages, there is no ZONE_MOVABLE.
	 */
	if (!required_kernelcore || required_kernelcore >= totalpages)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			unsigned long size_pages;

			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;
				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {

					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn.
			 * Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

out2:
	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		zone_movable_pfn[nid] =
			round_up(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		if (zone_movable_pfn[nid] >= end_pfn)
			zone_movable_pfn[nid] = 0;
	}

out:
	/* restore the node_state */
	node_states[N_MEMORY] = saved_node_state;
}

void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	atomic_set(&page->_mapcount, -1);
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

#ifdef CONFIG_NUMA
/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * treats start/end as pfns.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}

bool hashdist = HASHDIST_DEFAULT;

static int __init set_hashdist(char *str)
{
	return kstrtobool(str, &hashdist) == 0;
}
__setup("hashdist=", set_hashdist);

static inline void fixup_hashdist(void)
{
	if (num_node_state(N_MEMORY) == 1)
		hashdist = false;
}
#else
static inline void fixup_hashdist(void) {}
#endif /* CONFIG_NUMA */

/*
 * Initialize a reserved page unconditionally, finding its zone first.
 */
void __meminit __init_page_from_nid(unsigned long pfn, int nid)
{
	pg_data_t *pgdat;
	int zid;

	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_spans_pfn(zone, pfn))
			break;
	}
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);

	if (pageblock_aligned(pfn))
		init_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_MOVABLE,
					   false);
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is initialised */
static inline bool __meminit early_page_initialised(unsigned long pfn, int nid)
{
	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return false;

	return true;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	if (early_page_ext_enabled())
		return false;

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
		return true;

	/*
	 * prev_end_pfn static that contains the end of previous zone
	 * No need to protect because called very early in boot before smp_init.
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}

static void __meminit __init_deferred_page(unsigned long pfn, int nid)
{
	if (early_page_initialised(pfn, nid))
		return;

	__init_page_from_nid(pfn, nid);
}
#else
static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}

static inline bool early_page_initialised(unsigned long pfn, int nid)
{
	return true;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}

static inline void __init_deferred_page(unsigned long pfn, int nid)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

void __meminit init_deferred_page(unsigned long pfn, int nid)
{
	__init_deferred_page(pfn, nid);
}

/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
{
	static struct memblock_region *r;

	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
			for_each_mem_region(r) {
				if (*pfn < memblock_region_memory_end_pfn(r))
					break;
			}
		}
		if (*pfn >= memblock_region_memory_base_pfn(r) &&
		    memblock_is_mirror(r)) {
			*pfn = memblock_region_memory_end_pfn(r);
			return true;
		}
	}
	return false;
}

/*
 * Only struct pages that correspond to ranges defined by memblock.memory
 * are zeroed and initialized by going through __init_single_page() during
 * memmap_init_zone_range().
 *
 * But, there could be struct pages that correspond to holes in
 * memblock.memory. This can happen because of the following reasons:
 * - physical memory bank size is not necessarily the exact multiple of the
 *   arbitrary section size
 * - early reserved memory may not be listed in memblock.memory
 * - non-memory regions covered by the contiguous flatmem mapping
 * - memory layouts defined with memmap= kernel parameter may not align
 *   nicely with memmap sections
 *
 * Explicitly initialize those struct pages so that:
 * - PG_Reserved is set
 * - zone and node links point to zone and node that span the page if the
 *   hole is in the middle of a zone
 * - zone and node links point to adjacent zone/node if the hole falls on
 *   the zone boundary; the pages in such holes will be prepended to the
 *   zone/node above the hole except for the trailing pages in the last
 *   section that will be appended to the zone/node below.
 */
static void __init init_unavailable_range(unsigned long spfn,
					  unsigned long epfn,
					  int zone, int node)
{
	unsigned long pfn;
	u64 pgcnt = 0;

	for_each_valid_pfn(pfn, spfn, epfn) {
		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
		__SetPageReserved(pfn_to_page(pfn));
		pgcnt++;
	}

	if (pgcnt)
		pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
			node, zone_names[zone], pgcnt);
}

/*
 * Initially all pages are reserved - free ones are freed
 * up by memblock_free_all() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE).
 * Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, unsigned long zone_end_pfn,
		enum meminit_context context,
		struct vmem_altmap *altmap, int migratetype,
		bool isolate_pageblock)
{
	unsigned long pfn, end_pfn = start_pfn + size;
	struct page *page;

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

#ifdef CONFIG_ZONE_DEVICE
	/*
	 * Honor reservation requested by the driver for this ZONE_DEVICE
	 * memory. We limit the total number of pages to initialize to just
	 * those that might contain the memory mapping. We will defer the
	 * ZONE_DEVICE page initialization until after we have released
	 * the hotplug lock.
	 */
	if (zone == ZONE_DEVICE) {
		if (!altmap)
			return;

		if (start_pfn == altmap->base_pfn)
			start_pfn += altmap->reserve;
		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
	}
#endif

	for (pfn = start_pfn; pfn < end_pfn; ) {
		/*
		 * There can be holes in boot-time mem_map[]s handed to this
		 * function. They do not exist on hotplugged memory.
		 */
		if (context == MEMINIT_EARLY) {
			if (overlap_memmap_init(zone, &pfn))
				continue;
			if (defer_init(nid, pfn, zone_end_pfn)) {
				deferred_struct_pages = true;
				break;
			}
		}

		page = pfn_to_page(pfn);
		__init_single_page(page, pfn, zone, nid);
		if (context == MEMINIT_HOTPLUG) {
#ifdef CONFIG_ZONE_DEVICE
			if (zone == ZONE_DEVICE)
				__SetPageReserved(page);
			else
#endif
				__SetPageOffline(page);
		}

		/*
		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
		 * such that unmovable allocations won't be scattered all
		 * over the place during system boot.
		 */
		if (pageblock_aligned(pfn)) {
			init_pageblock_migratetype(page, migratetype,
						   isolate_pageblock);
			cond_resched();
		}
		pfn++;
	}
}

static void __init memmap_init_zone_range(struct zone *zone,
					  unsigned long start_pfn,
					  unsigned long end_pfn,
					  unsigned long *hole_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);

	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);

	if (start_pfn >= end_pfn)
		return;

	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE,
			  false);

	if (*hole_pfn < start_pfn)
		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);

	*hole_pfn = end_pfn;
}

static void __init memmap_init(void)
{
	unsigned long start_pfn, end_pfn;
	unsigned long hole_pfn = 0;
	int i, j, zone_id = 0, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		struct pglist_data *node = NODE_DATA(nid);

		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = node->node_zones + j;

			if (!populated_zone(zone))
				continue;

			memmap_init_zone_range(zone, start_pfn, end_pfn,
					       &hole_pfn);
			zone_id = j;
		}
	}

	/*
	 * Initialize the memory map for the hole in the range [memory_end,
	 * section_end] for SPARSEMEM and in the range [memory_end, memmap_end]
	 * for FLATMEM.
	 * Append the pages in this hole to the highest zone in the last
	 * node.
	 */
#ifdef CONFIG_SPARSEMEM
	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
#else
	end_pfn = round_up(end_pfn, MAX_ORDER_NR_PAGES);
#endif
	if (hole_pfn < end_pfn)
		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
}

#ifdef CONFIG_ZONE_DEVICE
static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
					  unsigned long zone_idx, int nid,
					  struct dev_pagemap *pgmap)
{

	__init_single_page(page, pfn, zone_idx, nid);

	/*
	 * Mark page reserved as it will need to wait for onlining
	 * phase for it to be fully associated with a zone.
	 *
	 * We can use the non-atomic __set_bit operation for setting
	 * the flag as we are still initializing the pages.
	 */
	__SetPageReserved(page);

	/*
	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
	 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
	 * ever freed or placed on a driver-private list.
	 */
	page_folio(page)->pgmap = pgmap;
	page->zone_device_data = NULL;

	/*
	 * Mark the block movable so that blocks are reserved for
	 * movable at startup. This will force kernel allocations
	 * to reserve their blocks rather than leaking throughout
	 * the address space during boot when many long-lived
	 * kernel allocations are made.
	 *
	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
	 * because this is done early in section_activate()
	 */
	if (pageblock_aligned(pfn)) {
		init_pageblock_migratetype(page, MIGRATE_MOVABLE, false);
		cond_resched();
	}

	/*
	 * ZONE_DEVICE pages other than MEMORY_TYPE_GENERIC are released
	 * directly to the driver page allocator which will set the page count
	 * to 1 when allocating the page.
	 *
	 * MEMORY_TYPE_GENERIC and MEMORY_TYPE_FS_DAX pages automatically have
	 * their refcount reset to one whenever they are freed (ie. after
	 * their refcount drops to 0).
	 */
	switch (pgmap->type) {
	case MEMORY_DEVICE_FS_DAX:
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_COHERENT:
	case MEMORY_DEVICE_PCI_P2PDMA:
		set_page_count(page, 0);
		break;

	case MEMORY_DEVICE_GENERIC:
		break;
	}
}

/*
 * With compound page geometry and when struct pages are stored in ram most
 * tail pages are reused. Consequently, the amount of unique struct pages to
 * initialize is a lot smaller than the total amount of struct pages being
 * mapped. This is a paired / mild layering violation with explicit knowledge
 * of how the sparse_vmemmap internals handle compound pages in the lack
 * of an altmap. See vmemmap_populate_compound_pages().
 */
static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
					      struct dev_pagemap *pgmap)
{
	if (!vmemmap_can_optimize(altmap, pgmap))
		return pgmap_vmemmap_nr(pgmap);

	return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
}

static void __ref memmap_init_compound(struct page *head,
				       unsigned long head_pfn,
				       unsigned long zone_idx, int nid,
				       struct dev_pagemap *pgmap,
				       unsigned long nr_pages)
{
	unsigned long pfn, end_pfn = head_pfn + nr_pages;
	unsigned int order = pgmap->vmemmap_shift;

	/*
	 * We have to initialize the pages, including setting up page links.
	 * prep_compound_page() does not take care of that, so instead we
	 * open-code prep_compound_page() so we can take care of initializing
	 * the pages in the same go.
	 */
	__SetPageHead(head);
	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
		prep_compound_tail(head, pfn - head_pfn);
		set_page_count(page, 0);
	}
	prep_compound_head(head, order);
}

void __ref memmap_init_zone_device(struct zone *zone,
				   unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct dev_pagemap *pgmap)
{
	unsigned long pfn, end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
	unsigned long zone_idx = zone_idx(zone);
	unsigned long start = jiffies;
	int nid = pgdat->node_id;

	if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
		return;

	/*
	 * The call to memmap_init should have already taken care
	 * of the pages reserved for the memmap, so we can just jump to
	 * the end of that region and start processing the device pages.
	 */
	if (altmap) {
		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
		nr_pages = end_pfn - start_pfn;
	}

	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
		struct page *page = pfn_to_page(pfn);

		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);

		if (pfns_per_compound == 1)
			continue;

		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
				     compound_nr_pages(altmap, pgmap));
	}

	pr_debug("%s initialised %lu pages in %ums\n", __func__,
		 nr_pages, jiffies_to_msecs(jiffies - start));
}
#endif

/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independent of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory addresses
 */
static void __init adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (!mirrored_kernelcore &&
			*zone_start_pfn < zone_movable_pfn[nid] &&
			*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}

/*
 * Return the number of holes in a range on a node.
 * If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
static unsigned long __init __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	unsigned long nr_absent = range_end_pfn - range_start_pfn;
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
		nr_absent -= end_pfn - start_pfn;
	}
	return nr_absent;
}

/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * Return: the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
static unsigned long __init zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long zone_start_pfn,
					unsigned long zone_end_pfn)
{
	unsigned long nr_absent;

	/* zone is empty, we don't have any absent pages */
	if (zone_start_pfn == zone_end_pfn)
		return 0;

	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);

	/*
	 * ZONE_MOVABLE handling.
	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
	 * and vice versa.
	 */
	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
		unsigned long start_pfn, end_pfn;
		struct memblock_region *r;

		for_each_mem_region(r) {
			start_pfn = clamp(memblock_region_memory_base_pfn(r),
					  zone_start_pfn, zone_end_pfn);
			end_pfn = clamp(memblock_region_memory_end_pfn(r),
					zone_start_pfn, zone_end_pfn);

			if (zone_type == ZONE_MOVABLE &&
			    memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;

			if (zone_type == ZONE_NORMAL &&
			    !memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;
		}
	}

	return nr_absent;
}

/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __init zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];

	/* Get the start and end of the zone */
	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
	adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn,
					   zone_start_pfn, zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return *zone_end_pfn - *zone_start_pfn;
}

static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
		z->zone_start_pfn = 0;
		z->spanned_pages = 0;
		z->present_pages = 0;
#if defined(CONFIG_MEMORY_HOTPLUG)
		z->present_early_pages = 0;
#endif
	}

	pgdat->node_spanned_pages = 0;
	pgdat->node_present_pages = 0;
	pr_debug("On node %d totalpages: 0\n", pgdat->node_id);
}

static void __init calc_nr_kernel_pages(void)
{
	unsigned long start_pfn, end_pfn;
	phys_addr_t start_addr, end_addr;
	u64 u;
#ifdef CONFIG_HIGHMEM
	unsigned long high_zone_low = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM];
#endif

	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
		start_pfn = PFN_UP(start_addr);
		end_pfn = PFN_DOWN(end_addr);

		if (start_pfn < end_pfn) {
			nr_all_pages += end_pfn - start_pfn;
#ifdef CONFIG_HIGHMEM
			start_pfn = clamp(start_pfn, 0, high_zone_low);
			end_pfn = clamp(end_pfn, 0, high_zone_low);
#endif
			nr_kernel_pages += end_pfn - start_pfn;
		}
	}
}

static void __init calculate_node_totalpages(struct pglist_data *pgdat,
					     unsigned long node_start_pfn,
					     unsigned long node_end_pfn)
{
	unsigned long realtotalpages = 0, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;
		unsigned long zone_start_pfn, zone_end_pfn;
		unsigned long spanned, absent;
		unsigned long real_size;

		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
						     node_start_pfn,
						     node_end_pfn,
						     &zone_start_pfn,
						     &zone_end_pfn);
		absent = zone_absent_pages_in_node(pgdat->node_id, i,
						   zone_start_pfn,
						   zone_end_pfn);

		real_size = spanned - absent;

		if (spanned)
			zone->zone_start_pfn = zone_start_pfn;
		else
			zone->zone_start_pfn = 0;
		zone->spanned_pages = spanned;
		zone->present_pages = real_size;
#if defined(CONFIG_MEMORY_HOTPLUG)
		zone->present_early_pages = real_size;
#endif

		totalpages += spanned;
		realtotalpages += real_size;
	}

	pgdat->node_spanned_pages = totalpages;
	pgdat->node_present_pages = realtotalpages;
	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{
	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;

	spin_lock_init(&ds_queue->split_queue_lock);
	INIT_LIST_HEAD(&ds_queue->split_queue);
	ds_queue->split_queue_len = 0;
}
#else
static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
#endif

#ifdef CONFIG_COMPACTION
static void pgdat_init_kcompactd(struct pglist_data *pgdat)
{
	init_waitqueue_head(&pgdat->kcompactd_wait);
}
#else
static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
#endif

static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
{
	int i;

	pgdat_resize_init(pgdat);
	pgdat_kswapd_lock_init(pgdat);

	pgdat_init_split_queue(pgdat);
pgdat_init_kcompactd(pgdat); 1385 1386 init_waitqueue_head(&pgdat->kswapd_wait); 1387 init_waitqueue_head(&pgdat->pfmemalloc_wait); 1388 1389 for (i = 0; i < NR_VMSCAN_THROTTLE; i++) 1390 init_waitqueue_head(&pgdat->reclaim_wait[i]); 1391 1392 pgdat_page_ext_init(pgdat); 1393 lruvec_init(&pgdat->__lruvec); 1394 } 1395 1396 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, 1397 unsigned long remaining_pages) 1398 { 1399 atomic_long_set(&zone->managed_pages, remaining_pages); 1400 zone_set_nid(zone, nid); 1401 zone->name = zone_names[idx]; 1402 zone->zone_pgdat = NODE_DATA(nid); 1403 spin_lock_init(&zone->lock); 1404 zone_seqlock_init(zone); 1405 zone_pcp_init(zone); 1406 } 1407 1408 static void __meminit zone_init_free_lists(struct zone *zone) 1409 { 1410 unsigned int order, t; 1411 for_each_migratetype_order(order, t) { 1412 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 1413 zone->free_area[order].nr_free = 0; 1414 } 1415 1416 #ifdef CONFIG_UNACCEPTED_MEMORY 1417 INIT_LIST_HEAD(&zone->unaccepted_pages); 1418 #endif 1419 } 1420 1421 void __meminit init_currently_empty_zone(struct zone *zone, 1422 unsigned long zone_start_pfn, 1423 unsigned long size) 1424 { 1425 struct pglist_data *pgdat = zone->zone_pgdat; 1426 int zone_idx = zone_idx(zone) + 1; 1427 1428 if (zone_idx > pgdat->nr_zones) 1429 pgdat->nr_zones = zone_idx; 1430 1431 zone->zone_start_pfn = zone_start_pfn; 1432 1433 mminit_dprintk(MMINIT_TRACE, "memmap_init", 1434 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 1435 pgdat->node_id, 1436 (unsigned long)zone_idx(zone), 1437 zone_start_pfn, (zone_start_pfn + size)); 1438 1439 zone_init_free_lists(zone); 1440 zone->initialized = 1; 1441 } 1442 1443 #ifndef CONFIG_SPARSEMEM 1444 /* 1445 * Calculate the size of the zone->pageblock_flags rounded to an unsigned long 1446 * Start by making sure zonesize is a multiple of pageblock_order by rounding 1447 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 1448 * round what is now in bits to nearest long in bits, then return it in 1449 * bytes. 
1450 */ 1451 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 1452 { 1453 unsigned long usemapsize; 1454 1455 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 1456 usemapsize = round_up(zonesize, pageblock_nr_pages); 1457 usemapsize = usemapsize >> pageblock_order; 1458 usemapsize *= NR_PAGEBLOCK_BITS; 1459 usemapsize = round_up(usemapsize, BITS_PER_LONG); 1460 1461 return usemapsize / BITS_PER_BYTE; 1462 } 1463 1464 static void __ref setup_usemap(struct zone *zone) 1465 { 1466 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, 1467 zone->spanned_pages); 1468 zone->pageblock_flags = NULL; 1469 if (usemapsize) { 1470 zone->pageblock_flags = 1471 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES, 1472 zone_to_nid(zone)); 1473 if (!zone->pageblock_flags) 1474 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", 1475 usemapsize, zone->name, zone_to_nid(zone)); 1476 } 1477 } 1478 #else 1479 static inline void setup_usemap(struct zone *zone) {} 1480 #endif /* CONFIG_SPARSEMEM */ 1481 1482 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 1483 1484 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 1485 void __init set_pageblock_order(void) 1486 { 1487 unsigned int order = PAGE_BLOCK_MAX_ORDER; 1488 1489 /* Check that pageblock_nr_pages has not already been setup */ 1490 if (pageblock_order) 1491 return; 1492 1493 /* Don't let pageblocks exceed the maximum allocation granularity. */ 1494 if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order) 1495 order = HUGETLB_PAGE_ORDER; 1496 1497 /* 1498 * Assume the largest contiguous order of interest is a huge page. 1499 * This value may be variable depending on boot parameters on powerpc. 1500 */ 1501 pageblock_order = order; 1502 } 1503 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 1504 1505 /* 1506 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 1507 * is unused as pageblock_order is set at compile-time. See 1508 * include/linux/pageblock-flags.h for the values of pageblock_order based on 1509 * the kernel config 1510 */ 1511 void __init set_pageblock_order(void) 1512 { 1513 } 1514 1515 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 1516 1517 /* 1518 * Set up the zone data structures 1519 * - init pgdat internals 1520 * - init all zones belonging to this node 1521 * 1522 * NOTE: this function is only called during memory hotplug 1523 */ 1524 #ifdef CONFIG_MEMORY_HOTPLUG 1525 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) 1526 { 1527 int nid = pgdat->node_id; 1528 enum zone_type z; 1529 int cpu; 1530 1531 pgdat_init_internals(pgdat); 1532 1533 if (pgdat->per_cpu_nodestats == &boot_nodestats) 1534 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); 1535 1536 /* 1537 * Reset the nr_zones, order and highest_zoneidx before reuse. 1538 * Note that kswapd will init kswapd_highest_zoneidx properly 1539 * when it starts in the near future. 1540 */ 1541 pgdat->nr_zones = 0; 1542 pgdat->kswapd_order = 0; 1543 pgdat->kswapd_highest_zoneidx = 0; 1544 pgdat->node_start_pfn = 0; 1545 pgdat->node_present_pages = 0; 1546 1547 for_each_online_cpu(cpu) { 1548 struct per_cpu_nodestat *p; 1549 1550 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); 1551 memset(p, 0, sizeof(*p)); 1552 } 1553 1554 /* 1555 * When memory is hot-added, all the memory is in offline state. So 1556 * clear all zones' present_pages and managed_pages because they will 1557 * be updated in online_pages() and offline_pages(). 
1558 */ 1559 for (z = 0; z < MAX_NR_ZONES; z++) { 1560 struct zone *zone = pgdat->node_zones + z; 1561 1562 zone->present_pages = 0; 1563 zone_init_internals(zone, z, nid, 0); 1564 } 1565 } 1566 #endif 1567 1568 static void __init free_area_init_core(struct pglist_data *pgdat) 1569 { 1570 enum zone_type j; 1571 int nid = pgdat->node_id; 1572 1573 pgdat_init_internals(pgdat); 1574 pgdat->per_cpu_nodestats = &boot_nodestats; 1575 1576 for (j = 0; j < MAX_NR_ZONES; j++) { 1577 struct zone *zone = pgdat->node_zones + j; 1578 unsigned long size = zone->spanned_pages; 1579 1580 /* 1581 * Initialize zone->managed_pages as 0 , it will be reset 1582 * when memblock allocator frees pages into buddy system. 1583 */ 1584 zone_init_internals(zone, j, nid, zone->present_pages); 1585 1586 if (!size) 1587 continue; 1588 1589 setup_usemap(zone); 1590 init_currently_empty_zone(zone, zone->zone_start_pfn, size); 1591 } 1592 } 1593 1594 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align, 1595 phys_addr_t min_addr, int nid, bool exact_nid) 1596 { 1597 void *ptr; 1598 1599 /* 1600 * Kmemleak will explicitly scan mem_map by traversing all valid 1601 * `struct *page`,so memblock does not need to be added to the scan list. 1602 */ 1603 if (exact_nid) 1604 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr, 1605 MEMBLOCK_ALLOC_NOLEAKTRACE, 1606 nid); 1607 else 1608 ptr = memblock_alloc_try_nid_raw(size, align, min_addr, 1609 MEMBLOCK_ALLOC_NOLEAKTRACE, 1610 nid); 1611 1612 if (ptr && size > 0) 1613 page_init_poison(ptr, size); 1614 1615 return ptr; 1616 } 1617 1618 #ifdef CONFIG_FLATMEM 1619 static void __init alloc_node_mem_map(struct pglist_data *pgdat) 1620 { 1621 unsigned long start, offset, size, end; 1622 struct page *map; 1623 1624 /* Skip empty nodes */ 1625 if (!pgdat->node_spanned_pages) 1626 return; 1627 1628 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 1629 offset = pgdat->node_start_pfn - start; 1630 /* 1631 * The zone's endpoints aren't required to be MAX_PAGE_ORDER 1632 * aligned but the node_mem_map endpoints must be in order 1633 * for the buddy allocator to function correctly. 1634 */ 1635 end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES); 1636 size = (end - start) * sizeof(struct page); 1637 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, 1638 pgdat->node_id, false); 1639 if (!map) 1640 panic("Failed to allocate %ld bytes for node %d memory map\n", 1641 size, pgdat->node_id); 1642 pgdat->node_mem_map = map + offset; 1643 memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE)); 1644 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", 1645 __func__, pgdat->node_id, (unsigned long)pgdat, 1646 (unsigned long)pgdat->node_mem_map); 1647 1648 /* the global mem_map is just set as node 0's */ 1649 WARN_ON(pgdat != NODE_DATA(0)); 1650 1651 mem_map = pgdat->node_mem_map; 1652 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 1653 mem_map -= offset; 1654 1655 max_mapnr = end - start; 1656 } 1657 #else 1658 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } 1659 #endif /* CONFIG_FLATMEM */ 1660 1661 /** 1662 * get_pfn_range_for_nid - Return the start and end page frames for a node 1663 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 1664 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 1665 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 
1666 * 1667 * It returns the start and end page frame of a node based on information 1668 * provided by memblock_set_node(). If called for a node 1669 * with no available memory, the start and end PFNs will be 0. 1670 */ 1671 void __init get_pfn_range_for_nid(unsigned int nid, 1672 unsigned long *start_pfn, unsigned long *end_pfn) 1673 { 1674 unsigned long this_start_pfn, this_end_pfn; 1675 int i; 1676 1677 *start_pfn = -1UL; 1678 *end_pfn = 0; 1679 1680 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 1681 *start_pfn = min(*start_pfn, this_start_pfn); 1682 *end_pfn = max(*end_pfn, this_end_pfn); 1683 } 1684 1685 if (*start_pfn == -1UL) 1686 *start_pfn = 0; 1687 } 1688 1689 static void __init free_area_init_node(int nid) 1690 { 1691 pg_data_t *pgdat = NODE_DATA(nid); 1692 unsigned long start_pfn = 0; 1693 unsigned long end_pfn = 0; 1694 1695 /* pg_data_t should be reset to zero when it's allocated */ 1696 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); 1697 1698 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 1699 1700 pgdat->node_id = nid; 1701 pgdat->node_start_pfn = start_pfn; 1702 pgdat->per_cpu_nodestats = NULL; 1703 1704 if (start_pfn != end_pfn) { 1705 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 1706 (u64)start_pfn << PAGE_SHIFT, 1707 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 1708 1709 calculate_node_totalpages(pgdat, start_pfn, end_pfn); 1710 } else { 1711 pr_info("Initmem setup node %d as memoryless\n", nid); 1712 1713 reset_memoryless_node_totalpages(pgdat); 1714 } 1715 1716 alloc_node_mem_map(pgdat); 1717 pgdat_set_deferred_range(pgdat); 1718 1719 free_area_init_core(pgdat); 1720 lru_gen_init_pgdat(pgdat); 1721 } 1722 1723 /* Any regular or high memory on that node? */ 1724 static void __init check_for_memory(pg_data_t *pgdat) 1725 { 1726 enum zone_type zone_type; 1727 1728 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 1729 struct zone *zone = &pgdat->node_zones[zone_type]; 1730 if (populated_zone(zone)) { 1731 if (IS_ENABLED(CONFIG_HIGHMEM)) 1732 node_set_state(pgdat->node_id, N_HIGH_MEMORY); 1733 if (zone_type <= ZONE_NORMAL) 1734 node_set_state(pgdat->node_id, N_NORMAL_MEMORY); 1735 break; 1736 } 1737 } 1738 } 1739 1740 #if MAX_NUMNODES > 1 1741 /* 1742 * Figure out the number of possible node ids. 1743 */ 1744 void __init setup_nr_node_ids(void) 1745 { 1746 unsigned int highest; 1747 1748 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 1749 nr_node_ids = highest + 1; 1750 } 1751 #endif 1752 1753 /* 1754 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For 1755 * such cases we allow max_zone_pfn sorted in the descending order 1756 */ 1757 static bool arch_has_descending_max_zone_pfns(void) 1758 { 1759 return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40); 1760 } 1761 1762 static void __init set_high_memory(void) 1763 { 1764 phys_addr_t highmem = memblock_end_of_DRAM(); 1765 1766 /* 1767 * Some architectures (e.g. ARM) set high_memory very early and 1768 * use it in arch setup code. 
1769 * If an architecture already set high_memory don't overwrite it 1770 */ 1771 if (high_memory) 1772 return; 1773 1774 #ifdef CONFIG_HIGHMEM 1775 if (arch_has_descending_max_zone_pfns() || 1776 highmem > PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM])) 1777 highmem = PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]); 1778 #endif 1779 1780 high_memory = phys_to_virt(highmem - 1) + 1; 1781 } 1782 1783 /** 1784 * free_area_init - Initialise all pg_data_t and zone data 1785 * 1786 * This will call free_area_init_node() for each active node in the system. 1787 * Using the page ranges provided by memblock_set_node(), the size of each 1788 * zone in each node and their holes is calculated. If the maximum PFN 1789 * between two adjacent zones match, it is assumed that the zone is empty. 1790 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 1791 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 1792 * starts where the previous one ended. For example, ZONE_DMA32 starts 1793 * at arch_max_dma_pfn. 1794 */ 1795 static void __init free_area_init(void) 1796 { 1797 unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 }; 1798 unsigned long start_pfn, end_pfn; 1799 int i, nid, zone; 1800 bool descending; 1801 1802 arch_zone_limits_init(max_zone_pfn); 1803 sparse_init(); 1804 1805 start_pfn = PHYS_PFN(memblock_start_of_DRAM()); 1806 descending = arch_has_descending_max_zone_pfns(); 1807 1808 for (i = 0; i < MAX_NR_ZONES; i++) { 1809 if (descending) 1810 zone = MAX_NR_ZONES - i - 1; 1811 else 1812 zone = i; 1813 1814 if (zone == ZONE_MOVABLE) 1815 continue; 1816 1817 end_pfn = max(max_zone_pfn[zone], start_pfn); 1818 arch_zone_lowest_possible_pfn[zone] = start_pfn; 1819 arch_zone_highest_possible_pfn[zone] = end_pfn; 1820 1821 start_pfn = end_pfn; 1822 } 1823 1824 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 1825 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 1826 find_zone_movable_pfns_for_nodes(); 1827 1828 /* Print out the zone ranges */ 1829 pr_info("Zone ranges:\n"); 1830 for (i = 0; i < MAX_NR_ZONES; i++) { 1831 if (i == ZONE_MOVABLE) 1832 continue; 1833 pr_info(" %-8s ", zone_names[i]); 1834 if (arch_zone_lowest_possible_pfn[i] == 1835 arch_zone_highest_possible_pfn[i]) 1836 pr_cont("empty\n"); 1837 else 1838 pr_cont("[mem %#018Lx-%#018Lx]\n", 1839 (u64)arch_zone_lowest_possible_pfn[i] 1840 << PAGE_SHIFT, 1841 ((u64)arch_zone_highest_possible_pfn[i] 1842 << PAGE_SHIFT) - 1); 1843 } 1844 1845 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 1846 pr_info("Movable zone start for each node\n"); 1847 for (i = 0; i < MAX_NUMNODES; i++) { 1848 if (zone_movable_pfn[i]) 1849 pr_info(" Node %d: %#018Lx\n", i, 1850 (u64)zone_movable_pfn[i] << PAGE_SHIFT); 1851 } 1852 1853 /* 1854 * Print out the early node map, and initialize the 1855 * subsection-map relative to active online memory ranges to 1856 * enable future "sub-section" extensions of the memory map. 
	 */
	pr_info("Early memory node ranges\n");
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
			(u64)start_pfn << PAGE_SHIFT,
			((u64)end_pfn << PAGE_SHIFT) - 1);
		subsection_map_init(start_pfn, end_pfn - start_pfn);
	}

	/* Initialise every node */
	mminit_verify_pageflags_layout();
	setup_nr_node_ids();
	set_pageblock_order();

	for_each_node(nid) {
		pg_data_t *pgdat;

		/*
		 * If an architecture has not allocated node data for
		 * this node, presume the node is memoryless or offline.
		 */
		if (!NODE_DATA(nid))
			alloc_offline_node_data(nid);

		pgdat = NODE_DATA(nid);
		free_area_init_node(nid);

		/*
		 * No sysfs hierarchy will be created via register_node()
		 * for a memory-less node, because it is not marked as N_MEMORY
		 * and won't be set online later. The benefit is that userspace
		 * programs won't be confused by sysfs files/directories of a
		 * memory-less node. The pgdat will get fully initialized by
		 * hotadd_init_pgdat() when memory is hotplugged into this node.
		 */
		if (pgdat->node_present_pages) {
			node_set_state(nid, N_MEMORY);
			check_for_memory(pgdat);
		}
	}

	for_each_node_state(nid, N_MEMORY)
		sparse_vmemmap_init_nid_late(nid);

	calc_nr_kernel_pages();
	memmap_init();

	/* disable hash distribution for systems with a single node */
	fixup_hashdist();

	set_high_memory();
}

/**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
 * This function should be called after node map is populated and sorted.
 * It calculates the maximum power of two alignment which can distinguish
 * all the nodes.
 *
 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is
 * shifted, 1GiB is enough and this function will indicate so.
 *
 * This is used to test whether pfn -> nid mapping of the chosen memory
 * model has fine enough granularity to avoid incorrect mapping for the
 * populated node map.
 *
 * Return: the determined alignment in pfn's. 0 if there is no alignment
 * requirement (single node).
 */
unsigned long __init node_map_pfn_alignment(void)
{
	unsigned long accl_mask = 0, last_end = 0;
	unsigned long start, end, mask;
	int last_nid = NUMA_NO_NODE;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
		if (!start || last_nid < 0 || last_nid == nid) {
			last_nid = nid;
			last_end = end;
			continue;
		}

		/*
		 * Start with a mask granular enough to pin-point to the
		 * start pfn and tick off bits one-by-one until it becomes
		 * too coarse to separate the current node from the last.
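		 *
		 * Illustrative example (assumed values, not from the code):
		 * with last_end == 0x8000 and start == 0xc000, __ffs(start)
		 * is 14, so the initial mask keeps bits 14 and up (0x4000
		 * granularity). Rounding start down at 0x8000 granularity
		 * still gives 0x8000 >= last_end, so the loop widens the mask
		 * once; rounding down at 0x10000 granularity would give 0,
		 * which is below last_end, so it stops. This node pair thus
		 * contributes a 0x8000-page alignment to accl_mask.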
		 */
		mask = ~((1 << __ffs(start)) - 1);
		while (mask && last_end <= (start & (mask << 1)))
			mask <<= 1;

		/* accumulate all internode masks */
		accl_mask |= mask;
	}

	/* convert mask to number of pages */
	return ~accl_mask + 1;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_pages(unsigned long pfn,
		unsigned long nr_pages)
{
	struct page *page;
	unsigned long i;

	if (!nr_pages)
		return;

	page = pfn_to_page(pfn);

	/* Free a large naturally-aligned chunk if possible */
	if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
		for (i = 0; i < nr_pages; i += pageblock_nr_pages)
			init_pageblock_migratetype(page + i, MIGRATE_MOVABLE,
						   false);
		__free_pages_core(page, MAX_PAGE_ORDER, MEMINIT_EARLY);
		return;
	}

	/* Accept chunks smaller than MAX_PAGE_ORDER upfront */
	accept_memory(PFN_PHYS(pfn), nr_pages * PAGE_SIZE);

	for (i = 0; i < nr_pages; i++, page++, pfn++) {
		if (pageblock_aligned(pfn))
			init_pageblock_migratetype(page, MIGRATE_MOVABLE,
						   false);
		__free_pages_core(page, 0, MEMINIT_EARLY);
	}
}

/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
	if (atomic_dec_and_test(&pgdat_init_n_undone))
		complete(&pgdat_init_all_done_comp);
}

/*
 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
 * by performing it only once every MAX_ORDER_NR_PAGES.
 * Return number of pages initialized.
 */
static unsigned long __init deferred_init_pages(struct zone *zone,
		unsigned long pfn, unsigned long end_pfn)
{
	int nid = zone_to_nid(zone);
	unsigned long nr_pages = end_pfn - pfn;
	int zid = zone_idx(zone);
	struct page *page = pfn_to_page(pfn);

	for (; pfn < end_pfn; pfn++, page++)
		__init_single_page(page, pfn, zid, nid);
	return nr_pages;
}

/*
 * Initialize and free pages.
 *
 * At this point reserved pages and struct pages that correspond to holes in
 * memblock.memory are already initialized, so every free range has a valid
 * memory map around it.
 * This ensures that accesses to pages ahead of the range being initialized
 * (when computing the buddy page in __free_one_page()) always read a valid
 * struct page.
 *
 * To improve CPU cache locality, the loop is broken up along max page order
 * boundaries.
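 *
 * Each max-order-aligned chunk is fully initialised by deferred_init_pages()
 * before deferred_free_pages() hands it to the buddy allocator, and between
 * chunks the caller either reschedules (kthread context) or touches the NMI
 * watchdog (early, atomic context), depending on @can_resched.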
 */
static unsigned long __init
deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
			   struct zone *zone, bool can_resched)
{
	int nid = zone_to_nid(zone);
	unsigned long nr_pages = 0;
	phys_addr_t start, end;
	u64 i = 0;

	for_each_free_mem_range(i, nid, 0, &start, &end, NULL) {
		unsigned long spfn = PFN_UP(start);
		unsigned long epfn = PFN_DOWN(end);

		if (spfn >= end_pfn)
			break;

		spfn = max(spfn, start_pfn);
		epfn = min(epfn, end_pfn);

		while (spfn < epfn) {
			unsigned long mo_pfn = ALIGN(spfn + 1, MAX_ORDER_NR_PAGES);
			unsigned long chunk_end = min(mo_pfn, epfn);

			nr_pages += deferred_init_pages(zone, spfn, chunk_end);
			deferred_free_pages(spfn, chunk_end - spfn);

			spfn = chunk_end;

			if (can_resched)
				cond_resched();
			else
				touch_nmi_watchdog();
		}
	}

	return nr_pages;
}

static void __init
deferred_init_memmap_job(unsigned long start_pfn, unsigned long end_pfn,
			 void *arg)
{
	struct zone *zone = arg;

	deferred_init_memmap_chunk(start_pfn, end_pfn, zone, true);
}

static unsigned int __init
deferred_page_init_max_threads(const struct cpumask *node_cpumask)
{
	return max(cpumask_weight(node_cpumask), 1U);
}

/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
	pg_data_t *pgdat = data;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
	int max_threads = deferred_page_init_max_threads(cpumask);
	unsigned long first_init_pfn, last_pfn, flags;
	unsigned long start = jiffies;
	struct zone *zone;

	/* Bind memory initialisation thread to a local node if possible */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(current, cpumask);

	pgdat_resize_lock(pgdat, &flags);
	first_init_pfn = pgdat->first_deferred_pfn;
	if (first_init_pfn == ULONG_MAX) {
		pgdat_resize_unlock(pgdat, &flags);
		pgdat_init_report_one_done();
		return 0;
	}

	/* Sanity check boundaries */
	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
	pgdat->first_deferred_pfn = ULONG_MAX;

	/*
	 * Once we unlock here, the zone cannot be grown anymore, thus if an
	 * interrupt thread must allocate this early in boot, zone must be
	 * pre-grown prior to start of deferred page initialization.
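	 *
	 * (deferred_grow_zone() is that pre-growing path; it serialises with
	 * us via the same resize lock, and since first_deferred_pfn has been
	 * reset to ULONG_MAX above, it will not initialise anything further
	 * once we unlock.)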
	 */
	pgdat_resize_unlock(pgdat, &flags);

	/* Only the highest zone is deferred */
	zone = pgdat->node_zones + pgdat->nr_zones - 1;
	last_pfn = SECTION_ALIGN_UP(zone_end_pfn(zone));

	struct padata_mt_job job = {
		.thread_fn	= deferred_init_memmap_job,
		.fn_arg		= zone,
		.start		= first_init_pfn,
		.size		= last_pfn - first_init_pfn,
		.align		= PAGES_PER_SECTION,
		.min_chunk	= PAGES_PER_SECTION,
		.max_threads	= max_threads,
		.numa_aware	= false,
	};

	padata_do_multithreaded(&job);

	/* Sanity check that the next zone really is unpopulated */
	WARN_ON(pgdat->nr_zones < MAX_NR_ZONES && populated_zone(++zone));

	pr_info("node %d deferred pages initialised in %ums\n",
		pgdat->node_id, jiffies_to_msecs(jiffies - start));

	pgdat_init_report_one_done();
	return 0;
}

/*
 * If this zone has deferred pages, try to grow it by initializing enough
 * deferred pages to satisfy the allocation specified by order, rounded up to
 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments
 * of SECTION_SIZE bytes by initializing struct pages in increments of
 * PAGES_PER_SECTION * sizeof(struct page) bytes.
 *
 * Return true when zone was grown, otherwise return false. We return true even
 * when we grow less than requested, to let the caller decide if there are
 * enough pages to satisfy the allocation.
 */
bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
{
	unsigned long nr_pages_needed = SECTION_ALIGN_UP(1 << order);
	pg_data_t *pgdat = zone->zone_pgdat;
	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
	unsigned long spfn, epfn, flags;
	unsigned long nr_pages = 0;

	/* Only the last zone may have deferred pages */
	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
		return false;

	pgdat_resize_lock(pgdat, &flags);

	/*
	 * If someone grew this zone while we were waiting for the spinlock,
	 * return true, as there might be enough pages already.
	 */
	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
		pgdat_resize_unlock(pgdat, &flags);
		return true;
	}

	/*
	 * Initialize at least nr_pages_needed in section chunks.
	 * If a section has less free memory than nr_pages_needed, the next
	 * section will also be initialized.
	 * Note that this still does not guarantee that an allocation of order
	 * can be satisfied if the sections are fragmented because of memblock
	 * allocations.
	 */
	for (spfn = first_deferred_pfn, epfn = SECTION_ALIGN_UP(spfn + 1);
	     nr_pages < nr_pages_needed && spfn < zone_end_pfn(zone);
	     spfn = epfn, epfn += PAGES_PER_SECTION) {
		nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone, false);
	}

	/*
	 * If there were no pages to initialize and free, the zone's memory map
	 * is already completely initialized.
	 */
	pgdat->first_deferred_pfn = nr_pages ?
					spfn : ULONG_MAX;

	pgdat_resize_unlock(pgdat, &flags);

	return nr_pages > 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_CMA
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	init_pageblock_migratetype(page, MIGRATE_CMA, false);
	set_page_refcounted(page);
	/* pages were reserved and not allocated */
	clear_page_tag_ref(page);
	__free_pages(page, pageblock_order);

	adjust_managed_page_count(page, pageblock_nr_pages);
	page_zone(page)->cma_pages += pageblock_nr_pages;
}

/*
 * Similar to above, but only set the migrate type and stats.
 */
void __init init_cma_pageblock(struct page *page)
{
	init_pageblock_migratetype(page, MIGRATE_CMA, false);
	adjust_managed_page_count(page, pageblock_nr_pages);
	page_zone(page)->cma_pages += pageblock_nr_pages;
}
#endif

void set_zone_contiguous(struct zone *zone)
{
	unsigned long block_start_pfn = zone->zone_start_pfn;
	unsigned long block_end_pfn;

	block_end_pfn = pageblock_end_pfn(block_start_pfn);
	for (; block_start_pfn < zone_end_pfn(zone);
	     block_start_pfn = block_end_pfn,
	     block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));

		if (!__pageblock_pfn_to_page(block_start_pfn,
					     block_end_pfn, zone))
			return;
		cond_resched();
	}

	/* We confirm that there is no hole */
	zone->contiguous = true;
}

/*
 * Check if a PFN range intersects multiple zones on one or more
 * NUMA nodes. Specify the @nid argument if it is known that this
 * PFN range is on one node, NUMA_NO_NODE otherwise.
 */
bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
				unsigned long nr_pages)
{
	struct zone *zone, *izone = NULL;

	for_each_zone(zone) {
		if (nid != NUMA_NO_NODE && zone_to_nid(zone) != nid)
			continue;

		if (zone_intersects(zone, start_pfn, nr_pages)) {
			if (izone != NULL)
				return true;
			izone = zone;
		}
	}

	return false;
}

static void __init mem_init_print_info(void);

void __init page_alloc_init_late(void)
{
	struct zone *zone;
	int nid;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT

	/* There will be num_node_state(N_MEMORY) threads */
	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
	for_each_node_state(nid, N_MEMORY) {
		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
	}

	/* Block until all are initialised */
	wait_for_completion(&pgdat_init_all_done_comp);

	/*
	 * We initialized the rest of the deferred pages. Permanently disable
	 * on-demand struct page initialization.
	 */
	static_branch_disable(&deferred_pages);

	/* Reinit limits that are based on free pages after the kernel is up */
	files_maxfiles_init();
#endif

	/* Accounting of total+free memory is stable at this point.
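	 * In particular, all deferred struct pages have been initialised and
	 * freed to the buddy allocator above, so the deferred ranges are now
	 * included in the totals reported below.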
	 */
	mem_init_print_info();
	buffer_init();

	/* Discard memblock private memory */
	memblock_discard();

	for_each_node_state(nid, N_MEMORY)
		shuffle_free_memory(NODE_DATA(nid));

	for_each_populated_zone(zone)
		set_zone_contiguous(zone);

	/* Initialize page ext after all struct pages are initialized. */
	if (deferred_struct_pages)
		page_ext_init();

	page_alloc_sysctl_init();
}

/*
 * Adaptive scale is meant to reduce sizes of hash tables on large memory
 * machines. As memory size is increased the scale is also increased, but at
 * a slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
 * quadruples the scale is increased by one, which means the size of the hash
 * table only doubles, instead of quadrupling as well.
 * Because 32-bit systems cannot have the large physical memory where this
 * scaling makes sense, it is disabled on such platforms.
 */
#if __BITS_PER_LONG > 32
#define ADAPT_SCALE_BASE	(64ul << 30)
#define ADAPT_SCALE_SHIFT	2
#define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
#endif

/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit)
{
	unsigned long long max = high_limit;
	unsigned long log2qty, size;
	void *table;
	gfp_t gfp_flags;
	bool virt;
	bool huge;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;

		/* It isn't necessary when PAGE_SIZE >= 1MB */
		if (PAGE_SIZE < SZ_1M)
			numentries = round_up(numentries, SZ_1M / PAGE_SIZE);

#if __BITS_PER_LONG > 32
		if (!high_limit) {
			unsigned long adapt;

			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
			     adapt <<= ADAPT_SCALE_SHIFT)
				scale++;
		}
#endif

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	max = min(max, 0x80000000ULL);

	if (numentries < low_limit)
		numentries = low_limit;
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	gfp_flags = (flags & HASH_ZERO) ?
			GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
	do {
		virt = false;
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY) {
			if (flags & HASH_ZERO)
				table = memblock_alloc(size, SMP_CACHE_BYTES);
			else
				table = memblock_alloc_raw(size,
							   SMP_CACHE_BYTES);
		} else if (get_order(size) > MAX_PAGE_ORDER || hashdist) {
			table = vmalloc_huge(size, gfp_flags);
			virt = true;
			if (table)
				huge = is_vm_area_hugepages(table);
		} else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of the hash table, which
			 * alloc_pages_exact() does automatically.
			 */
			table = alloc_pages_exact(size, gfp_flags);
			kmemleak_alloc(table, size, 1, gfp_flags);
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
		tablename, 1UL << log2qty, get_order(size), size,
		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}

void __init memblock_free_pages(unsigned long pfn, unsigned int order)
{
	struct page *page = pfn_to_page(pfn);

	if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
		int nid = early_pfn_to_nid(pfn);

		if (!early_page_initialised(pfn, nid))
			return;
	}

	if (!kmsan_memblock_free_pages(page, order)) {
		/* KMSAN will take care of these pages. */
		return;
	}

	/* pages were reserved and not allocated */
	clear_page_tag_ref(page);
	__free_pages_core(page, order, MEMINIT_EARLY);
}

DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
EXPORT_SYMBOL(init_on_alloc);

DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(init_on_free);

static bool _init_on_alloc_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
static int __init early_init_on_alloc(char *buf)
{
	return kstrtobool(buf, &_init_on_alloc_enabled_early);
}
early_param("init_on_alloc", early_init_on_alloc);

static bool _init_on_free_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
static int __init early_init_on_free(char *buf)
{
	return kstrtobool(buf, &_init_on_free_enabled_early);
}
early_param("init_on_free", early_init_on_free);

DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

static bool check_pages_enabled_early __initdata;

static int __init early_check_pages(char *buf)
{
	return kstrtobool(buf, &check_pages_enabled_early);
}
early_param("check_pages", early_check_pages);

/*
 * Enable static keys related to various memory debugging and hardening options.
 * Some override others, and depend on early params that are evaluated in the
 * order of appearance. So we need to first gather the full picture of what was
 * enabled, and then make decisions.
 */
static void __init mem_debugging_and_hardening_init(void)
{
	bool page_poisoning_requested = false;
	bool want_check_pages = check_pages_enabled_early;

#ifdef CONFIG_PAGE_POISONING
	/*
	 * Page poisoning is debug page alloc for some arches.
	 * If either of those options is enabled, enable poisoning.
	 */
	if (page_poisoning_enabled() ||
	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
	      debug_pagealloc_enabled())) {
		static_branch_enable(&_page_poisoning_enabled);
		page_poisoning_requested = true;
		want_check_pages = true;
	}
#endif

	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
	    page_poisoning_requested) {
		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
			"will take precedence over init_on_alloc and init_on_free\n");
		_init_on_alloc_enabled_early = false;
		_init_on_free_enabled_early = false;
	}

	if (_init_on_alloc_enabled_early) {
		want_check_pages = true;
		static_branch_enable(&init_on_alloc);
	} else {
		static_branch_disable(&init_on_alloc);
	}

	if (_init_on_free_enabled_early) {
		want_check_pages = true;
		static_branch_enable(&init_on_free);
	} else {
		static_branch_disable(&init_on_free);
	}

	if (IS_ENABLED(CONFIG_KMSAN) &&
	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");

#ifdef CONFIG_DEBUG_PAGEALLOC
	if (debug_pagealloc_enabled()) {
		want_check_pages = true;
		static_branch_enable(&_debug_pagealloc_enabled);

		if (debug_guardpage_minorder())
			static_branch_enable(&_debug_guardpage_enabled);
	}
#endif

	/*
	 * Any page debugging or hardening option also enables sanity checking
	 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
	 * enabled already.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
		static_branch_enable(&check_pages_enabled);
}

/* Report memory auto-initialization states for this boot. */
static void __init report_meminit(void)
{
	const char *stack;

	if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
		stack = "all(pattern)";
	else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
		stack = "all(zero)";
	else
		stack = "off";

	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
		stack, str_on_off(want_init_on_alloc(GFP_KERNEL)),
		str_on_off(want_init_on_free()));
	if (want_init_on_free())
		pr_info("mem auto-init: clearing system memory may take some time...\n");
}

static void __init mem_init_print_info(void)
{
	unsigned long physpages, codesize, datasize, rosize, bss_size;
	unsigned long init_code_size, init_data_size;

	physpages = get_num_physpages();
	codesize = _etext - _stext;
	datasize = _edata - _sdata;
	rosize = __end_rodata - __start_rodata;
	bss_size = __bss_stop - __bss_start;
	init_data_size = __init_end - __init_begin;
	init_code_size = _einittext - _sinittext;

	/*
	 * Detect special cases and adjust section sizes accordingly:
	 * 1) .init.* may be embedded into .data sections
	 * 2) .init.text.* may be out of [__init_begin, __init_end],
	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
	 * 3) .rodata.* may be embedded into .text or .data sections.
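	 *
	 * The adj_init_size() helper below subtracts an embedded range's size
	 * only when its start symbol actually lies inside the enclosing
	 * section and the subtraction cannot underflow.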
	 */
#define adj_init_size(start, end, size, pos, adj) \
	do { \
		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
			size -= adj; \
	} while (0)

	adj_init_size(__init_begin, __init_end, init_data_size,
		      _sinittext, init_code_size);
	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);

#undef	adj_init_size

	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
#ifdef	CONFIG_HIGHMEM
		", %luK highmem"
#endif
		")\n",
		K(nr_free_pages()), K(physpages),
		codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
		(init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
		K(physpages - totalram_pages() - totalcma_pages),
		K(totalcma_pages)
#ifdef	CONFIG_HIGHMEM
		, K(totalhigh_pages())
#endif
		);
}

void __init __weak arch_mm_preinit(void)
{
}

void __init __weak mem_init(void)
{
}

void __init mm_core_init_early(void)
{
	hugetlb_cma_reserve();
	hugetlb_bootmem_alloc();

	free_area_init();
}

/*
 * Set up kernel memory allocators
 */
void __init mm_core_init(void)
{
	arch_mm_preinit();

	/* Initializations relying on SMP setup */
	BUILD_BUG_ON(MAX_ZONELISTS > 2);
	build_all_zonelists(NULL);
	page_alloc_init_cpuhp();
	alloc_tag_sec_init();
	/*
	 * page_ext requires contiguous pages bigger than MAX_PAGE_ORDER,
	 * unless SPARSEMEM is enabled.
	 */
	page_ext_init_flatmem();
	mem_debugging_and_hardening_init();
	kfence_alloc_pool_and_metadata();
	report_meminit();
	kmsan_init_shadow();
	stack_depot_early_init();

	/*
	 * KHO memory setup must happen while memblock is still active, but
	 * as close as possible to buddy initialization
	 */
	kho_memory_init();

	memblock_free_all();
	mem_init();
	kmem_cache_init();
	/*
	 * page_owner must be initialized after buddy is ready, and also after
	 * slab is ready so that stack_depot_init() works properly
	 */
	page_ext_init_flatmem_late();
	kmemleak_init();
	ptlock_cache_init();
	pgtable_cache_init();
	debug_objects_mem_init();
	vmalloc_init();
	/* If struct page init was not deferred, init page_ext now, as vmalloc is fully initialized */
	if (!deferred_struct_pages)
		page_ext_init();
	/* Should be run before the first non-init thread is created */
	init_espfix_bsp();
	/* Should be run after espfix64 is set up. */
	pti_init();
	kmsan_init_runtime();
	mm_cache_init();
	execmem_init();
}