1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * mm_init.c - Memory initialisation verification and debugging 4 * 5 * Copyright 2008 IBM Corporation, 2008 6 * Author Mel Gorman <mel@csn.ul.ie> 7 * 8 */ 9 #include <linux/kernel.h> 10 #include <linux/init.h> 11 #include <linux/kobject.h> 12 #include <linux/export.h> 13 #include <linux/memory.h> 14 #include <linux/notifier.h> 15 #include <linux/sched.h> 16 #include <linux/mman.h> 17 #include <linux/memblock.h> 18 #include <linux/page-isolation.h> 19 #include <linux/padata.h> 20 #include <linux/nmi.h> 21 #include <linux/buffer_head.h> 22 #include <linux/kmemleak.h> 23 #include <linux/kfence.h> 24 #include <linux/page_ext.h> 25 #include <linux/pti.h> 26 #include <linux/pgtable.h> 27 #include <linux/stackdepot.h> 28 #include <linux/swap.h> 29 #include <linux/cma.h> 30 #include <linux/crash_dump.h> 31 #include <linux/execmem.h> 32 #include "internal.h" 33 #include "slab.h" 34 #include "shuffle.h" 35 36 #include <asm/setup.h> 37 38 #ifdef CONFIG_DEBUG_MEMORY_INIT 39 int __meminitdata mminit_loglevel; 40 41 /* The zonelists are simply reported, validation is manual. */ 42 void __init mminit_verify_zonelist(void) 43 { 44 int nid; 45 46 if (mminit_loglevel < MMINIT_VERIFY) 47 return; 48 49 for_each_online_node(nid) { 50 pg_data_t *pgdat = NODE_DATA(nid); 51 struct zone *zone; 52 struct zoneref *z; 53 struct zonelist *zonelist; 54 int i, listid, zoneid; 55 56 BUILD_BUG_ON(MAX_ZONELISTS > 2); 57 for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) { 58 59 /* Identify the zone and nodelist */ 60 zoneid = i % MAX_NR_ZONES; 61 listid = i / MAX_NR_ZONES; 62 zonelist = &pgdat->node_zonelists[listid]; 63 zone = &pgdat->node_zones[zoneid]; 64 if (!populated_zone(zone)) 65 continue; 66 67 /* Print information about the zonelist */ 68 printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ", 69 listid > 0 ? 
"thisnode" : "general", nid, 70 zone->name); 71 72 /* Iterate the zonelist */ 73 for_each_zone_zonelist(zone, z, zonelist, zoneid) 74 pr_cont("%d:%s ", zone_to_nid(zone), zone->name); 75 pr_cont("\n"); 76 } 77 } 78 } 79 80 void __init mminit_verify_pageflags_layout(void) 81 { 82 int shift, width; 83 unsigned long or_mask, add_mask; 84 85 shift = BITS_PER_LONG; 86 width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH 87 - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH; 88 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths", 89 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n", 90 SECTIONS_WIDTH, 91 NODES_WIDTH, 92 ZONES_WIDTH, 93 LAST_CPUPID_WIDTH, 94 KASAN_TAG_WIDTH, 95 LRU_GEN_WIDTH, 96 LRU_REFS_WIDTH, 97 NR_PAGEFLAGS); 98 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts", 99 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n", 100 SECTIONS_SHIFT, 101 NODES_SHIFT, 102 ZONES_SHIFT, 103 LAST_CPUPID_SHIFT, 104 KASAN_TAG_WIDTH); 105 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts", 106 "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n", 107 (unsigned long)SECTIONS_PGSHIFT, 108 (unsigned long)NODES_PGSHIFT, 109 (unsigned long)ZONES_PGSHIFT, 110 (unsigned long)LAST_CPUPID_PGSHIFT, 111 (unsigned long)KASAN_TAG_PGSHIFT); 112 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid", 113 "Node/Zone ID: %lu -> %lu\n", 114 (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT), 115 (unsigned long)ZONEID_PGOFF); 116 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage", 117 "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n", 118 shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0); 119 #ifdef NODE_NOT_IN_PAGE_FLAGS 120 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags", 121 "Node not in page flags"); 122 #endif 123 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS 124 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags", 125 "Last cpupid not in page flags"); 126 #endif 127 128 if (SECTIONS_WIDTH) { 129 shift -= SECTIONS_WIDTH; 130 BUG_ON(shift != SECTIONS_PGSHIFT); 131 } 132 if (NODES_WIDTH) { 133 shift -= NODES_WIDTH; 134 BUG_ON(shift != NODES_PGSHIFT); 135 } 136 if (ZONES_WIDTH) { 137 shift -= ZONES_WIDTH; 138 BUG_ON(shift != ZONES_PGSHIFT); 139 } 140 141 /* Check for bitmask overlaps */ 142 or_mask = (ZONES_MASK << ZONES_PGSHIFT) | 143 (NODES_MASK << NODES_PGSHIFT) | 144 (SECTIONS_MASK << SECTIONS_PGSHIFT); 145 add_mask = (ZONES_MASK << ZONES_PGSHIFT) + 146 (NODES_MASK << NODES_PGSHIFT) + 147 (SECTIONS_MASK << SECTIONS_PGSHIFT); 148 BUG_ON(or_mask != add_mask); 149 } 150 151 static __init int set_mminit_loglevel(char *str) 152 { 153 get_option(&str, &mminit_loglevel); 154 return 0; 155 } 156 early_param("mminit_loglevel", set_mminit_loglevel); 157 #endif /* CONFIG_DEBUG_MEMORY_INIT */ 158 159 struct kobject *mm_kobj; 160 161 #ifdef CONFIG_SMP 162 s32 vm_committed_as_batch = 32; 163 164 void mm_compute_batch(int overcommit_policy) 165 { 166 u64 memsized_batch; 167 s32 nr = num_present_cpus(); 168 s32 batch = max_t(s32, nr*2, 32); 169 unsigned long ram_pages = totalram_pages(); 170 171 /* 172 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of 173 * (total memory/#cpus), and lift it to 25% for other policies 174 * to easy the possible lock contention for percpu_counter 175 * vm_committed_as, while the max limit is INT_MAX 176 */ 177 if (overcommit_policy == OVERCOMMIT_NEVER) 178 memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX); 179 else 180 memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX); 
181 182 vm_committed_as_batch = max_t(s32, memsized_batch, batch); 183 } 184 185 static int __meminit mm_compute_batch_notifier(struct notifier_block *self, 186 unsigned long action, void *arg) 187 { 188 switch (action) { 189 case MEM_ONLINE: 190 case MEM_OFFLINE: 191 mm_compute_batch(sysctl_overcommit_memory); 192 break; 193 default: 194 break; 195 } 196 return NOTIFY_OK; 197 } 198 199 static int __init mm_compute_batch_init(void) 200 { 201 mm_compute_batch(sysctl_overcommit_memory); 202 hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI); 203 return 0; 204 } 205 206 __initcall(mm_compute_batch_init); 207 208 #endif 209 210 static int __init mm_sysfs_init(void) 211 { 212 mm_kobj = kobject_create_and_add("mm", kernel_kobj); 213 if (!mm_kobj) 214 return -ENOMEM; 215 216 return 0; 217 } 218 postcore_initcall(mm_sysfs_init); 219 220 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata; 221 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata; 222 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata; 223 224 static unsigned long required_kernelcore __initdata; 225 static unsigned long required_kernelcore_percent __initdata; 226 static unsigned long required_movablecore __initdata; 227 static unsigned long required_movablecore_percent __initdata; 228 229 static unsigned long nr_kernel_pages __initdata; 230 static unsigned long nr_all_pages __initdata; 231 232 static bool deferred_struct_pages __meminitdata; 233 234 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); 235 236 static int __init cmdline_parse_core(char *p, unsigned long *core, 237 unsigned long *percent) 238 { 239 unsigned long long coremem; 240 char *endptr; 241 242 if (!p) 243 return -EINVAL; 244 245 /* Value may be a percentage of total memory, otherwise bytes */ 246 coremem = simple_strtoull(p, &endptr, 0); 247 if (*endptr == '%') { 248 /* Paranoid check for percent values greater than 100 */ 249 WARN_ON(coremem > 100); 250 251 *percent = coremem; 252 } else { 253 coremem = memparse(p, &p); 254 /* Paranoid check that UL is enough for the coremem value */ 255 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 256 257 *core = coremem >> PAGE_SHIFT; 258 *percent = 0UL; 259 } 260 return 0; 261 } 262 263 bool mirrored_kernelcore __initdata_memblock; 264 265 /* 266 * kernelcore=size sets the amount of memory for use for allocations that 267 * cannot be reclaimed or migrated. 268 */ 269 static int __init cmdline_parse_kernelcore(char *p) 270 { 271 /* parse kernelcore=mirror */ 272 if (parse_option_str(p, "mirror")) { 273 mirrored_kernelcore = true; 274 return 0; 275 } 276 277 return cmdline_parse_core(p, &required_kernelcore, 278 &required_kernelcore_percent); 279 } 280 early_param("kernelcore", cmdline_parse_kernelcore); 281 282 /* 283 * movablecore=size sets the amount of memory for use for allocations that 284 * can be reclaimed or migrated. 285 */ 286 static int __init cmdline_parse_movablecore(char *p) 287 { 288 return cmdline_parse_core(p, &required_movablecore, 289 &required_movablecore_percent); 290 } 291 early_param("movablecore", cmdline_parse_movablecore); 292 293 /* 294 * early_calculate_totalpages() 295 * Sum pages in active regions for movable zone. 296 * Populate N_MEMORY for calculating usable_nodes. 
297 */ 298 static unsigned long __init early_calculate_totalpages(void) 299 { 300 unsigned long totalpages = 0; 301 unsigned long start_pfn, end_pfn; 302 int i, nid; 303 304 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 305 unsigned long pages = end_pfn - start_pfn; 306 307 totalpages += pages; 308 if (pages) 309 node_set_state(nid, N_MEMORY); 310 } 311 return totalpages; 312 } 313 314 /* 315 * This finds a zone that can be used for ZONE_MOVABLE pages. The 316 * assumption is made that zones within a node are ordered in monotonic 317 * increasing memory addresses so that the "highest" populated zone is used 318 */ 319 static void __init find_usable_zone_for_movable(void) 320 { 321 int zone_index; 322 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 323 if (zone_index == ZONE_MOVABLE) 324 continue; 325 326 if (arch_zone_highest_possible_pfn[zone_index] > 327 arch_zone_lowest_possible_pfn[zone_index]) 328 break; 329 } 330 331 VM_BUG_ON(zone_index == -1); 332 movable_zone = zone_index; 333 } 334 335 /* 336 * Find the PFN the Movable zone begins in each node. Kernel memory 337 * is spread evenly between nodes as long as the nodes have enough 338 * memory. When they don't, some nodes will have more kernelcore than 339 * others 340 */ 341 static void __init find_zone_movable_pfns_for_nodes(void) 342 { 343 int i, nid; 344 unsigned long usable_startpfn; 345 unsigned long kernelcore_node, kernelcore_remaining; 346 /* save the state before borrow the nodemask */ 347 nodemask_t saved_node_state = node_states[N_MEMORY]; 348 unsigned long totalpages = early_calculate_totalpages(); 349 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 350 struct memblock_region *r; 351 352 /* Need to find movable_zone earlier when movable_node is specified. */ 353 find_usable_zone_for_movable(); 354 355 /* 356 * If movable_node is specified, ignore kernelcore and movablecore 357 * options. 358 */ 359 if (movable_node_is_enabled()) { 360 for_each_mem_region(r) { 361 if (!memblock_is_hotpluggable(r)) 362 continue; 363 364 nid = memblock_get_region_node(r); 365 366 usable_startpfn = memblock_region_memory_base_pfn(r); 367 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 368 min(usable_startpfn, zone_movable_pfn[nid]) : 369 usable_startpfn; 370 } 371 372 goto out2; 373 } 374 375 /* 376 * If kernelcore=mirror is specified, ignore movablecore option 377 */ 378 if (mirrored_kernelcore) { 379 bool mem_below_4gb_not_mirrored = false; 380 381 if (!memblock_has_mirror()) { 382 pr_warn("The system has no mirror memory, ignore kernelcore=mirror.\n"); 383 goto out; 384 } 385 386 if (is_kdump_kernel()) { 387 pr_warn("The system is under kdump, ignore kernelcore=mirror.\n"); 388 goto out; 389 } 390 391 for_each_mem_region(r) { 392 if (memblock_is_mirror(r)) 393 continue; 394 395 nid = memblock_get_region_node(r); 396 397 usable_startpfn = memblock_region_memory_base_pfn(r); 398 399 if (usable_startpfn < PHYS_PFN(SZ_4G)) { 400 mem_below_4gb_not_mirrored = true; 401 continue; 402 } 403 404 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 405 min(usable_startpfn, zone_movable_pfn[nid]) : 406 usable_startpfn; 407 } 408 409 if (mem_below_4gb_not_mirrored) 410 pr_warn("This configuration results in unmirrored kernel memory.\n"); 411 412 goto out2; 413 } 414 415 /* 416 * If kernelcore=nn% or movablecore=nn% was specified, calculate the 417 * amount of necessary memory. 
418 */ 419 if (required_kernelcore_percent) 420 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) / 421 10000UL; 422 if (required_movablecore_percent) 423 required_movablecore = (totalpages * 100 * required_movablecore_percent) / 424 10000UL; 425 426 /* 427 * If movablecore= was specified, calculate what size of 428 * kernelcore that corresponds so that memory usable for 429 * any allocation type is evenly spread. If both kernelcore 430 * and movablecore are specified, then the value of kernelcore 431 * will be used for required_kernelcore if it's greater than 432 * what movablecore would have allowed. 433 */ 434 if (required_movablecore) { 435 unsigned long corepages; 436 437 /* 438 * Round-up so that ZONE_MOVABLE is at least as large as what 439 * was requested by the user 440 */ 441 required_movablecore = 442 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 443 required_movablecore = min(totalpages, required_movablecore); 444 corepages = totalpages - required_movablecore; 445 446 required_kernelcore = max(required_kernelcore, corepages); 447 } 448 449 /* 450 * If kernelcore was not specified or kernelcore size is larger 451 * than totalpages, there is no ZONE_MOVABLE. 452 */ 453 if (!required_kernelcore || required_kernelcore >= totalpages) 454 goto out; 455 456 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 457 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 458 459 restart: 460 /* Spread kernelcore memory as evenly as possible throughout nodes */ 461 kernelcore_node = required_kernelcore / usable_nodes; 462 for_each_node_state(nid, N_MEMORY) { 463 unsigned long start_pfn, end_pfn; 464 465 /* 466 * Recalculate kernelcore_node if the division per node 467 * now exceeds what is necessary to satisfy the requested 468 * amount of memory for the kernel 469 */ 470 if (required_kernelcore < kernelcore_node) 471 kernelcore_node = required_kernelcore / usable_nodes; 472 473 /* 474 * As the map is walked, we track how much memory is usable 475 * by the kernel using kernelcore_remaining. When it is 476 * 0, the rest of the node is usable by ZONE_MOVABLE 477 */ 478 kernelcore_remaining = kernelcore_node; 479 480 /* Go through each range of PFNs within this node */ 481 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 482 unsigned long size_pages; 483 484 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 485 if (start_pfn >= end_pfn) 486 continue; 487 488 /* Account for what is only usable for kernelcore */ 489 if (start_pfn < usable_startpfn) { 490 unsigned long kernel_pages; 491 kernel_pages = min(end_pfn, usable_startpfn) 492 - start_pfn; 493 494 kernelcore_remaining -= min(kernel_pages, 495 kernelcore_remaining); 496 required_kernelcore -= min(kernel_pages, 497 required_kernelcore); 498 499 /* Continue if range is now fully accounted */ 500 if (end_pfn <= usable_startpfn) { 501 502 /* 503 * Push zone_movable_pfn to the end so 504 * that if we have to rebalance 505 * kernelcore across nodes, we will 506 * not double account here 507 */ 508 zone_movable_pfn[nid] = end_pfn; 509 continue; 510 } 511 start_pfn = usable_startpfn; 512 } 513 514 /* 515 * The usable PFN range for ZONE_MOVABLE is from 516 * start_pfn->end_pfn. 
Calculate size_pages as the 517 * number of pages used as kernelcore 518 */ 519 size_pages = end_pfn - start_pfn; 520 if (size_pages > kernelcore_remaining) 521 size_pages = kernelcore_remaining; 522 zone_movable_pfn[nid] = start_pfn + size_pages; 523 524 /* 525 * Some kernelcore has been met, update counts and 526 * break if the kernelcore for this node has been 527 * satisfied 528 */ 529 required_kernelcore -= min(required_kernelcore, 530 size_pages); 531 kernelcore_remaining -= size_pages; 532 if (!kernelcore_remaining) 533 break; 534 } 535 } 536 537 /* 538 * If there is still required_kernelcore, we do another pass with one 539 * less node in the count. This will push zone_movable_pfn[nid] further 540 * along on the nodes that still have memory until kernelcore is 541 * satisfied 542 */ 543 usable_nodes--; 544 if (usable_nodes && required_kernelcore > usable_nodes) 545 goto restart; 546 547 out2: 548 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 549 for (nid = 0; nid < MAX_NUMNODES; nid++) { 550 unsigned long start_pfn, end_pfn; 551 552 zone_movable_pfn[nid] = 553 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 554 555 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 556 if (zone_movable_pfn[nid] >= end_pfn) 557 zone_movable_pfn[nid] = 0; 558 } 559 560 out: 561 /* restore the node_state */ 562 node_states[N_MEMORY] = saved_node_state; 563 } 564 565 void __meminit __init_single_page(struct page *page, unsigned long pfn, 566 unsigned long zone, int nid) 567 { 568 mm_zero_struct_page(page); 569 set_page_links(page, zone, nid, pfn); 570 init_page_count(page); 571 page_mapcount_reset(page); 572 page_cpupid_reset_last(page); 573 page_kasan_tag_reset(page); 574 575 INIT_LIST_HEAD(&page->lru); 576 #ifdef WANT_PAGE_VIRTUAL 577 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 578 if (!is_highmem_idx(zone)) 579 set_page_address(page, __va(pfn << PAGE_SHIFT)); 580 #endif 581 } 582 583 #ifdef CONFIG_NUMA 584 /* 585 * During memory init memblocks map pfns to nids. The search is expensive and 586 * this caches recent lookups. The implementation of __early_pfn_to_nid 587 * treats start/end as pfns. 588 */ 589 struct mminit_pfnnid_cache { 590 unsigned long last_start; 591 unsigned long last_end; 592 int last_nid; 593 }; 594 595 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; 596 597 /* 598 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 
599 */ 600 static int __meminit __early_pfn_to_nid(unsigned long pfn, 601 struct mminit_pfnnid_cache *state) 602 { 603 unsigned long start_pfn, end_pfn; 604 int nid; 605 606 if (state->last_start <= pfn && pfn < state->last_end) 607 return state->last_nid; 608 609 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 610 if (nid != NUMA_NO_NODE) { 611 state->last_start = start_pfn; 612 state->last_end = end_pfn; 613 state->last_nid = nid; 614 } 615 616 return nid; 617 } 618 619 int __meminit early_pfn_to_nid(unsigned long pfn) 620 { 621 static DEFINE_SPINLOCK(early_pfn_lock); 622 int nid; 623 624 spin_lock(&early_pfn_lock); 625 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); 626 if (nid < 0) 627 nid = first_online_node; 628 spin_unlock(&early_pfn_lock); 629 630 return nid; 631 } 632 633 int hashdist = HASHDIST_DEFAULT; 634 635 static int __init set_hashdist(char *str) 636 { 637 if (!str) 638 return 0; 639 hashdist = simple_strtoul(str, &str, 0); 640 return 1; 641 } 642 __setup("hashdist=", set_hashdist); 643 644 static inline void fixup_hashdist(void) 645 { 646 if (num_node_state(N_MEMORY) == 1) 647 hashdist = 0; 648 } 649 #else 650 static inline void fixup_hashdist(void) {} 651 #endif /* CONFIG_NUMA */ 652 653 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 654 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) 655 { 656 pgdat->first_deferred_pfn = ULONG_MAX; 657 } 658 659 /* Returns true if the struct page for the pfn is initialised */ 660 static inline bool __meminit early_page_initialised(unsigned long pfn, int nid) 661 { 662 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) 663 return false; 664 665 return true; 666 } 667 668 /* 669 * Returns true when the remaining initialisation should be deferred until 670 * later in the boot cycle when it can be parallelised. 671 */ 672 static bool __meminit 673 defer_init(int nid, unsigned long pfn, unsigned long end_pfn) 674 { 675 static unsigned long prev_end_pfn, nr_initialised; 676 677 if (early_page_ext_enabled()) 678 return false; 679 680 /* Always populate low zones for address-constrained allocations */ 681 if (end_pfn < pgdat_end_pfn(NODE_DATA(nid))) 682 return false; 683 684 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX) 685 return true; 686 687 /* 688 * prev_end_pfn static that contains the end of previous zone 689 * No need to protect because called very early in boot before smp_init. 690 */ 691 if (prev_end_pfn != end_pfn) { 692 prev_end_pfn = end_pfn; 693 nr_initialised = 0; 694 } 695 696 /* 697 * We start only with one section of pages, more pages are added as 698 * needed until the rest of deferred pages are initialized. 
699 */ 700 nr_initialised++; 701 if ((nr_initialised > PAGES_PER_SECTION) && 702 (pfn & (PAGES_PER_SECTION - 1)) == 0) { 703 NODE_DATA(nid)->first_deferred_pfn = pfn; 704 return true; 705 } 706 return false; 707 } 708 709 static void __meminit init_reserved_page(unsigned long pfn, int nid) 710 { 711 pg_data_t *pgdat; 712 int zid; 713 714 if (early_page_initialised(pfn, nid)) 715 return; 716 717 pgdat = NODE_DATA(nid); 718 719 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 720 struct zone *zone = &pgdat->node_zones[zid]; 721 722 if (zone_spans_pfn(zone, pfn)) 723 break; 724 } 725 __init_single_page(pfn_to_page(pfn), pfn, zid, nid); 726 } 727 #else 728 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} 729 730 static inline bool early_page_initialised(unsigned long pfn, int nid) 731 { 732 return true; 733 } 734 735 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) 736 { 737 return false; 738 } 739 740 static inline void init_reserved_page(unsigned long pfn, int nid) 741 { 742 } 743 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 744 745 /* 746 * Initialised pages do not have PageReserved set. This function is 747 * called for each range allocated by the bootmem allocator and 748 * marks the pages PageReserved. The remaining valid pages are later 749 * sent to the buddy page allocator. 750 */ 751 void __meminit reserve_bootmem_region(phys_addr_t start, 752 phys_addr_t end, int nid) 753 { 754 unsigned long start_pfn = PFN_DOWN(start); 755 unsigned long end_pfn = PFN_UP(end); 756 757 for (; start_pfn < end_pfn; start_pfn++) { 758 if (pfn_valid(start_pfn)) { 759 struct page *page = pfn_to_page(start_pfn); 760 761 init_reserved_page(start_pfn, nid); 762 763 /* 764 * no need for atomic set_bit because the struct 765 * page is not visible yet so nobody should 766 * access it yet. 767 */ 768 __SetPageReserved(page); 769 } 770 } 771 } 772 773 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */ 774 static bool __meminit 775 overlap_memmap_init(unsigned long zone, unsigned long *pfn) 776 { 777 static struct memblock_region *r; 778 779 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { 780 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) { 781 for_each_mem_region(r) { 782 if (*pfn < memblock_region_memory_end_pfn(r)) 783 break; 784 } 785 } 786 if (*pfn >= memblock_region_memory_base_pfn(r) && 787 memblock_is_mirror(r)) { 788 *pfn = memblock_region_memory_end_pfn(r); 789 return true; 790 } 791 } 792 return false; 793 } 794 795 /* 796 * Only struct pages that correspond to ranges defined by memblock.memory 797 * are zeroed and initialized by going through __init_single_page() during 798 * memmap_init_zone_range(). 799 * 800 * But, there could be struct pages that correspond to holes in 801 * memblock.memory. 
This can happen because of the following reasons: 802 * - physical memory bank size is not necessarily the exact multiple of the 803 * arbitrary section size 804 * - early reserved memory may not be listed in memblock.memory 805 * - non-memory regions covered by the contigious flatmem mapping 806 * - memory layouts defined with memmap= kernel parameter may not align 807 * nicely with memmap sections 808 * 809 * Explicitly initialize those struct pages so that: 810 * - PG_Reserved is set 811 * - zone and node links point to zone and node that span the page if the 812 * hole is in the middle of a zone 813 * - zone and node links point to adjacent zone/node if the hole falls on 814 * the zone boundary; the pages in such holes will be prepended to the 815 * zone/node above the hole except for the trailing pages in the last 816 * section that will be appended to the zone/node below. 817 */ 818 static void __init init_unavailable_range(unsigned long spfn, 819 unsigned long epfn, 820 int zone, int node) 821 { 822 unsigned long pfn; 823 u64 pgcnt = 0; 824 825 for (pfn = spfn; pfn < epfn; pfn++) { 826 if (!pfn_valid(pageblock_start_pfn(pfn))) { 827 pfn = pageblock_end_pfn(pfn) - 1; 828 continue; 829 } 830 __init_single_page(pfn_to_page(pfn), pfn, zone, node); 831 __SetPageReserved(pfn_to_page(pfn)); 832 pgcnt++; 833 } 834 835 if (pgcnt) 836 pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n", 837 node, zone_names[zone], pgcnt); 838 } 839 840 /* 841 * Initially all pages are reserved - free ones are freed 842 * up by memblock_free_all() once the early boot process is 843 * done. Non-atomic initialization, single-pass. 844 * 845 * All aligned pageblocks are initialized to the specified migratetype 846 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related 847 * zone stats (e.g., nr_isolate_pageblock) are touched. 848 */ 849 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, 850 unsigned long start_pfn, unsigned long zone_end_pfn, 851 enum meminit_context context, 852 struct vmem_altmap *altmap, int migratetype) 853 { 854 unsigned long pfn, end_pfn = start_pfn + size; 855 struct page *page; 856 857 if (highest_memmap_pfn < end_pfn - 1) 858 highest_memmap_pfn = end_pfn - 1; 859 860 #ifdef CONFIG_ZONE_DEVICE 861 /* 862 * Honor reservation requested by the driver for this ZONE_DEVICE 863 * memory. We limit the total number of pages to initialize to just 864 * those that might contain the memory mapping. We will defer the 865 * ZONE_DEVICE page initialization until after we have released 866 * the hotplug lock. 867 */ 868 if (zone == ZONE_DEVICE) { 869 if (!altmap) 870 return; 871 872 if (start_pfn == altmap->base_pfn) 873 start_pfn += altmap->reserve; 874 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 875 } 876 #endif 877 878 for (pfn = start_pfn; pfn < end_pfn; ) { 879 /* 880 * There can be holes in boot-time mem_map[]s handed to this 881 * function. They do not exist on hotplugged memory. 882 */ 883 if (context == MEMINIT_EARLY) { 884 if (overlap_memmap_init(zone, &pfn)) 885 continue; 886 if (defer_init(nid, pfn, zone_end_pfn)) { 887 deferred_struct_pages = true; 888 break; 889 } 890 } 891 892 page = pfn_to_page(pfn); 893 __init_single_page(page, pfn, zone, nid); 894 if (context == MEMINIT_HOTPLUG) 895 __SetPageReserved(page); 896 897 /* 898 * Usually, we want to mark the pageblock MIGRATE_MOVABLE, 899 * such that unmovable allocations won't be scattered all 900 * over the place during system boot. 
901 */ 902 if (pageblock_aligned(pfn)) { 903 set_pageblock_migratetype(page, migratetype); 904 cond_resched(); 905 } 906 pfn++; 907 } 908 } 909 910 static void __init memmap_init_zone_range(struct zone *zone, 911 unsigned long start_pfn, 912 unsigned long end_pfn, 913 unsigned long *hole_pfn) 914 { 915 unsigned long zone_start_pfn = zone->zone_start_pfn; 916 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; 917 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); 918 919 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn); 920 end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn); 921 922 if (start_pfn >= end_pfn) 923 return; 924 925 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, 926 zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); 927 928 if (*hole_pfn < start_pfn) 929 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid); 930 931 *hole_pfn = end_pfn; 932 } 933 934 static void __init memmap_init(void) 935 { 936 unsigned long start_pfn, end_pfn; 937 unsigned long hole_pfn = 0; 938 int i, j, zone_id = 0, nid; 939 940 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 941 struct pglist_data *node = NODE_DATA(nid); 942 943 for (j = 0; j < MAX_NR_ZONES; j++) { 944 struct zone *zone = node->node_zones + j; 945 946 if (!populated_zone(zone)) 947 continue; 948 949 memmap_init_zone_range(zone, start_pfn, end_pfn, 950 &hole_pfn); 951 zone_id = j; 952 } 953 } 954 955 #ifdef CONFIG_SPARSEMEM 956 /* 957 * Initialize the memory map for hole in the range [memory_end, 958 * section_end]. 959 * Append the pages in this hole to the highest zone in the last 960 * node. 961 * The call to init_unavailable_range() is outside the ifdef to 962 * silence the compiler warining about zone_id set but not used; 963 * for FLATMEM it is a nop anyway 964 */ 965 end_pfn = round_up(end_pfn, PAGES_PER_SECTION); 966 if (hole_pfn < end_pfn) 967 #endif 968 init_unavailable_range(hole_pfn, end_pfn, zone_id, nid); 969 } 970 971 #ifdef CONFIG_ZONE_DEVICE 972 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, 973 unsigned long zone_idx, int nid, 974 struct dev_pagemap *pgmap) 975 { 976 977 __init_single_page(page, pfn, zone_idx, nid); 978 979 /* 980 * Mark page reserved as it will need to wait for onlining 981 * phase for it to be fully associated with a zone. 982 * 983 * We can use the non-atomic __set_bit operation for setting 984 * the flag as we are still initializing the pages. 985 */ 986 __SetPageReserved(page); 987 988 /* 989 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer 990 * and zone_device_data. It is a bug if a ZONE_DEVICE page is 991 * ever freed or placed on a driver-private list. 992 */ 993 page->pgmap = pgmap; 994 page->zone_device_data = NULL; 995 996 /* 997 * Mark the block movable so that blocks are reserved for 998 * movable at startup. This will force kernel allocations 999 * to reserve their blocks rather than leaking throughout 1000 * the address space during boot when many long-lived 1001 * kernel allocations are made. 1002 * 1003 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap 1004 * because this is done early in section_activate() 1005 */ 1006 if (pageblock_aligned(pfn)) { 1007 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 1008 cond_resched(); 1009 } 1010 1011 /* 1012 * ZONE_DEVICE pages are released directly to the driver page allocator 1013 * which will set the page count to 1 when allocating the page. 
1014 */ 1015 if (pgmap->type == MEMORY_DEVICE_PRIVATE || 1016 pgmap->type == MEMORY_DEVICE_COHERENT) 1017 set_page_count(page, 0); 1018 } 1019 1020 /* 1021 * With compound page geometry and when struct pages are stored in ram most 1022 * tail pages are reused. Consequently, the amount of unique struct pages to 1023 * initialize is a lot smaller that the total amount of struct pages being 1024 * mapped. This is a paired / mild layering violation with explicit knowledge 1025 * of how the sparse_vmemmap internals handle compound pages in the lack 1026 * of an altmap. See vmemmap_populate_compound_pages(). 1027 */ 1028 static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap, 1029 struct dev_pagemap *pgmap) 1030 { 1031 if (!vmemmap_can_optimize(altmap, pgmap)) 1032 return pgmap_vmemmap_nr(pgmap); 1033 1034 return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page)); 1035 } 1036 1037 static void __ref memmap_init_compound(struct page *head, 1038 unsigned long head_pfn, 1039 unsigned long zone_idx, int nid, 1040 struct dev_pagemap *pgmap, 1041 unsigned long nr_pages) 1042 { 1043 unsigned long pfn, end_pfn = head_pfn + nr_pages; 1044 unsigned int order = pgmap->vmemmap_shift; 1045 1046 __SetPageHead(head); 1047 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) { 1048 struct page *page = pfn_to_page(pfn); 1049 1050 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 1051 prep_compound_tail(head, pfn - head_pfn); 1052 set_page_count(page, 0); 1053 1054 /* 1055 * The first tail page stores important compound page info. 1056 * Call prep_compound_head() after the first tail page has 1057 * been initialized, to not have the data overwritten. 1058 */ 1059 if (pfn == head_pfn + 1) 1060 prep_compound_head(head, order); 1061 } 1062 } 1063 1064 void __ref memmap_init_zone_device(struct zone *zone, 1065 unsigned long start_pfn, 1066 unsigned long nr_pages, 1067 struct dev_pagemap *pgmap) 1068 { 1069 unsigned long pfn, end_pfn = start_pfn + nr_pages; 1070 struct pglist_data *pgdat = zone->zone_pgdat; 1071 struct vmem_altmap *altmap = pgmap_altmap(pgmap); 1072 unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap); 1073 unsigned long zone_idx = zone_idx(zone); 1074 unsigned long start = jiffies; 1075 int nid = pgdat->node_id; 1076 1077 if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE)) 1078 return; 1079 1080 /* 1081 * The call to memmap_init should have already taken care 1082 * of the pages reserved for the memmap, so we can just jump to 1083 * the end of that region and start processing the device pages. 1084 */ 1085 if (altmap) { 1086 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 1087 nr_pages = end_pfn - start_pfn; 1088 } 1089 1090 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) { 1091 struct page *page = pfn_to_page(pfn); 1092 1093 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 1094 1095 if (pfns_per_compound == 1) 1096 continue; 1097 1098 memmap_init_compound(page, pfn, zone_idx, nid, pgmap, 1099 compound_nr_pages(altmap, pgmap)); 1100 } 1101 1102 pr_debug("%s initialised %lu pages in %ums\n", __func__, 1103 nr_pages, jiffies_to_msecs(jiffies - start)); 1104 } 1105 #endif 1106 1107 /* 1108 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 1109 * because it is sized independent of architecture. Unlike the other zones, 1110 * the starting point for ZONE_MOVABLE is not fixed. It may be different 1111 * in each node depending on the size of each node and how evenly kernelcore 1112 * is distributed. 
This helper function adjusts the zone ranges 1113 * provided by the architecture for a given node by using the end of the 1114 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that 1115 * zones within a node are in order of monotonic increases memory addresses 1116 */ 1117 static void __init adjust_zone_range_for_zone_movable(int nid, 1118 unsigned long zone_type, 1119 unsigned long node_end_pfn, 1120 unsigned long *zone_start_pfn, 1121 unsigned long *zone_end_pfn) 1122 { 1123 /* Only adjust if ZONE_MOVABLE is on this node */ 1124 if (zone_movable_pfn[nid]) { 1125 /* Size ZONE_MOVABLE */ 1126 if (zone_type == ZONE_MOVABLE) { 1127 *zone_start_pfn = zone_movable_pfn[nid]; 1128 *zone_end_pfn = min(node_end_pfn, 1129 arch_zone_highest_possible_pfn[movable_zone]); 1130 1131 /* Adjust for ZONE_MOVABLE starting within this range */ 1132 } else if (!mirrored_kernelcore && 1133 *zone_start_pfn < zone_movable_pfn[nid] && 1134 *zone_end_pfn > zone_movable_pfn[nid]) { 1135 *zone_end_pfn = zone_movable_pfn[nid]; 1136 1137 /* Check if this whole range is within ZONE_MOVABLE */ 1138 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 1139 *zone_start_pfn = *zone_end_pfn; 1140 } 1141 } 1142 1143 /* 1144 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 1145 * then all holes in the requested range will be accounted for. 1146 */ 1147 static unsigned long __init __absent_pages_in_range(int nid, 1148 unsigned long range_start_pfn, 1149 unsigned long range_end_pfn) 1150 { 1151 unsigned long nr_absent = range_end_pfn - range_start_pfn; 1152 unsigned long start_pfn, end_pfn; 1153 int i; 1154 1155 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 1156 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 1157 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 1158 nr_absent -= end_pfn - start_pfn; 1159 } 1160 return nr_absent; 1161 } 1162 1163 /** 1164 * absent_pages_in_range - Return number of page frames in holes within a range 1165 * @start_pfn: The start PFN to start searching for holes 1166 * @end_pfn: The end PFN to stop searching for holes 1167 * 1168 * Return: the number of pages frames in memory holes within a range. 1169 */ 1170 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 1171 unsigned long end_pfn) 1172 { 1173 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 1174 } 1175 1176 /* Return the number of page frames in holes in a zone on a node */ 1177 static unsigned long __init zone_absent_pages_in_node(int nid, 1178 unsigned long zone_type, 1179 unsigned long zone_start_pfn, 1180 unsigned long zone_end_pfn) 1181 { 1182 unsigned long nr_absent; 1183 1184 /* zone is empty, we don't have any absent pages */ 1185 if (zone_start_pfn == zone_end_pfn) 1186 return 0; 1187 1188 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 1189 1190 /* 1191 * ZONE_MOVABLE handling. 1192 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages 1193 * and vice versa. 
1194 */ 1195 if (mirrored_kernelcore && zone_movable_pfn[nid]) { 1196 unsigned long start_pfn, end_pfn; 1197 struct memblock_region *r; 1198 1199 for_each_mem_region(r) { 1200 start_pfn = clamp(memblock_region_memory_base_pfn(r), 1201 zone_start_pfn, zone_end_pfn); 1202 end_pfn = clamp(memblock_region_memory_end_pfn(r), 1203 zone_start_pfn, zone_end_pfn); 1204 1205 if (zone_type == ZONE_MOVABLE && 1206 memblock_is_mirror(r)) 1207 nr_absent += end_pfn - start_pfn; 1208 1209 if (zone_type == ZONE_NORMAL && 1210 !memblock_is_mirror(r)) 1211 nr_absent += end_pfn - start_pfn; 1212 } 1213 } 1214 1215 return nr_absent; 1216 } 1217 1218 /* 1219 * Return the number of pages a zone spans in a node, including holes 1220 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 1221 */ 1222 static unsigned long __init zone_spanned_pages_in_node(int nid, 1223 unsigned long zone_type, 1224 unsigned long node_start_pfn, 1225 unsigned long node_end_pfn, 1226 unsigned long *zone_start_pfn, 1227 unsigned long *zone_end_pfn) 1228 { 1229 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 1230 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 1231 1232 /* Get the start and end of the zone */ 1233 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 1234 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 1235 adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn, 1236 zone_start_pfn, zone_end_pfn); 1237 1238 /* Check that this node has pages within the zone's required range */ 1239 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn) 1240 return 0; 1241 1242 /* Move the zone boundaries inside the node if necessary */ 1243 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn); 1244 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); 1245 1246 /* Return the spanned pages */ 1247 return *zone_end_pfn - *zone_start_pfn; 1248 } 1249 1250 static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat) 1251 { 1252 struct zone *z; 1253 1254 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) { 1255 z->zone_start_pfn = 0; 1256 z->spanned_pages = 0; 1257 z->present_pages = 0; 1258 #if defined(CONFIG_MEMORY_HOTPLUG) 1259 z->present_early_pages = 0; 1260 #endif 1261 } 1262 1263 pgdat->node_spanned_pages = 0; 1264 pgdat->node_present_pages = 0; 1265 pr_debug("On node %d totalpages: 0\n", pgdat->node_id); 1266 } 1267 1268 static void __init calc_nr_kernel_pages(void) 1269 { 1270 unsigned long start_pfn, end_pfn; 1271 phys_addr_t start_addr, end_addr; 1272 u64 u; 1273 #ifdef CONFIG_HIGHMEM 1274 unsigned long high_zone_low = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]; 1275 #endif 1276 1277 for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) { 1278 start_pfn = PFN_UP(start_addr); 1279 end_pfn = PFN_DOWN(end_addr); 1280 1281 if (start_pfn < end_pfn) { 1282 nr_all_pages += end_pfn - start_pfn; 1283 #ifdef CONFIG_HIGHMEM 1284 start_pfn = clamp(start_pfn, 0, high_zone_low); 1285 end_pfn = clamp(end_pfn, 0, high_zone_low); 1286 #endif 1287 nr_kernel_pages += end_pfn - start_pfn; 1288 } 1289 } 1290 } 1291 1292 static void __init calculate_node_totalpages(struct pglist_data *pgdat, 1293 unsigned long node_start_pfn, 1294 unsigned long node_end_pfn) 1295 { 1296 unsigned long realtotalpages = 0, totalpages = 0; 1297 enum zone_type i; 1298 1299 for (i = 0; i < MAX_NR_ZONES; i++) { 1300 struct zone *zone = pgdat->node_zones + i; 1301 unsigned long zone_start_pfn, zone_end_pfn; 
1302 unsigned long spanned, absent; 1303 unsigned long real_size; 1304 1305 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, 1306 node_start_pfn, 1307 node_end_pfn, 1308 &zone_start_pfn, 1309 &zone_end_pfn); 1310 absent = zone_absent_pages_in_node(pgdat->node_id, i, 1311 zone_start_pfn, 1312 zone_end_pfn); 1313 1314 real_size = spanned - absent; 1315 1316 if (spanned) 1317 zone->zone_start_pfn = zone_start_pfn; 1318 else 1319 zone->zone_start_pfn = 0; 1320 zone->spanned_pages = spanned; 1321 zone->present_pages = real_size; 1322 #if defined(CONFIG_MEMORY_HOTPLUG) 1323 zone->present_early_pages = real_size; 1324 #endif 1325 1326 totalpages += spanned; 1327 realtotalpages += real_size; 1328 } 1329 1330 pgdat->node_spanned_pages = totalpages; 1331 pgdat->node_present_pages = realtotalpages; 1332 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); 1333 } 1334 1335 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1336 static void pgdat_init_split_queue(struct pglist_data *pgdat) 1337 { 1338 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; 1339 1340 spin_lock_init(&ds_queue->split_queue_lock); 1341 INIT_LIST_HEAD(&ds_queue->split_queue); 1342 ds_queue->split_queue_len = 0; 1343 } 1344 #else 1345 static void pgdat_init_split_queue(struct pglist_data *pgdat) {} 1346 #endif 1347 1348 #ifdef CONFIG_COMPACTION 1349 static void pgdat_init_kcompactd(struct pglist_data *pgdat) 1350 { 1351 init_waitqueue_head(&pgdat->kcompactd_wait); 1352 } 1353 #else 1354 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {} 1355 #endif 1356 1357 static void __meminit pgdat_init_internals(struct pglist_data *pgdat) 1358 { 1359 int i; 1360 1361 pgdat_resize_init(pgdat); 1362 pgdat_kswapd_lock_init(pgdat); 1363 1364 pgdat_init_split_queue(pgdat); 1365 pgdat_init_kcompactd(pgdat); 1366 1367 init_waitqueue_head(&pgdat->kswapd_wait); 1368 init_waitqueue_head(&pgdat->pfmemalloc_wait); 1369 1370 for (i = 0; i < NR_VMSCAN_THROTTLE; i++) 1371 init_waitqueue_head(&pgdat->reclaim_wait[i]); 1372 1373 pgdat_page_ext_init(pgdat); 1374 lruvec_init(&pgdat->__lruvec); 1375 } 1376 1377 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, 1378 unsigned long remaining_pages) 1379 { 1380 atomic_long_set(&zone->managed_pages, remaining_pages); 1381 zone_set_nid(zone, nid); 1382 zone->name = zone_names[idx]; 1383 zone->zone_pgdat = NODE_DATA(nid); 1384 spin_lock_init(&zone->lock); 1385 zone_seqlock_init(zone); 1386 zone_pcp_init(zone); 1387 } 1388 1389 static void __meminit zone_init_free_lists(struct zone *zone) 1390 { 1391 unsigned int order, t; 1392 for_each_migratetype_order(order, t) { 1393 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 1394 zone->free_area[order].nr_free = 0; 1395 } 1396 1397 #ifdef CONFIG_UNACCEPTED_MEMORY 1398 INIT_LIST_HEAD(&zone->unaccepted_pages); 1399 #endif 1400 } 1401 1402 void __meminit init_currently_empty_zone(struct zone *zone, 1403 unsigned long zone_start_pfn, 1404 unsigned long size) 1405 { 1406 struct pglist_data *pgdat = zone->zone_pgdat; 1407 int zone_idx = zone_idx(zone) + 1; 1408 1409 if (zone_idx > pgdat->nr_zones) 1410 pgdat->nr_zones = zone_idx; 1411 1412 zone->zone_start_pfn = zone_start_pfn; 1413 1414 mminit_dprintk(MMINIT_TRACE, "memmap_init", 1415 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 1416 pgdat->node_id, 1417 (unsigned long)zone_idx(zone), 1418 zone_start_pfn, (zone_start_pfn + size)); 1419 1420 zone_init_free_lists(zone); 1421 zone->initialized = 1; 1422 } 1423 1424 #ifndef CONFIG_SPARSEMEM 
1425 /* 1426 * Calculate the size of the zone->blockflags rounded to an unsigned long 1427 * Start by making sure zonesize is a multiple of pageblock_order by rounding 1428 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 1429 * round what is now in bits to nearest long in bits, then return it in 1430 * bytes. 1431 */ 1432 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 1433 { 1434 unsigned long usemapsize; 1435 1436 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 1437 usemapsize = roundup(zonesize, pageblock_nr_pages); 1438 usemapsize = usemapsize >> pageblock_order; 1439 usemapsize *= NR_PAGEBLOCK_BITS; 1440 usemapsize = roundup(usemapsize, BITS_PER_LONG); 1441 1442 return usemapsize / BITS_PER_BYTE; 1443 } 1444 1445 static void __ref setup_usemap(struct zone *zone) 1446 { 1447 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, 1448 zone->spanned_pages); 1449 zone->pageblock_flags = NULL; 1450 if (usemapsize) { 1451 zone->pageblock_flags = 1452 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES, 1453 zone_to_nid(zone)); 1454 if (!zone->pageblock_flags) 1455 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", 1456 usemapsize, zone->name, zone_to_nid(zone)); 1457 } 1458 } 1459 #else 1460 static inline void setup_usemap(struct zone *zone) {} 1461 #endif /* CONFIG_SPARSEMEM */ 1462 1463 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 1464 1465 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 1466 void __init set_pageblock_order(void) 1467 { 1468 unsigned int order = MAX_PAGE_ORDER; 1469 1470 /* Check that pageblock_nr_pages has not already been setup */ 1471 if (pageblock_order) 1472 return; 1473 1474 /* Don't let pageblocks exceed the maximum allocation granularity. */ 1475 if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order) 1476 order = HUGETLB_PAGE_ORDER; 1477 1478 /* 1479 * Assume the largest contiguous order of interest is a huge page. 1480 * This value may be variable depending on boot parameters on powerpc. 1481 */ 1482 pageblock_order = order; 1483 } 1484 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 1485 1486 /* 1487 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 1488 * is unused as pageblock_order is set at compile-time. See 1489 * include/linux/pageblock-flags.h for the values of pageblock_order based on 1490 * the kernel config 1491 */ 1492 void __init set_pageblock_order(void) 1493 { 1494 } 1495 1496 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 1497 1498 /* 1499 * Set up the zone data structures 1500 * - init pgdat internals 1501 * - init all zones belonging to this node 1502 * 1503 * NOTE: this function is only called during memory hotplug 1504 */ 1505 #ifdef CONFIG_MEMORY_HOTPLUG 1506 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) 1507 { 1508 int nid = pgdat->node_id; 1509 enum zone_type z; 1510 int cpu; 1511 1512 pgdat_init_internals(pgdat); 1513 1514 if (pgdat->per_cpu_nodestats == &boot_nodestats) 1515 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); 1516 1517 /* 1518 * Reset the nr_zones, order and highest_zoneidx before reuse. 1519 * Note that kswapd will init kswapd_highest_zoneidx properly 1520 * when it starts in the near future. 
1521 */ 1522 pgdat->nr_zones = 0; 1523 pgdat->kswapd_order = 0; 1524 pgdat->kswapd_highest_zoneidx = 0; 1525 pgdat->node_start_pfn = 0; 1526 pgdat->node_present_pages = 0; 1527 1528 for_each_online_cpu(cpu) { 1529 struct per_cpu_nodestat *p; 1530 1531 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); 1532 memset(p, 0, sizeof(*p)); 1533 } 1534 1535 /* 1536 * When memory is hot-added, all the memory is in offline state. So 1537 * clear all zones' present_pages and managed_pages because they will 1538 * be updated in online_pages() and offline_pages(). 1539 */ 1540 for (z = 0; z < MAX_NR_ZONES; z++) { 1541 struct zone *zone = pgdat->node_zones + z; 1542 1543 zone->present_pages = 0; 1544 zone_init_internals(zone, z, nid, 0); 1545 } 1546 } 1547 #endif 1548 1549 static void __init free_area_init_core(struct pglist_data *pgdat) 1550 { 1551 enum zone_type j; 1552 int nid = pgdat->node_id; 1553 1554 pgdat_init_internals(pgdat); 1555 pgdat->per_cpu_nodestats = &boot_nodestats; 1556 1557 for (j = 0; j < MAX_NR_ZONES; j++) { 1558 struct zone *zone = pgdat->node_zones + j; 1559 unsigned long size = zone->spanned_pages; 1560 1561 /* 1562 * Initialize zone->managed_pages as 0 , it will be reset 1563 * when memblock allocator frees pages into buddy system. 1564 */ 1565 zone_init_internals(zone, j, nid, zone->present_pages); 1566 1567 if (!size) 1568 continue; 1569 1570 setup_usemap(zone); 1571 init_currently_empty_zone(zone, zone->zone_start_pfn, size); 1572 } 1573 } 1574 1575 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align, 1576 phys_addr_t min_addr, int nid, bool exact_nid) 1577 { 1578 void *ptr; 1579 1580 if (exact_nid) 1581 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr, 1582 MEMBLOCK_ALLOC_ACCESSIBLE, 1583 nid); 1584 else 1585 ptr = memblock_alloc_try_nid_raw(size, align, min_addr, 1586 MEMBLOCK_ALLOC_ACCESSIBLE, 1587 nid); 1588 1589 if (ptr && size > 0) 1590 page_init_poison(ptr, size); 1591 1592 return ptr; 1593 } 1594 1595 #ifdef CONFIG_FLATMEM 1596 static void __init alloc_node_mem_map(struct pglist_data *pgdat) 1597 { 1598 unsigned long start, offset, size, end; 1599 struct page *map; 1600 1601 /* Skip empty nodes */ 1602 if (!pgdat->node_spanned_pages) 1603 return; 1604 1605 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 1606 offset = pgdat->node_start_pfn - start; 1607 /* 1608 * The zone's endpoints aren't required to be MAX_PAGE_ORDER 1609 * aligned but the node_mem_map endpoints must be in order 1610 * for the buddy allocator to function correctly. 
1611 */ 1612 end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES); 1613 size = (end - start) * sizeof(struct page); 1614 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, 1615 pgdat->node_id, false); 1616 if (!map) 1617 panic("Failed to allocate %ld bytes for node %d memory map\n", 1618 size, pgdat->node_id); 1619 pgdat->node_mem_map = map + offset; 1620 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", 1621 __func__, pgdat->node_id, (unsigned long)pgdat, 1622 (unsigned long)pgdat->node_mem_map); 1623 #ifndef CONFIG_NUMA 1624 /* the global mem_map is just set as node 0's */ 1625 if (pgdat == NODE_DATA(0)) { 1626 mem_map = NODE_DATA(0)->node_mem_map; 1627 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 1628 mem_map -= offset; 1629 } 1630 #endif 1631 } 1632 #else 1633 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } 1634 #endif /* CONFIG_FLATMEM */ 1635 1636 /** 1637 * get_pfn_range_for_nid - Return the start and end page frames for a node 1638 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 1639 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 1640 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 1641 * 1642 * It returns the start and end page frame of a node based on information 1643 * provided by memblock_set_node(). If called for a node 1644 * with no available memory, the start and end PFNs will be 0. 1645 */ 1646 void __init get_pfn_range_for_nid(unsigned int nid, 1647 unsigned long *start_pfn, unsigned long *end_pfn) 1648 { 1649 unsigned long this_start_pfn, this_end_pfn; 1650 int i; 1651 1652 *start_pfn = -1UL; 1653 *end_pfn = 0; 1654 1655 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 1656 *start_pfn = min(*start_pfn, this_start_pfn); 1657 *end_pfn = max(*end_pfn, this_end_pfn); 1658 } 1659 1660 if (*start_pfn == -1UL) 1661 *start_pfn = 0; 1662 } 1663 1664 static void __init free_area_init_node(int nid) 1665 { 1666 pg_data_t *pgdat = NODE_DATA(nid); 1667 unsigned long start_pfn = 0; 1668 unsigned long end_pfn = 0; 1669 1670 /* pg_data_t should be reset to zero when it's allocated */ 1671 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); 1672 1673 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 1674 1675 pgdat->node_id = nid; 1676 pgdat->node_start_pfn = start_pfn; 1677 pgdat->per_cpu_nodestats = NULL; 1678 1679 if (start_pfn != end_pfn) { 1680 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 1681 (u64)start_pfn << PAGE_SHIFT, 1682 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 1683 1684 calculate_node_totalpages(pgdat, start_pfn, end_pfn); 1685 } else { 1686 pr_info("Initmem setup node %d as memoryless\n", nid); 1687 1688 reset_memoryless_node_totalpages(pgdat); 1689 } 1690 1691 alloc_node_mem_map(pgdat); 1692 pgdat_set_deferred_range(pgdat); 1693 1694 free_area_init_core(pgdat); 1695 lru_gen_init_pgdat(pgdat); 1696 } 1697 1698 /* Any regular or high memory on that node ? 
*/ 1699 static void __init check_for_memory(pg_data_t *pgdat) 1700 { 1701 enum zone_type zone_type; 1702 1703 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 1704 struct zone *zone = &pgdat->node_zones[zone_type]; 1705 if (populated_zone(zone)) { 1706 if (IS_ENABLED(CONFIG_HIGHMEM)) 1707 node_set_state(pgdat->node_id, N_HIGH_MEMORY); 1708 if (zone_type <= ZONE_NORMAL) 1709 node_set_state(pgdat->node_id, N_NORMAL_MEMORY); 1710 break; 1711 } 1712 } 1713 } 1714 1715 #if MAX_NUMNODES > 1 1716 /* 1717 * Figure out the number of possible node ids. 1718 */ 1719 void __init setup_nr_node_ids(void) 1720 { 1721 unsigned int highest; 1722 1723 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 1724 nr_node_ids = highest + 1; 1725 } 1726 #endif 1727 1728 /* 1729 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For 1730 * such cases we allow max_zone_pfn sorted in the descending order 1731 */ 1732 static bool arch_has_descending_max_zone_pfns(void) 1733 { 1734 return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40); 1735 } 1736 1737 /** 1738 * free_area_init - Initialise all pg_data_t and zone data 1739 * @max_zone_pfn: an array of max PFNs for each zone 1740 * 1741 * This will call free_area_init_node() for each active node in the system. 1742 * Using the page ranges provided by memblock_set_node(), the size of each 1743 * zone in each node and their holes is calculated. If the maximum PFN 1744 * between two adjacent zones match, it is assumed that the zone is empty. 1745 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 1746 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 1747 * starts where the previous one ended. For example, ZONE_DMA32 starts 1748 * at arch_max_dma_pfn. 
1749 */ 1750 void __init free_area_init(unsigned long *max_zone_pfn) 1751 { 1752 unsigned long start_pfn, end_pfn; 1753 int i, nid, zone; 1754 bool descending; 1755 1756 /* Record where the zone boundaries are */ 1757 memset(arch_zone_lowest_possible_pfn, 0, 1758 sizeof(arch_zone_lowest_possible_pfn)); 1759 memset(arch_zone_highest_possible_pfn, 0, 1760 sizeof(arch_zone_highest_possible_pfn)); 1761 1762 start_pfn = PHYS_PFN(memblock_start_of_DRAM()); 1763 descending = arch_has_descending_max_zone_pfns(); 1764 1765 for (i = 0; i < MAX_NR_ZONES; i++) { 1766 if (descending) 1767 zone = MAX_NR_ZONES - i - 1; 1768 else 1769 zone = i; 1770 1771 if (zone == ZONE_MOVABLE) 1772 continue; 1773 1774 end_pfn = max(max_zone_pfn[zone], start_pfn); 1775 arch_zone_lowest_possible_pfn[zone] = start_pfn; 1776 arch_zone_highest_possible_pfn[zone] = end_pfn; 1777 1778 start_pfn = end_pfn; 1779 } 1780 1781 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 1782 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 1783 find_zone_movable_pfns_for_nodes(); 1784 1785 /* Print out the zone ranges */ 1786 pr_info("Zone ranges:\n"); 1787 for (i = 0; i < MAX_NR_ZONES; i++) { 1788 if (i == ZONE_MOVABLE) 1789 continue; 1790 pr_info(" %-8s ", zone_names[i]); 1791 if (arch_zone_lowest_possible_pfn[i] == 1792 arch_zone_highest_possible_pfn[i]) 1793 pr_cont("empty\n"); 1794 else 1795 pr_cont("[mem %#018Lx-%#018Lx]\n", 1796 (u64)arch_zone_lowest_possible_pfn[i] 1797 << PAGE_SHIFT, 1798 ((u64)arch_zone_highest_possible_pfn[i] 1799 << PAGE_SHIFT) - 1); 1800 } 1801 1802 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 1803 pr_info("Movable zone start for each node\n"); 1804 for (i = 0; i < MAX_NUMNODES; i++) { 1805 if (zone_movable_pfn[i]) 1806 pr_info(" Node %d: %#018Lx\n", i, 1807 (u64)zone_movable_pfn[i] << PAGE_SHIFT); 1808 } 1809 1810 /* 1811 * Print out the early node map, and initialize the 1812 * subsection-map relative to active online memory ranges to 1813 * enable future "sub-section" extensions of the memory map. 1814 */ 1815 pr_info("Early memory node ranges\n"); 1816 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 1817 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, 1818 (u64)start_pfn << PAGE_SHIFT, 1819 ((u64)end_pfn << PAGE_SHIFT) - 1); 1820 subsection_map_init(start_pfn, end_pfn - start_pfn); 1821 } 1822 1823 /* Initialise every node */ 1824 mminit_verify_pageflags_layout(); 1825 setup_nr_node_ids(); 1826 set_pageblock_order(); 1827 1828 for_each_node(nid) { 1829 pg_data_t *pgdat; 1830 1831 if (!node_online(nid)) { 1832 /* Allocator not initialized yet */ 1833 pgdat = arch_alloc_nodedata(nid); 1834 if (!pgdat) 1835 panic("Cannot allocate %zuB for node %d.\n", 1836 sizeof(*pgdat), nid); 1837 arch_refresh_nodedata(nid, pgdat); 1838 } 1839 1840 pgdat = NODE_DATA(nid); 1841 free_area_init_node(nid); 1842 1843 /* 1844 * No sysfs hierarcy will be created via register_one_node() 1845 *for memory-less node because here it's not marked as N_MEMORY 1846 *and won't be set online later. The benefit is userspace 1847 *program won't be confused by sysfs files/directories of 1848 *memory-less node. The pgdat will get fully initialized by 1849 *hotadd_init_pgdat() when memory is hotplugged into this node. 
1850 */ 1851 if (pgdat->node_present_pages) { 1852 node_set_state(nid, N_MEMORY); 1853 check_for_memory(pgdat); 1854 } 1855 } 1856 1857 calc_nr_kernel_pages(); 1858 memmap_init(); 1859 1860 /* disable hash distribution for systems with a single node */ 1861 fixup_hashdist(); 1862 } 1863 1864 /** 1865 * node_map_pfn_alignment - determine the maximum internode alignment 1866 * 1867 * This function should be called after node map is populated and sorted. 1868 * It calculates the maximum power of two alignment which can distinguish 1869 * all the nodes. 1870 * 1871 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 1872 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 1873 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 1874 * shifted, 1GiB is enough and this function will indicate so. 1875 * 1876 * This is used to test whether pfn -> nid mapping of the chosen memory 1877 * model has fine enough granularity to avoid incorrect mapping for the 1878 * populated node map. 1879 * 1880 * Return: the determined alignment in pfn's. 0 if there is no alignment 1881 * requirement (single node). 1882 */ 1883 unsigned long __init node_map_pfn_alignment(void) 1884 { 1885 unsigned long accl_mask = 0, last_end = 0; 1886 unsigned long start, end, mask; 1887 int last_nid = NUMA_NO_NODE; 1888 int i, nid; 1889 1890 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 1891 if (!start || last_nid < 0 || last_nid == nid) { 1892 last_nid = nid; 1893 last_end = end; 1894 continue; 1895 } 1896 1897 /* 1898 * Start with a mask granular enough to pin-point to the 1899 * start pfn and tick off bits one-by-one until it becomes 1900 * too coarse to separate the current node from the last. 1901 */ 1902 mask = ~((1 << __ffs(start)) - 1); 1903 while (mask && last_end <= (start & (mask << 1))) 1904 mask <<= 1; 1905 1906 /* accumulate all internode masks */ 1907 accl_mask |= mask; 1908 } 1909 1910 /* convert mask to number of pages */ 1911 return ~accl_mask + 1; 1912 } 1913 1914 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1915 static void __init deferred_free_range(unsigned long pfn, 1916 unsigned long nr_pages) 1917 { 1918 struct page *page; 1919 unsigned long i; 1920 1921 if (!nr_pages) 1922 return; 1923 1924 page = pfn_to_page(pfn); 1925 1926 /* Free a large naturally-aligned chunk if possible */ 1927 if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) { 1928 for (i = 0; i < nr_pages; i += pageblock_nr_pages) 1929 set_pageblock_migratetype(page + i, MIGRATE_MOVABLE); 1930 __free_pages_core(page, MAX_PAGE_ORDER); 1931 return; 1932 } 1933 1934 /* Accept chunks smaller than MAX_PAGE_ORDER upfront */ 1935 accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages)); 1936 1937 for (i = 0; i < nr_pages; i++, page++, pfn++) { 1938 if (pageblock_aligned(pfn)) 1939 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 1940 __free_pages_core(page, 0); 1941 } 1942 } 1943 1944 /* Completion tracking for deferred_init_memmap() threads */ 1945 static atomic_t pgdat_init_n_undone __initdata; 1946 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); 1947 1948 static inline void __init pgdat_init_report_one_done(void) 1949 { 1950 if (atomic_dec_and_test(&pgdat_init_n_undone)) 1951 complete(&pgdat_init_all_done_comp); 1952 } 1953 1954 /* 1955 * Returns true if page needs to be initialized or freed to buddy allocator. 1956 * 1957 * We check if a current MAX_PAGE_ORDER block is valid by only checking the 1958 * validity of the head pfn. 
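 *
 * Purely as an illustration: assuming 4 KiB pages and MAX_PAGE_ORDER == 10
 * (both assumptions, not guaranteed by this file), a max-order block spans
 * 1024 pfns, so pfn_valid() is consulted roughly once per 4 MiB rather than
 * once per page, and the remaining pfns in the block are taken to share the
 * head pfn's validity.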
1959 */ 1960 static inline bool __init deferred_pfn_valid(unsigned long pfn) 1961 { 1962 if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn)) 1963 return false; 1964 return true; 1965 } 1966 1967 /* 1968 * Free pages to buddy allocator. Try to free aligned pages in 1969 * MAX_ORDER_NR_PAGES sizes. 1970 */ 1971 static void __init deferred_free_pages(unsigned long pfn, 1972 unsigned long end_pfn) 1973 { 1974 unsigned long nr_free = 0; 1975 1976 for (; pfn < end_pfn; pfn++) { 1977 if (!deferred_pfn_valid(pfn)) { 1978 deferred_free_range(pfn - nr_free, nr_free); 1979 nr_free = 0; 1980 } else if (IS_MAX_ORDER_ALIGNED(pfn)) { 1981 deferred_free_range(pfn - nr_free, nr_free); 1982 nr_free = 1; 1983 } else { 1984 nr_free++; 1985 } 1986 } 1987 /* Free the last block of pages to allocator */ 1988 deferred_free_range(pfn - nr_free, nr_free); 1989 } 1990 1991 /* 1992 * Initialize struct pages. We minimize pfn page lookups and scheduler checks 1993 * by performing it only once every MAX_ORDER_NR_PAGES. 1994 * Return number of pages initialized. 1995 */ 1996 static unsigned long __init deferred_init_pages(struct zone *zone, 1997 unsigned long pfn, 1998 unsigned long end_pfn) 1999 { 2000 int nid = zone_to_nid(zone); 2001 unsigned long nr_pages = 0; 2002 int zid = zone_idx(zone); 2003 struct page *page = NULL; 2004 2005 for (; pfn < end_pfn; pfn++) { 2006 if (!deferred_pfn_valid(pfn)) { 2007 page = NULL; 2008 continue; 2009 } else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) { 2010 page = pfn_to_page(pfn); 2011 } else { 2012 page++; 2013 } 2014 __init_single_page(page, pfn, zid, nid); 2015 nr_pages++; 2016 } 2017 return nr_pages; 2018 } 2019 2020 /* 2021 * This function is meant to pre-load the iterator for the zone init from 2022 * a given point. 2023 * Specifically it walks through the ranges starting with initial index 2024 * passed to it until we are caught up to the first_init_pfn value and 2025 * exits there. If we never encounter the value we return false indicating 2026 * there are no valid ranges left. 2027 */ 2028 static bool __init 2029 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, 2030 unsigned long *spfn, unsigned long *epfn, 2031 unsigned long first_init_pfn) 2032 { 2033 u64 j = *i; 2034 2035 if (j == 0) 2036 __next_mem_pfn_range_in_zone(&j, zone, spfn, epfn); 2037 2038 /* 2039 * Start out by walking through the ranges in this zone that have 2040 * already been initialized. We don't need to do anything with them 2041 * so we just need to flush them out of the system. 2042 */ 2043 for_each_free_mem_pfn_range_in_zone_from(j, zone, spfn, epfn) { 2044 if (*epfn <= first_init_pfn) 2045 continue; 2046 if (*spfn < first_init_pfn) 2047 *spfn = first_init_pfn; 2048 *i = j; 2049 return true; 2050 } 2051 2052 return false; 2053 } 2054 2055 /* 2056 * Initialize and free pages. We do it in two loops: first we initialize 2057 * struct page, then free to buddy allocator, because while we are 2058 * freeing pages we can access pages that are ahead (computing buddy 2059 * page in __free_one_page()). 2060 * 2061 * In order to try and keep some memory in the cache we have the loop 2062 * broken along max page order boundaries. This way we will not cause 2063 * any issues with the buddy page computation. 
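 *
 * As a sketch of the problem being avoided: freeing a block of order o at
 * pfn N makes __free_one_page() look at the buddy at pfn N ^ (1 << o),
 * which can lie ahead of N within the same max-order block, so that
 * struct page must already have been initialized by the first loop.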
2064 */ 2065 static unsigned long __init 2066 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, 2067 unsigned long *end_pfn) 2068 { 2069 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES); 2070 unsigned long spfn = *start_pfn, epfn = *end_pfn; 2071 unsigned long nr_pages = 0; 2072 u64 j = *i; 2073 2074 /* First we loop through and initialize the page values */ 2075 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { 2076 unsigned long t; 2077 2078 if (mo_pfn <= *start_pfn) 2079 break; 2080 2081 t = min(mo_pfn, *end_pfn); 2082 nr_pages += deferred_init_pages(zone, *start_pfn, t); 2083 2084 if (mo_pfn < *end_pfn) { 2085 *start_pfn = mo_pfn; 2086 break; 2087 } 2088 } 2089 2090 /* Reset values and now loop through freeing pages as needed */ 2091 swap(j, *i); 2092 2093 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { 2094 unsigned long t; 2095 2096 if (mo_pfn <= spfn) 2097 break; 2098 2099 t = min(mo_pfn, epfn); 2100 deferred_free_pages(spfn, t); 2101 2102 if (mo_pfn <= epfn) 2103 break; 2104 } 2105 2106 return nr_pages; 2107 } 2108 2109 static void __init 2110 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, 2111 void *arg) 2112 { 2113 unsigned long spfn, epfn; 2114 struct zone *zone = arg; 2115 u64 i = 0; 2116 2117 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); 2118 2119 /* 2120 * Initialize and free pages in MAX_PAGE_ORDER sized increments so that 2121 * we can avoid introducing any issues with the buddy allocator. 2122 */ 2123 while (spfn < end_pfn) { 2124 deferred_init_maxorder(&i, zone, &spfn, &epfn); 2125 cond_resched(); 2126 } 2127 } 2128 2129 /* An arch may override for more concurrency. */ 2130 __weak int __init 2131 deferred_page_init_max_threads(const struct cpumask *node_cpumask) 2132 { 2133 return 1; 2134 } 2135 2136 /* Initialise remaining memory on a node */ 2137 static int __init deferred_init_memmap(void *data) 2138 { 2139 pg_data_t *pgdat = data; 2140 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2141 unsigned long spfn = 0, epfn = 0; 2142 unsigned long first_init_pfn, flags; 2143 unsigned long start = jiffies; 2144 struct zone *zone; 2145 int max_threads; 2146 u64 i = 0; 2147 2148 /* Bind memory initialisation thread to a local node if possible */ 2149 if (!cpumask_empty(cpumask)) 2150 set_cpus_allowed_ptr(current, cpumask); 2151 2152 pgdat_resize_lock(pgdat, &flags); 2153 first_init_pfn = pgdat->first_deferred_pfn; 2154 if (first_init_pfn == ULONG_MAX) { 2155 pgdat_resize_unlock(pgdat, &flags); 2156 pgdat_init_report_one_done(); 2157 return 0; 2158 } 2159 2160 /* Sanity check boundaries */ 2161 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); 2162 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); 2163 pgdat->first_deferred_pfn = ULONG_MAX; 2164 2165 /* 2166 * Once we unlock here, the zone cannot be grown anymore, thus if an 2167 * interrupt thread must allocate this early in boot, zone must be 2168 * pre-grown prior to start of deferred page initialization. 
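 *
 * (The on-demand growth path referred to here is deferred_grow_zone()
 * below, which initializes extra sections under the same resize lock.)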
2169 */ 2170 pgdat_resize_unlock(pgdat, &flags); 2171 2172 /* Only the highest zone is deferred */ 2173 zone = pgdat->node_zones + pgdat->nr_zones - 1; 2174 2175 max_threads = deferred_page_init_max_threads(cpumask); 2176 2177 while (deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, first_init_pfn)) { 2178 first_init_pfn = ALIGN(epfn, PAGES_PER_SECTION); 2179 struct padata_mt_job job = { 2180 .thread_fn = deferred_init_memmap_chunk, 2181 .fn_arg = zone, 2182 .start = spfn, 2183 .size = first_init_pfn - spfn, 2184 .align = PAGES_PER_SECTION, 2185 .min_chunk = PAGES_PER_SECTION, 2186 .max_threads = max_threads, 2187 .numa_aware = false, 2188 }; 2189 2190 padata_do_multithreaded(&job); 2191 } 2192 2193 /* Sanity check that the next zone really is unpopulated */ 2194 WARN_ON(pgdat->nr_zones < MAX_NR_ZONES && populated_zone(++zone)); 2195 2196 pr_info("node %d deferred pages initialised in %ums\n", 2197 pgdat->node_id, jiffies_to_msecs(jiffies - start)); 2198 2199 pgdat_init_report_one_done(); 2200 return 0; 2201 } 2202 2203 /* 2204 * If this zone has deferred pages, try to grow it by initializing enough 2205 * deferred pages to satisfy the allocation specified by order, rounded up to 2206 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments 2207 * of SECTION_SIZE bytes by initializing struct pages in increments of 2208 * PAGES_PER_SECTION * sizeof(struct page) bytes. 2209 * 2210 * Return true when zone was grown, otherwise return false. We return true even 2211 * when we grow less than requested, to let the caller decide if there are 2212 * enough pages to satisfy the allocation. 2213 */ 2214 bool __init deferred_grow_zone(struct zone *zone, unsigned int order) 2215 { 2216 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); 2217 pg_data_t *pgdat = zone->zone_pgdat; 2218 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; 2219 unsigned long spfn, epfn, flags; 2220 unsigned long nr_pages = 0; 2221 u64 i = 0; 2222 2223 /* Only the last zone may have deferred pages */ 2224 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) 2225 return false; 2226 2227 pgdat_resize_lock(pgdat, &flags); 2228 2229 /* 2230 * If someone grew this zone while we were waiting for spinlock, return 2231 * true, as there might be enough pages already. 2232 */ 2233 if (first_deferred_pfn != pgdat->first_deferred_pfn) { 2234 pgdat_resize_unlock(pgdat, &flags); 2235 return true; 2236 } 2237 2238 /* If the zone is empty somebody else may have cleared out the zone */ 2239 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2240 first_deferred_pfn)) { 2241 pgdat->first_deferred_pfn = ULONG_MAX; 2242 pgdat_resize_unlock(pgdat, &flags); 2243 /* Retry only once. */ 2244 return first_deferred_pfn != ULONG_MAX; 2245 } 2246 2247 /* 2248 * Initialize and free pages in MAX_PAGE_ORDER sized increments so 2249 * that we can avoid introducing any issues with the buddy 2250 * allocator. 
2251 */ 2252 while (spfn < epfn) { 2253 /* update our first deferred PFN for this section */ 2254 first_deferred_pfn = spfn; 2255 2256 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); 2257 touch_nmi_watchdog(); 2258 2259 /* We should only stop along section boundaries */ 2260 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) 2261 continue; 2262 2263 /* If our quota has been met we can stop here */ 2264 if (nr_pages >= nr_pages_needed) 2265 break; 2266 } 2267 2268 pgdat->first_deferred_pfn = spfn; 2269 pgdat_resize_unlock(pgdat, &flags); 2270 2271 return nr_pages > 0; 2272 } 2273 2274 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 2275 2276 #ifdef CONFIG_CMA 2277 void __init init_cma_reserved_pageblock(struct page *page) 2278 { 2279 unsigned i = pageblock_nr_pages; 2280 struct page *p = page; 2281 2282 do { 2283 __ClearPageReserved(p); 2284 set_page_count(p, 0); 2285 } while (++p, --i); 2286 2287 set_pageblock_migratetype(page, MIGRATE_CMA); 2288 set_page_refcounted(page); 2289 __free_pages(page, pageblock_order); 2290 2291 adjust_managed_page_count(page, pageblock_nr_pages); 2292 page_zone(page)->cma_pages += pageblock_nr_pages; 2293 } 2294 #endif 2295 2296 void set_zone_contiguous(struct zone *zone) 2297 { 2298 unsigned long block_start_pfn = zone->zone_start_pfn; 2299 unsigned long block_end_pfn; 2300 2301 block_end_pfn = pageblock_end_pfn(block_start_pfn); 2302 for (; block_start_pfn < zone_end_pfn(zone); 2303 block_start_pfn = block_end_pfn, 2304 block_end_pfn += pageblock_nr_pages) { 2305 2306 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); 2307 2308 if (!__pageblock_pfn_to_page(block_start_pfn, 2309 block_end_pfn, zone)) 2310 return; 2311 cond_resched(); 2312 } 2313 2314 /* We confirm that there is no hole */ 2315 zone->contiguous = true; 2316 } 2317 2318 void __init page_alloc_init_late(void) 2319 { 2320 struct zone *zone; 2321 int nid; 2322 2323 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 2324 2325 /* There will be num_node_state(N_MEMORY) threads */ 2326 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); 2327 for_each_node_state(nid, N_MEMORY) { 2328 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 2329 } 2330 2331 /* Block until all are initialised */ 2332 wait_for_completion(&pgdat_init_all_done_comp); 2333 2334 /* 2335 * We initialized the rest of the deferred pages. Permanently disable 2336 * on-demand struct page initialization. 2337 */ 2338 static_branch_disable(&deferred_pages); 2339 2340 /* Reinit limits that are based on free pages after the kernel is up */ 2341 files_maxfiles_init(); 2342 #endif 2343 2344 buffer_init(); 2345 2346 /* Discard memblock private memory */ 2347 memblock_discard(); 2348 2349 for_each_node_state(nid, N_MEMORY) 2350 shuffle_free_memory(NODE_DATA(nid)); 2351 2352 for_each_populated_zone(zone) 2353 set_zone_contiguous(zone); 2354 2355 /* Initialize page ext after all struct pages are initialized. */ 2356 if (deferred_struct_pages) 2357 page_ext_init(); 2358 2359 page_alloc_sysctl_init(); 2360 } 2361 2362 /* 2363 * Adaptive scale is meant to reduce sizes of hash tables on large memory 2364 * machines. As memory size is increased the scale is also increased but at 2365 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory 2366 * quadruples the scale is increased by one, which means the size of hash table 2367 * only doubles, instead of quadrupling as well. 
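 *
 * Worked example (illustrative): going from 64 GiB to 256 GiB quadruples
 * memory once past ADAPT_SCALE_BASE, so scale grows by 1 and a hash table
 * that would otherwise have grown 4x only grows 2x.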
2368 * Because 32-bit systems cannot have large physical memory, where this scaling 2369 * makes sense, it is disabled on such platforms. 2370 */ 2371 #if __BITS_PER_LONG > 32 2372 #define ADAPT_SCALE_BASE (64ul << 30) 2373 #define ADAPT_SCALE_SHIFT 2 2374 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT) 2375 #endif 2376 2377 /* 2378 * allocate a large system hash table from bootmem 2379 * - it is assumed that the hash table must contain an exact power-of-2 2380 * quantity of entries 2381 * - limit is the number of hash buckets, not the total allocation size 2382 */ 2383 void *__init alloc_large_system_hash(const char *tablename, 2384 unsigned long bucketsize, 2385 unsigned long numentries, 2386 int scale, 2387 int flags, 2388 unsigned int *_hash_shift, 2389 unsigned int *_hash_mask, 2390 unsigned long low_limit, 2391 unsigned long high_limit) 2392 { 2393 unsigned long long max = high_limit; 2394 unsigned long log2qty, size; 2395 void *table; 2396 gfp_t gfp_flags; 2397 bool virt; 2398 bool huge; 2399 2400 /* allow the kernel cmdline to have a say */ 2401 if (!numentries) { 2402 /* round applicable memory size up to nearest megabyte */ 2403 numentries = nr_kernel_pages; 2404 2405 /* It isn't necessary when PAGE_SIZE >= 1MB */ 2406 if (PAGE_SIZE < SZ_1M) 2407 numentries = round_up(numentries, SZ_1M / PAGE_SIZE); 2408 2409 #if __BITS_PER_LONG > 32 2410 if (!high_limit) { 2411 unsigned long adapt; 2412 2413 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; 2414 adapt <<= ADAPT_SCALE_SHIFT) 2415 scale++; 2416 } 2417 #endif 2418 2419 /* limit to 1 bucket per 2^scale bytes of low memory */ 2420 if (scale > PAGE_SHIFT) 2421 numentries >>= (scale - PAGE_SHIFT); 2422 else 2423 numentries <<= (PAGE_SHIFT - scale); 2424 2425 if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 2426 numentries = PAGE_SIZE / bucketsize; 2427 } 2428 numentries = roundup_pow_of_two(numentries); 2429 2430 /* limit allocation size to 1/16 total memory by default */ 2431 if (max == 0) { 2432 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 2433 do_div(max, bucketsize); 2434 } 2435 max = min(max, 0x80000000ULL); 2436 2437 if (numentries < low_limit) 2438 numentries = low_limit; 2439 if (numentries > max) 2440 numentries = max; 2441 2442 log2qty = ilog2(numentries); 2443 2444 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC; 2445 do { 2446 virt = false; 2447 size = bucketsize << log2qty; 2448 if (flags & HASH_EARLY) { 2449 if (flags & HASH_ZERO) 2450 table = memblock_alloc(size, SMP_CACHE_BYTES); 2451 else 2452 table = memblock_alloc_raw(size, 2453 SMP_CACHE_BYTES); 2454 } else if (get_order(size) > MAX_PAGE_ORDER || hashdist) { 2455 table = vmalloc_huge(size, gfp_flags); 2456 virt = true; 2457 if (table) 2458 huge = is_vm_area_hugepages(table); 2459 } else { 2460 /* 2461 * If bucketsize is not a power-of-two, we may free 2462 * some pages at the end of hash table which 2463 * alloc_pages_exact() automatically does 2464 */ 2465 table = alloc_pages_exact(size, gfp_flags); 2466 kmemleak_alloc(table, size, 1, gfp_flags); 2467 } 2468 } while (!table && size > PAGE_SIZE && --log2qty); 2469 2470 if (!table) 2471 panic("Failed to allocate %s hash table\n", tablename); 2472 2473 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", 2474 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, 2475 virt ? (huge ? 
"vmalloc hugepage" : "vmalloc") : "linear"); 2476 2477 if (_hash_shift) 2478 *_hash_shift = log2qty; 2479 if (_hash_mask) 2480 *_hash_mask = (1 << log2qty) - 1; 2481 2482 return table; 2483 } 2484 2485 void __init memblock_free_pages(struct page *page, unsigned long pfn, 2486 unsigned int order) 2487 { 2488 if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) { 2489 int nid = early_pfn_to_nid(pfn); 2490 2491 if (!early_page_initialised(pfn, nid)) 2492 return; 2493 } 2494 2495 if (!kmsan_memblock_free_pages(page, order)) { 2496 /* KMSAN will take care of these pages. */ 2497 return; 2498 } 2499 2500 /* pages were reserved and not allocated */ 2501 if (mem_alloc_profiling_enabled()) { 2502 union codetag_ref *ref = get_page_tag_ref(page); 2503 2504 if (ref) { 2505 set_codetag_empty(ref); 2506 put_page_tag_ref(ref); 2507 } 2508 } 2509 2510 __free_pages_core(page, order); 2511 } 2512 2513 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); 2514 EXPORT_SYMBOL(init_on_alloc); 2515 2516 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); 2517 EXPORT_SYMBOL(init_on_free); 2518 2519 static bool _init_on_alloc_enabled_early __read_mostly 2520 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); 2521 static int __init early_init_on_alloc(char *buf) 2522 { 2523 2524 return kstrtobool(buf, &_init_on_alloc_enabled_early); 2525 } 2526 early_param("init_on_alloc", early_init_on_alloc); 2527 2528 static bool _init_on_free_enabled_early __read_mostly 2529 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON); 2530 static int __init early_init_on_free(char *buf) 2531 { 2532 return kstrtobool(buf, &_init_on_free_enabled_early); 2533 } 2534 early_param("init_on_free", early_init_on_free); 2535 2536 DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled); 2537 2538 /* 2539 * Enable static keys related to various memory debugging and hardening options. 2540 * Some override others, and depend on early params that are evaluated in the 2541 * order of appearance. So we need to first gather the full picture of what was 2542 * enabled, and then make decisions. 2543 */ 2544 static void __init mem_debugging_and_hardening_init(void) 2545 { 2546 bool page_poisoning_requested = false; 2547 bool want_check_pages = false; 2548 2549 #ifdef CONFIG_PAGE_POISONING 2550 /* 2551 * Page poisoning is debug page alloc for some arches. If 2552 * either of those options are enabled, enable poisoning. 
2553 */ 2554 if (page_poisoning_enabled() || 2555 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && 2556 debug_pagealloc_enabled())) { 2557 static_branch_enable(&_page_poisoning_enabled); 2558 page_poisoning_requested = true; 2559 want_check_pages = true; 2560 } 2561 #endif 2562 2563 if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && 2564 page_poisoning_requested) { 2565 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " 2566 "will take precedence over init_on_alloc and init_on_free\n"); 2567 _init_on_alloc_enabled_early = false; 2568 _init_on_free_enabled_early = false; 2569 } 2570 2571 if (_init_on_alloc_enabled_early) { 2572 want_check_pages = true; 2573 static_branch_enable(&init_on_alloc); 2574 } else { 2575 static_branch_disable(&init_on_alloc); 2576 } 2577 2578 if (_init_on_free_enabled_early) { 2579 want_check_pages = true; 2580 static_branch_enable(&init_on_free); 2581 } else { 2582 static_branch_disable(&init_on_free); 2583 } 2584 2585 if (IS_ENABLED(CONFIG_KMSAN) && 2586 (_init_on_alloc_enabled_early || _init_on_free_enabled_early)) 2587 pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n"); 2588 2589 #ifdef CONFIG_DEBUG_PAGEALLOC 2590 if (debug_pagealloc_enabled()) { 2591 want_check_pages = true; 2592 static_branch_enable(&_debug_pagealloc_enabled); 2593 2594 if (debug_guardpage_minorder()) 2595 static_branch_enable(&_debug_guardpage_enabled); 2596 } 2597 #endif 2598 2599 /* 2600 * Any page debugging or hardening option also enables sanity checking 2601 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's 2602 * enabled already. 2603 */ 2604 if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages) 2605 static_branch_enable(&check_pages_enabled); 2606 } 2607 2608 /* Report memory auto-initialization states for this boot. */ 2609 static void __init report_meminit(void) 2610 { 2611 const char *stack; 2612 2613 if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN)) 2614 stack = "all(pattern)"; 2615 else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO)) 2616 stack = "all(zero)"; 2617 else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL)) 2618 stack = "byref_all(zero)"; 2619 else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)) 2620 stack = "byref(zero)"; 2621 else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)) 2622 stack = "__user(zero)"; 2623 else 2624 stack = "off"; 2625 2626 pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", 2627 stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off", 2628 want_init_on_free() ? "on" : "off"); 2629 if (want_init_on_free()) 2630 pr_info("mem auto-init: clearing system memory may take some time...\n"); 2631 } 2632 2633 static void __init mem_init_print_info(void) 2634 { 2635 unsigned long physpages, codesize, datasize, rosize, bss_size; 2636 unsigned long init_code_size, init_data_size; 2637 2638 physpages = get_num_physpages(); 2639 codesize = _etext - _stext; 2640 datasize = _edata - _sdata; 2641 rosize = __end_rodata - __start_rodata; 2642 bss_size = __bss_stop - __bss_start; 2643 init_data_size = __init_end - __init_begin; 2644 init_code_size = _einittext - _sinittext; 2645 2646 /* 2647 * Detect special cases and adjust section sizes accordingly: 2648 * 1) .init.* may be embedded into .data sections 2649 * 2) .init.text.* may be out of [__init_begin, __init_end], 2650 * please refer to arch/tile/kernel/vmlinux.lds.S. 2651 * 3) .rodata.* may be embedded into .text or .data sections. 
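 *
 * As an example of the adjustment below: when _sinittext lies inside
 * [_stext, _etext), adj_init_size() subtracts init_code_size from codesize
 * so the init text is not reported twice.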
 */
#define adj_init_size(start, end, size, pos, adj) \
	do { \
		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
			size -= adj; \
	} while (0)

	adj_init_size(__init_begin, __init_end, init_data_size,
		     _sinittext, init_code_size);
	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);

#undef	adj_init_size

	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
#ifdef	CONFIG_HIGHMEM
		", %luK highmem"
#endif
		")\n",
		K(nr_free_pages()), K(physpages),
		codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
		(init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
		K(physpages - totalram_pages() - totalcma_pages),
		K(totalcma_pages)
#ifdef	CONFIG_HIGHMEM
		, K(totalhigh_pages())
#endif
		);
}

/*
 * Set up kernel memory allocators
 */
void __init mm_core_init(void)
{
	/* Initializations relying on SMP setup */
	build_all_zonelists(NULL);
	page_alloc_init_cpuhp();

	/*
	 * page_ext requires contiguous pages,
	 * bigger than MAX_PAGE_ORDER unless SPARSEMEM.
	 */
	page_ext_init_flatmem();
	mem_debugging_and_hardening_init();
	kfence_alloc_pool_and_metadata();
	report_meminit();
	kmsan_init_shadow();
	stack_depot_early_init();
	mem_init();
	mem_init_print_info();
	kmem_cache_init();
	/*
	 * page_owner must be initialized after buddy is ready, and also after
	 * slab is ready so that stack_depot_init() works properly
	 */
	page_ext_init_flatmem_late();
	kmemleak_init();
	ptlock_cache_init();
	pgtable_cache_init();
	debug_objects_mem_init();
	vmalloc_init();
	/* If struct page init was not deferred, init page_ext now, as vmap is fully initialized */
	if (!deferred_struct_pages)
		page_ext_init();
	/* Should be run before the first non-init thread is created */
	init_espfix_bsp();
	/* Should be run after espfix64 is set up. */
	pti_init();
	kmsan_init_runtime();
	mm_cache_init();
	execmem_init();
}
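
/*
 * Illustrative sketch only: an architecture wanting more concurrency for
 * deferred struct page initialization could override the __weak
 * deferred_page_init_max_threads() defined earlier in this file, e.g.:
 *
 *	int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask)
 *	{
 *		// one thread per CPU local to the node, with a floor of one
 *		return max_t(int, cpumask_weight(node_cpumask), 1);
 *	}
 *
 * The exact policy shown is an assumption, not something this file mandates.
 */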