1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * mm_init.c - Memory initialisation verification and debugging 4 * 5 * Copyright 2008 IBM Corporation, 2008 6 * Author Mel Gorman <mel@csn.ul.ie> 7 * 8 */ 9 #include <linux/kernel.h> 10 #include <linux/init.h> 11 #include <linux/kobject.h> 12 #include <linux/export.h> 13 #include <linux/memory.h> 14 #include <linux/notifier.h> 15 #include <linux/sched.h> 16 #include <linux/mman.h> 17 #include <linux/memblock.h> 18 #include <linux/page-isolation.h> 19 #include <linux/padata.h> 20 #include <linux/nmi.h> 21 #include <linux/buffer_head.h> 22 #include <linux/kmemleak.h> 23 #include <linux/kfence.h> 24 #include <linux/page_ext.h> 25 #include <linux/pti.h> 26 #include <linux/pgtable.h> 27 #include <linux/swap.h> 28 #include <linux/cma.h> 29 #include "internal.h" 30 #include "slab.h" 31 #include "shuffle.h" 32 33 #include <asm/setup.h> 34 35 #ifdef CONFIG_DEBUG_MEMORY_INIT 36 int __meminitdata mminit_loglevel; 37 38 /* The zonelists are simply reported, validation is manual. */ 39 void __init mminit_verify_zonelist(void) 40 { 41 int nid; 42 43 if (mminit_loglevel < MMINIT_VERIFY) 44 return; 45 46 for_each_online_node(nid) { 47 pg_data_t *pgdat = NODE_DATA(nid); 48 struct zone *zone; 49 struct zoneref *z; 50 struct zonelist *zonelist; 51 int i, listid, zoneid; 52 53 BUILD_BUG_ON(MAX_ZONELISTS > 2); 54 for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) { 55 56 /* Identify the zone and nodelist */ 57 zoneid = i % MAX_NR_ZONES; 58 listid = i / MAX_NR_ZONES; 59 zonelist = &pgdat->node_zonelists[listid]; 60 zone = &pgdat->node_zones[zoneid]; 61 if (!populated_zone(zone)) 62 continue; 63 64 /* Print information about the zonelist */ 65 printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ", 66 listid > 0 ? "thisnode" : "general", nid, 67 zone->name); 68 69 /* Iterate the zonelist */ 70 for_each_zone_zonelist(zone, z, zonelist, zoneid) 71 pr_cont("%d:%s ", zone_to_nid(zone), zone->name); 72 pr_cont("\n"); 73 } 74 } 75 } 76 77 void __init mminit_verify_pageflags_layout(void) 78 { 79 int shift, width; 80 unsigned long or_mask, add_mask; 81 82 shift = 8 * sizeof(unsigned long); 83 width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH 84 - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH; 85 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths", 86 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n", 87 SECTIONS_WIDTH, 88 NODES_WIDTH, 89 ZONES_WIDTH, 90 LAST_CPUPID_WIDTH, 91 KASAN_TAG_WIDTH, 92 LRU_GEN_WIDTH, 93 LRU_REFS_WIDTH, 94 NR_PAGEFLAGS); 95 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts", 96 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n", 97 SECTIONS_SHIFT, 98 NODES_SHIFT, 99 ZONES_SHIFT, 100 LAST_CPUPID_SHIFT, 101 KASAN_TAG_WIDTH); 102 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts", 103 "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n", 104 (unsigned long)SECTIONS_PGSHIFT, 105 (unsigned long)NODES_PGSHIFT, 106 (unsigned long)ZONES_PGSHIFT, 107 (unsigned long)LAST_CPUPID_PGSHIFT, 108 (unsigned long)KASAN_TAG_PGSHIFT); 109 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid", 110 "Node/Zone ID: %lu -> %lu\n", 111 (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT), 112 (unsigned long)ZONEID_PGOFF); 113 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage", 114 "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n", 115 shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0); 116 #ifdef NODE_NOT_IN_PAGE_FLAGS 117 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags", 118 "Node not in page flags"); 119 #endif 120 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS 121 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags", 122 "Last cpupid not in page flags"); 123 #endif 124 125 if (SECTIONS_WIDTH) { 126 shift -= SECTIONS_WIDTH; 127 BUG_ON(shift != SECTIONS_PGSHIFT); 128 } 129 if (NODES_WIDTH) { 130 shift -= NODES_WIDTH; 131 BUG_ON(shift != NODES_PGSHIFT); 132 } 133 if (ZONES_WIDTH) { 134 shift -= ZONES_WIDTH; 135 BUG_ON(shift != ZONES_PGSHIFT); 136 } 137 138 /* Check for bitmask overlaps */ 139 or_mask = (ZONES_MASK << ZONES_PGSHIFT) | 140 (NODES_MASK << NODES_PGSHIFT) | 141 (SECTIONS_MASK << SECTIONS_PGSHIFT); 142 add_mask = (ZONES_MASK << ZONES_PGSHIFT) + 143 (NODES_MASK << NODES_PGSHIFT) + 144 (SECTIONS_MASK << SECTIONS_PGSHIFT); 145 BUG_ON(or_mask != add_mask); 146 } 147 148 static __init int set_mminit_loglevel(char *str) 149 { 150 get_option(&str, &mminit_loglevel); 151 return 0; 152 } 153 early_param("mminit_loglevel", set_mminit_loglevel); 154 #endif /* CONFIG_DEBUG_MEMORY_INIT */ 155 156 struct kobject *mm_kobj; 157 EXPORT_SYMBOL_GPL(mm_kobj); 158 159 #ifdef CONFIG_SMP 160 s32 vm_committed_as_batch = 32; 161 162 void mm_compute_batch(int overcommit_policy) 163 { 164 u64 memsized_batch; 165 s32 nr = num_present_cpus(); 166 s32 batch = max_t(s32, nr*2, 32); 167 unsigned long ram_pages = totalram_pages(); 168 169 /* 170 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of 171 * (total memory/#cpus), and lift it to 25% for other policies 172 * to easy the possible lock contention for percpu_counter 173 * vm_committed_as, while the max limit is INT_MAX 174 */ 175 if (overcommit_policy == OVERCOMMIT_NEVER) 176 memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX); 177 else 178 memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX); 179 180 vm_committed_as_batch = max_t(s32, memsized_batch, batch); 181 } 182 183 static int __meminit mm_compute_batch_notifier(struct notifier_block *self, 184 unsigned long action, void *arg) 185 { 186 switch (action) { 187 case MEM_ONLINE: 188 case MEM_OFFLINE: 189 mm_compute_batch(sysctl_overcommit_memory); 190 break; 191 default: 192 break; 193 } 194 return NOTIFY_OK; 195 } 196 197 static int __init mm_compute_batch_init(void) 198 { 199 mm_compute_batch(sysctl_overcommit_memory); 200 hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI); 201 return 0; 202 } 203 204 __initcall(mm_compute_batch_init); 205 206 #endif 207 208 static int __init mm_sysfs_init(void) 209 { 210 mm_kobj = kobject_create_and_add("mm", kernel_kobj); 211 if (!mm_kobj) 212 return -ENOMEM; 213 214 return 0; 215 } 216 postcore_initcall(mm_sysfs_init); 217 218 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata; 219 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata; 220 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata; 221 222 static unsigned long required_kernelcore __initdata; 223 static unsigned long required_kernelcore_percent __initdata; 224 static unsigned long required_movablecore __initdata; 225 static unsigned long required_movablecore_percent __initdata; 226 227 static unsigned long nr_kernel_pages __initdata; 228 static unsigned long nr_all_pages __initdata; 229 static unsigned long dma_reserve __initdata; 230 231 static bool deferred_struct_pages __meminitdata; 232 233 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); 234 235 static int __init cmdline_parse_core(char *p, unsigned long *core, 236 unsigned long *percent) 237 { 238 unsigned long long coremem; 239 char *endptr; 240 241 if (!p) 242 return -EINVAL; 243 244 /* Value may be a percentage of total memory, otherwise bytes */ 245 coremem = simple_strtoull(p, &endptr, 0); 246 if (*endptr == '%') { 247 /* Paranoid check for percent values greater than 100 */ 248 WARN_ON(coremem > 100); 249 250 *percent = coremem; 251 } else { 252 coremem = memparse(p, &p); 253 /* Paranoid check that UL is enough for the coremem value */ 254 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 255 256 *core = coremem >> PAGE_SHIFT; 257 *percent = 0UL; 258 } 259 return 0; 260 } 261 262 bool mirrored_kernelcore __initdata_memblock; 263 264 /* 265 * kernelcore=size sets the amount of memory for use for allocations that 266 * cannot be reclaimed or migrated. 267 */ 268 static int __init cmdline_parse_kernelcore(char *p) 269 { 270 /* parse kernelcore=mirror */ 271 if (parse_option_str(p, "mirror")) { 272 mirrored_kernelcore = true; 273 return 0; 274 } 275 276 return cmdline_parse_core(p, &required_kernelcore, 277 &required_kernelcore_percent); 278 } 279 early_param("kernelcore", cmdline_parse_kernelcore); 280 281 /* 282 * movablecore=size sets the amount of memory for use for allocations that 283 * can be reclaimed or migrated. 284 */ 285 static int __init cmdline_parse_movablecore(char *p) 286 { 287 return cmdline_parse_core(p, &required_movablecore, 288 &required_movablecore_percent); 289 } 290 early_param("movablecore", cmdline_parse_movablecore); 291 292 /* 293 * early_calculate_totalpages() 294 * Sum pages in active regions for movable zone. 295 * Populate N_MEMORY for calculating usable_nodes. 296 */ 297 static unsigned long __init early_calculate_totalpages(void) 298 { 299 unsigned long totalpages = 0; 300 unsigned long start_pfn, end_pfn; 301 int i, nid; 302 303 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 304 unsigned long pages = end_pfn - start_pfn; 305 306 totalpages += pages; 307 if (pages) 308 node_set_state(nid, N_MEMORY); 309 } 310 return totalpages; 311 } 312 313 /* 314 * This finds a zone that can be used for ZONE_MOVABLE pages. The 315 * assumption is made that zones within a node are ordered in monotonic 316 * increasing memory addresses so that the "highest" populated zone is used 317 */ 318 static void __init find_usable_zone_for_movable(void) 319 { 320 int zone_index; 321 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 322 if (zone_index == ZONE_MOVABLE) 323 continue; 324 325 if (arch_zone_highest_possible_pfn[zone_index] > 326 arch_zone_lowest_possible_pfn[zone_index]) 327 break; 328 } 329 330 VM_BUG_ON(zone_index == -1); 331 movable_zone = zone_index; 332 } 333 334 /* 335 * Find the PFN the Movable zone begins in each node. Kernel memory 336 * is spread evenly between nodes as long as the nodes have enough 337 * memory. When they don't, some nodes will have more kernelcore than 338 * others 339 */ 340 static void __init find_zone_movable_pfns_for_nodes(void) 341 { 342 int i, nid; 343 unsigned long usable_startpfn; 344 unsigned long kernelcore_node, kernelcore_remaining; 345 /* save the state before borrow the nodemask */ 346 nodemask_t saved_node_state = node_states[N_MEMORY]; 347 unsigned long totalpages = early_calculate_totalpages(); 348 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 349 struct memblock_region *r; 350 351 /* Need to find movable_zone earlier when movable_node is specified. */ 352 find_usable_zone_for_movable(); 353 354 /* 355 * If movable_node is specified, ignore kernelcore and movablecore 356 * options. 357 */ 358 if (movable_node_is_enabled()) { 359 for_each_mem_region(r) { 360 if (!memblock_is_hotpluggable(r)) 361 continue; 362 363 nid = memblock_get_region_node(r); 364 365 usable_startpfn = PFN_DOWN(r->base); 366 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 367 min(usable_startpfn, zone_movable_pfn[nid]) : 368 usable_startpfn; 369 } 370 371 goto out2; 372 } 373 374 /* 375 * If kernelcore=mirror is specified, ignore movablecore option 376 */ 377 if (mirrored_kernelcore) { 378 bool mem_below_4gb_not_mirrored = false; 379 380 for_each_mem_region(r) { 381 if (memblock_is_mirror(r)) 382 continue; 383 384 nid = memblock_get_region_node(r); 385 386 usable_startpfn = memblock_region_memory_base_pfn(r); 387 388 if (usable_startpfn < PHYS_PFN(SZ_4G)) { 389 mem_below_4gb_not_mirrored = true; 390 continue; 391 } 392 393 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 394 min(usable_startpfn, zone_movable_pfn[nid]) : 395 usable_startpfn; 396 } 397 398 if (mem_below_4gb_not_mirrored) 399 pr_warn("This configuration results in unmirrored kernel memory.\n"); 400 401 goto out2; 402 } 403 404 /* 405 * If kernelcore=nn% or movablecore=nn% was specified, calculate the 406 * amount of necessary memory. 407 */ 408 if (required_kernelcore_percent) 409 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) / 410 10000UL; 411 if (required_movablecore_percent) 412 required_movablecore = (totalpages * 100 * required_movablecore_percent) / 413 10000UL; 414 415 /* 416 * If movablecore= was specified, calculate what size of 417 * kernelcore that corresponds so that memory usable for 418 * any allocation type is evenly spread. If both kernelcore 419 * and movablecore are specified, then the value of kernelcore 420 * will be used for required_kernelcore if it's greater than 421 * what movablecore would have allowed. 422 */ 423 if (required_movablecore) { 424 unsigned long corepages; 425 426 /* 427 * Round-up so that ZONE_MOVABLE is at least as large as what 428 * was requested by the user 429 */ 430 required_movablecore = 431 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 432 required_movablecore = min(totalpages, required_movablecore); 433 corepages = totalpages - required_movablecore; 434 435 required_kernelcore = max(required_kernelcore, corepages); 436 } 437 438 /* 439 * If kernelcore was not specified or kernelcore size is larger 440 * than totalpages, there is no ZONE_MOVABLE. 441 */ 442 if (!required_kernelcore || required_kernelcore >= totalpages) 443 goto out; 444 445 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 446 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 447 448 restart: 449 /* Spread kernelcore memory as evenly as possible throughout nodes */ 450 kernelcore_node = required_kernelcore / usable_nodes; 451 for_each_node_state(nid, N_MEMORY) { 452 unsigned long start_pfn, end_pfn; 453 454 /* 455 * Recalculate kernelcore_node if the division per node 456 * now exceeds what is necessary to satisfy the requested 457 * amount of memory for the kernel 458 */ 459 if (required_kernelcore < kernelcore_node) 460 kernelcore_node = required_kernelcore / usable_nodes; 461 462 /* 463 * As the map is walked, we track how much memory is usable 464 * by the kernel using kernelcore_remaining. When it is 465 * 0, the rest of the node is usable by ZONE_MOVABLE 466 */ 467 kernelcore_remaining = kernelcore_node; 468 469 /* Go through each range of PFNs within this node */ 470 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 471 unsigned long size_pages; 472 473 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 474 if (start_pfn >= end_pfn) 475 continue; 476 477 /* Account for what is only usable for kernelcore */ 478 if (start_pfn < usable_startpfn) { 479 unsigned long kernel_pages; 480 kernel_pages = min(end_pfn, usable_startpfn) 481 - start_pfn; 482 483 kernelcore_remaining -= min(kernel_pages, 484 kernelcore_remaining); 485 required_kernelcore -= min(kernel_pages, 486 required_kernelcore); 487 488 /* Continue if range is now fully accounted */ 489 if (end_pfn <= usable_startpfn) { 490 491 /* 492 * Push zone_movable_pfn to the end so 493 * that if we have to rebalance 494 * kernelcore across nodes, we will 495 * not double account here 496 */ 497 zone_movable_pfn[nid] = end_pfn; 498 continue; 499 } 500 start_pfn = usable_startpfn; 501 } 502 503 /* 504 * The usable PFN range for ZONE_MOVABLE is from 505 * start_pfn->end_pfn. Calculate size_pages as the 506 * number of pages used as kernelcore 507 */ 508 size_pages = end_pfn - start_pfn; 509 if (size_pages > kernelcore_remaining) 510 size_pages = kernelcore_remaining; 511 zone_movable_pfn[nid] = start_pfn + size_pages; 512 513 /* 514 * Some kernelcore has been met, update counts and 515 * break if the kernelcore for this node has been 516 * satisfied 517 */ 518 required_kernelcore -= min(required_kernelcore, 519 size_pages); 520 kernelcore_remaining -= size_pages; 521 if (!kernelcore_remaining) 522 break; 523 } 524 } 525 526 /* 527 * If there is still required_kernelcore, we do another pass with one 528 * less node in the count. This will push zone_movable_pfn[nid] further 529 * along on the nodes that still have memory until kernelcore is 530 * satisfied 531 */ 532 usable_nodes--; 533 if (usable_nodes && required_kernelcore > usable_nodes) 534 goto restart; 535 536 out2: 537 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 538 for (nid = 0; nid < MAX_NUMNODES; nid++) { 539 unsigned long start_pfn, end_pfn; 540 541 zone_movable_pfn[nid] = 542 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 543 544 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 545 if (zone_movable_pfn[nid] >= end_pfn) 546 zone_movable_pfn[nid] = 0; 547 } 548 549 out: 550 /* restore the node_state */ 551 node_states[N_MEMORY] = saved_node_state; 552 } 553 554 static void __meminit __init_single_page(struct page *page, unsigned long pfn, 555 unsigned long zone, int nid) 556 { 557 mm_zero_struct_page(page); 558 set_page_links(page, zone, nid, pfn); 559 init_page_count(page); 560 page_mapcount_reset(page); 561 page_cpupid_reset_last(page); 562 page_kasan_tag_reset(page); 563 564 INIT_LIST_HEAD(&page->lru); 565 #ifdef WANT_PAGE_VIRTUAL 566 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 567 if (!is_highmem_idx(zone)) 568 set_page_address(page, __va(pfn << PAGE_SHIFT)); 569 #endif 570 } 571 572 #ifdef CONFIG_NUMA 573 /* 574 * During memory init memblocks map pfns to nids. The search is expensive and 575 * this caches recent lookups. The implementation of __early_pfn_to_nid 576 * treats start/end as pfns. 577 */ 578 struct mminit_pfnnid_cache { 579 unsigned long last_start; 580 unsigned long last_end; 581 int last_nid; 582 }; 583 584 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; 585 586 /* 587 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 588 */ 589 static int __meminit __early_pfn_to_nid(unsigned long pfn, 590 struct mminit_pfnnid_cache *state) 591 { 592 unsigned long start_pfn, end_pfn; 593 int nid; 594 595 if (state->last_start <= pfn && pfn < state->last_end) 596 return state->last_nid; 597 598 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 599 if (nid != NUMA_NO_NODE) { 600 state->last_start = start_pfn; 601 state->last_end = end_pfn; 602 state->last_nid = nid; 603 } 604 605 return nid; 606 } 607 608 int __meminit early_pfn_to_nid(unsigned long pfn) 609 { 610 static DEFINE_SPINLOCK(early_pfn_lock); 611 int nid; 612 613 spin_lock(&early_pfn_lock); 614 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); 615 if (nid < 0) 616 nid = first_online_node; 617 spin_unlock(&early_pfn_lock); 618 619 return nid; 620 } 621 622 int hashdist = HASHDIST_DEFAULT; 623 624 static int __init set_hashdist(char *str) 625 { 626 if (!str) 627 return 0; 628 hashdist = simple_strtoul(str, &str, 0); 629 return 1; 630 } 631 __setup("hashdist=", set_hashdist); 632 633 static inline void fixup_hashdist(void) 634 { 635 if (num_node_state(N_MEMORY) == 1) 636 hashdist = 0; 637 } 638 #else 639 static inline void fixup_hashdist(void) {} 640 #endif /* CONFIG_NUMA */ 641 642 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 643 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) 644 { 645 pgdat->first_deferred_pfn = ULONG_MAX; 646 } 647 648 /* Returns true if the struct page for the pfn is initialised */ 649 static inline bool __meminit early_page_initialised(unsigned long pfn) 650 { 651 int nid = early_pfn_to_nid(pfn); 652 653 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) 654 return false; 655 656 return true; 657 } 658 659 /* 660 * Returns true when the remaining initialisation should be deferred until 661 * later in the boot cycle when it can be parallelised. 662 */ 663 static bool __meminit 664 defer_init(int nid, unsigned long pfn, unsigned long end_pfn) 665 { 666 static unsigned long prev_end_pfn, nr_initialised; 667 668 if (early_page_ext_enabled()) 669 return false; 670 /* 671 * prev_end_pfn static that contains the end of previous zone 672 * No need to protect because called very early in boot before smp_init. 673 */ 674 if (prev_end_pfn != end_pfn) { 675 prev_end_pfn = end_pfn; 676 nr_initialised = 0; 677 } 678 679 /* Always populate low zones for address-constrained allocations */ 680 if (end_pfn < pgdat_end_pfn(NODE_DATA(nid))) 681 return false; 682 683 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX) 684 return true; 685 /* 686 * We start only with one section of pages, more pages are added as 687 * needed until the rest of deferred pages are initialized. 688 */ 689 nr_initialised++; 690 if ((nr_initialised > PAGES_PER_SECTION) && 691 (pfn & (PAGES_PER_SECTION - 1)) == 0) { 692 NODE_DATA(nid)->first_deferred_pfn = pfn; 693 return true; 694 } 695 return false; 696 } 697 698 static void __meminit init_reserved_page(unsigned long pfn) 699 { 700 pg_data_t *pgdat; 701 int nid, zid; 702 703 if (early_page_initialised(pfn)) 704 return; 705 706 nid = early_pfn_to_nid(pfn); 707 pgdat = NODE_DATA(nid); 708 709 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 710 struct zone *zone = &pgdat->node_zones[zid]; 711 712 if (zone_spans_pfn(zone, pfn)) 713 break; 714 } 715 __init_single_page(pfn_to_page(pfn), pfn, zid, nid); 716 } 717 #else 718 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} 719 720 static inline bool early_page_initialised(unsigned long pfn) 721 { 722 return true; 723 } 724 725 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) 726 { 727 return false; 728 } 729 730 static inline void init_reserved_page(unsigned long pfn) 731 { 732 } 733 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 734 735 /* 736 * Initialised pages do not have PageReserved set. This function is 737 * called for each range allocated by the bootmem allocator and 738 * marks the pages PageReserved. The remaining valid pages are later 739 * sent to the buddy page allocator. 740 */ 741 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) 742 { 743 unsigned long start_pfn = PFN_DOWN(start); 744 unsigned long end_pfn = PFN_UP(end); 745 746 for (; start_pfn < end_pfn; start_pfn++) { 747 if (pfn_valid(start_pfn)) { 748 struct page *page = pfn_to_page(start_pfn); 749 750 init_reserved_page(start_pfn); 751 752 /* Avoid false-positive PageTail() */ 753 INIT_LIST_HEAD(&page->lru); 754 755 /* 756 * no need for atomic set_bit because the struct 757 * page is not visible yet so nobody should 758 * access it yet. 759 */ 760 __SetPageReserved(page); 761 } 762 } 763 } 764 765 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */ 766 static bool __meminit 767 overlap_memmap_init(unsigned long zone, unsigned long *pfn) 768 { 769 static struct memblock_region *r; 770 771 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { 772 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) { 773 for_each_mem_region(r) { 774 if (*pfn < memblock_region_memory_end_pfn(r)) 775 break; 776 } 777 } 778 if (*pfn >= memblock_region_memory_base_pfn(r) && 779 memblock_is_mirror(r)) { 780 *pfn = memblock_region_memory_end_pfn(r); 781 return true; 782 } 783 } 784 return false; 785 } 786 787 /* 788 * Only struct pages that correspond to ranges defined by memblock.memory 789 * are zeroed and initialized by going through __init_single_page() during 790 * memmap_init_zone_range(). 791 * 792 * But, there could be struct pages that correspond to holes in 793 * memblock.memory. This can happen because of the following reasons: 794 * - physical memory bank size is not necessarily the exact multiple of the 795 * arbitrary section size 796 * - early reserved memory may not be listed in memblock.memory 797 * - memory layouts defined with memmap= kernel parameter may not align 798 * nicely with memmap sections 799 * 800 * Explicitly initialize those struct pages so that: 801 * - PG_Reserved is set 802 * - zone and node links point to zone and node that span the page if the 803 * hole is in the middle of a zone 804 * - zone and node links point to adjacent zone/node if the hole falls on 805 * the zone boundary; the pages in such holes will be prepended to the 806 * zone/node above the hole except for the trailing pages in the last 807 * section that will be appended to the zone/node below. 808 */ 809 static void __init init_unavailable_range(unsigned long spfn, 810 unsigned long epfn, 811 int zone, int node) 812 { 813 unsigned long pfn; 814 u64 pgcnt = 0; 815 816 for (pfn = spfn; pfn < epfn; pfn++) { 817 if (!pfn_valid(pageblock_start_pfn(pfn))) { 818 pfn = pageblock_end_pfn(pfn) - 1; 819 continue; 820 } 821 __init_single_page(pfn_to_page(pfn), pfn, zone, node); 822 __SetPageReserved(pfn_to_page(pfn)); 823 pgcnt++; 824 } 825 826 if (pgcnt) 827 pr_info("On node %d, zone %s: %lld pages in unavailable ranges", 828 node, zone_names[zone], pgcnt); 829 } 830 831 /* 832 * Initially all pages are reserved - free ones are freed 833 * up by memblock_free_all() once the early boot process is 834 * done. Non-atomic initialization, single-pass. 835 * 836 * All aligned pageblocks are initialized to the specified migratetype 837 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related 838 * zone stats (e.g., nr_isolate_pageblock) are touched. 839 */ 840 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, 841 unsigned long start_pfn, unsigned long zone_end_pfn, 842 enum meminit_context context, 843 struct vmem_altmap *altmap, int migratetype) 844 { 845 unsigned long pfn, end_pfn = start_pfn + size; 846 struct page *page; 847 848 if (highest_memmap_pfn < end_pfn - 1) 849 highest_memmap_pfn = end_pfn - 1; 850 851 #ifdef CONFIG_ZONE_DEVICE 852 /* 853 * Honor reservation requested by the driver for this ZONE_DEVICE 854 * memory. We limit the total number of pages to initialize to just 855 * those that might contain the memory mapping. We will defer the 856 * ZONE_DEVICE page initialization until after we have released 857 * the hotplug lock. 858 */ 859 if (zone == ZONE_DEVICE) { 860 if (!altmap) 861 return; 862 863 if (start_pfn == altmap->base_pfn) 864 start_pfn += altmap->reserve; 865 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 866 } 867 #endif 868 869 for (pfn = start_pfn; pfn < end_pfn; ) { 870 /* 871 * There can be holes in boot-time mem_map[]s handed to this 872 * function. They do not exist on hotplugged memory. 873 */ 874 if (context == MEMINIT_EARLY) { 875 if (overlap_memmap_init(zone, &pfn)) 876 continue; 877 if (defer_init(nid, pfn, zone_end_pfn)) { 878 deferred_struct_pages = true; 879 break; 880 } 881 } 882 883 page = pfn_to_page(pfn); 884 __init_single_page(page, pfn, zone, nid); 885 if (context == MEMINIT_HOTPLUG) 886 __SetPageReserved(page); 887 888 /* 889 * Usually, we want to mark the pageblock MIGRATE_MOVABLE, 890 * such that unmovable allocations won't be scattered all 891 * over the place during system boot. 892 */ 893 if (pageblock_aligned(pfn)) { 894 set_pageblock_migratetype(page, migratetype); 895 cond_resched(); 896 } 897 pfn++; 898 } 899 } 900 901 static void __init memmap_init_zone_range(struct zone *zone, 902 unsigned long start_pfn, 903 unsigned long end_pfn, 904 unsigned long *hole_pfn) 905 { 906 unsigned long zone_start_pfn = zone->zone_start_pfn; 907 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; 908 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); 909 910 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn); 911 end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn); 912 913 if (start_pfn >= end_pfn) 914 return; 915 916 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, 917 zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); 918 919 if (*hole_pfn < start_pfn) 920 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid); 921 922 *hole_pfn = end_pfn; 923 } 924 925 static void __init memmap_init(void) 926 { 927 unsigned long start_pfn, end_pfn; 928 unsigned long hole_pfn = 0; 929 int i, j, zone_id = 0, nid; 930 931 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 932 struct pglist_data *node = NODE_DATA(nid); 933 934 for (j = 0; j < MAX_NR_ZONES; j++) { 935 struct zone *zone = node->node_zones + j; 936 937 if (!populated_zone(zone)) 938 continue; 939 940 memmap_init_zone_range(zone, start_pfn, end_pfn, 941 &hole_pfn); 942 zone_id = j; 943 } 944 } 945 946 #ifdef CONFIG_SPARSEMEM 947 /* 948 * Initialize the memory map for hole in the range [memory_end, 949 * section_end]. 950 * Append the pages in this hole to the highest zone in the last 951 * node. 952 * The call to init_unavailable_range() is outside the ifdef to 953 * silence the compiler warining about zone_id set but not used; 954 * for FLATMEM it is a nop anyway 955 */ 956 end_pfn = round_up(end_pfn, PAGES_PER_SECTION); 957 if (hole_pfn < end_pfn) 958 #endif 959 init_unavailable_range(hole_pfn, end_pfn, zone_id, nid); 960 } 961 962 #ifdef CONFIG_ZONE_DEVICE 963 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, 964 unsigned long zone_idx, int nid, 965 struct dev_pagemap *pgmap) 966 { 967 968 __init_single_page(page, pfn, zone_idx, nid); 969 970 /* 971 * Mark page reserved as it will need to wait for onlining 972 * phase for it to be fully associated with a zone. 973 * 974 * We can use the non-atomic __set_bit operation for setting 975 * the flag as we are still initializing the pages. 976 */ 977 __SetPageReserved(page); 978 979 /* 980 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer 981 * and zone_device_data. It is a bug if a ZONE_DEVICE page is 982 * ever freed or placed on a driver-private list. 983 */ 984 page->pgmap = pgmap; 985 page->zone_device_data = NULL; 986 987 /* 988 * Mark the block movable so that blocks are reserved for 989 * movable at startup. This will force kernel allocations 990 * to reserve their blocks rather than leaking throughout 991 * the address space during boot when many long-lived 992 * kernel allocations are made. 993 * 994 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap 995 * because this is done early in section_activate() 996 */ 997 if (pageblock_aligned(pfn)) { 998 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 999 cond_resched(); 1000 } 1001 1002 /* 1003 * ZONE_DEVICE pages are released directly to the driver page allocator 1004 * which will set the page count to 1 when allocating the page. 1005 */ 1006 if (pgmap->type == MEMORY_DEVICE_PRIVATE || 1007 pgmap->type == MEMORY_DEVICE_COHERENT) 1008 set_page_count(page, 0); 1009 } 1010 1011 /* 1012 * With compound page geometry and when struct pages are stored in ram most 1013 * tail pages are reused. Consequently, the amount of unique struct pages to 1014 * initialize is a lot smaller that the total amount of struct pages being 1015 * mapped. This is a paired / mild layering violation with explicit knowledge 1016 * of how the sparse_vmemmap internals handle compound pages in the lack 1017 * of an altmap. See vmemmap_populate_compound_pages(). 1018 */ 1019 static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap, 1020 struct dev_pagemap *pgmap) 1021 { 1022 if (!vmemmap_can_optimize(altmap, pgmap)) 1023 return pgmap_vmemmap_nr(pgmap); 1024 1025 return 2 * (PAGE_SIZE / sizeof(struct page)); 1026 } 1027 1028 static void __ref memmap_init_compound(struct page *head, 1029 unsigned long head_pfn, 1030 unsigned long zone_idx, int nid, 1031 struct dev_pagemap *pgmap, 1032 unsigned long nr_pages) 1033 { 1034 unsigned long pfn, end_pfn = head_pfn + nr_pages; 1035 unsigned int order = pgmap->vmemmap_shift; 1036 1037 __SetPageHead(head); 1038 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) { 1039 struct page *page = pfn_to_page(pfn); 1040 1041 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 1042 prep_compound_tail(head, pfn - head_pfn); 1043 set_page_count(page, 0); 1044 1045 /* 1046 * The first tail page stores important compound page info. 1047 * Call prep_compound_head() after the first tail page has 1048 * been initialized, to not have the data overwritten. 1049 */ 1050 if (pfn == head_pfn + 1) 1051 prep_compound_head(head, order); 1052 } 1053 } 1054 1055 void __ref memmap_init_zone_device(struct zone *zone, 1056 unsigned long start_pfn, 1057 unsigned long nr_pages, 1058 struct dev_pagemap *pgmap) 1059 { 1060 unsigned long pfn, end_pfn = start_pfn + nr_pages; 1061 struct pglist_data *pgdat = zone->zone_pgdat; 1062 struct vmem_altmap *altmap = pgmap_altmap(pgmap); 1063 unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap); 1064 unsigned long zone_idx = zone_idx(zone); 1065 unsigned long start = jiffies; 1066 int nid = pgdat->node_id; 1067 1068 if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE)) 1069 return; 1070 1071 /* 1072 * The call to memmap_init should have already taken care 1073 * of the pages reserved for the memmap, so we can just jump to 1074 * the end of that region and start processing the device pages. 1075 */ 1076 if (altmap) { 1077 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 1078 nr_pages = end_pfn - start_pfn; 1079 } 1080 1081 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) { 1082 struct page *page = pfn_to_page(pfn); 1083 1084 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 1085 1086 if (pfns_per_compound == 1) 1087 continue; 1088 1089 memmap_init_compound(page, pfn, zone_idx, nid, pgmap, 1090 compound_nr_pages(altmap, pgmap)); 1091 } 1092 1093 pr_debug("%s initialised %lu pages in %ums\n", __func__, 1094 nr_pages, jiffies_to_msecs(jiffies - start)); 1095 } 1096 #endif 1097 1098 /* 1099 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 1100 * because it is sized independent of architecture. Unlike the other zones, 1101 * the starting point for ZONE_MOVABLE is not fixed. It may be different 1102 * in each node depending on the size of each node and how evenly kernelcore 1103 * is distributed. This helper function adjusts the zone ranges 1104 * provided by the architecture for a given node by using the end of the 1105 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that 1106 * zones within a node are in order of monotonic increases memory addresses 1107 */ 1108 static void __init adjust_zone_range_for_zone_movable(int nid, 1109 unsigned long zone_type, 1110 unsigned long node_start_pfn, 1111 unsigned long node_end_pfn, 1112 unsigned long *zone_start_pfn, 1113 unsigned long *zone_end_pfn) 1114 { 1115 /* Only adjust if ZONE_MOVABLE is on this node */ 1116 if (zone_movable_pfn[nid]) { 1117 /* Size ZONE_MOVABLE */ 1118 if (zone_type == ZONE_MOVABLE) { 1119 *zone_start_pfn = zone_movable_pfn[nid]; 1120 *zone_end_pfn = min(node_end_pfn, 1121 arch_zone_highest_possible_pfn[movable_zone]); 1122 1123 /* Adjust for ZONE_MOVABLE starting within this range */ 1124 } else if (!mirrored_kernelcore && 1125 *zone_start_pfn < zone_movable_pfn[nid] && 1126 *zone_end_pfn > zone_movable_pfn[nid]) { 1127 *zone_end_pfn = zone_movable_pfn[nid]; 1128 1129 /* Check if this whole range is within ZONE_MOVABLE */ 1130 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 1131 *zone_start_pfn = *zone_end_pfn; 1132 } 1133 } 1134 1135 /* 1136 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 1137 * then all holes in the requested range will be accounted for. 1138 */ 1139 unsigned long __init __absent_pages_in_range(int nid, 1140 unsigned long range_start_pfn, 1141 unsigned long range_end_pfn) 1142 { 1143 unsigned long nr_absent = range_end_pfn - range_start_pfn; 1144 unsigned long start_pfn, end_pfn; 1145 int i; 1146 1147 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 1148 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 1149 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 1150 nr_absent -= end_pfn - start_pfn; 1151 } 1152 return nr_absent; 1153 } 1154 1155 /** 1156 * absent_pages_in_range - Return number of page frames in holes within a range 1157 * @start_pfn: The start PFN to start searching for holes 1158 * @end_pfn: The end PFN to stop searching for holes 1159 * 1160 * Return: the number of pages frames in memory holes within a range. 1161 */ 1162 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 1163 unsigned long end_pfn) 1164 { 1165 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 1166 } 1167 1168 /* Return the number of page frames in holes in a zone on a node */ 1169 static unsigned long __init zone_absent_pages_in_node(int nid, 1170 unsigned long zone_type, 1171 unsigned long node_start_pfn, 1172 unsigned long node_end_pfn) 1173 { 1174 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 1175 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 1176 unsigned long zone_start_pfn, zone_end_pfn; 1177 unsigned long nr_absent; 1178 1179 /* When hotadd a new node from cpu_up(), the node should be empty */ 1180 if (!node_start_pfn && !node_end_pfn) 1181 return 0; 1182 1183 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 1184 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 1185 1186 adjust_zone_range_for_zone_movable(nid, zone_type, 1187 node_start_pfn, node_end_pfn, 1188 &zone_start_pfn, &zone_end_pfn); 1189 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 1190 1191 /* 1192 * ZONE_MOVABLE handling. 1193 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages 1194 * and vice versa. 1195 */ 1196 if (mirrored_kernelcore && zone_movable_pfn[nid]) { 1197 unsigned long start_pfn, end_pfn; 1198 struct memblock_region *r; 1199 1200 for_each_mem_region(r) { 1201 start_pfn = clamp(memblock_region_memory_base_pfn(r), 1202 zone_start_pfn, zone_end_pfn); 1203 end_pfn = clamp(memblock_region_memory_end_pfn(r), 1204 zone_start_pfn, zone_end_pfn); 1205 1206 if (zone_type == ZONE_MOVABLE && 1207 memblock_is_mirror(r)) 1208 nr_absent += end_pfn - start_pfn; 1209 1210 if (zone_type == ZONE_NORMAL && 1211 !memblock_is_mirror(r)) 1212 nr_absent += end_pfn - start_pfn; 1213 } 1214 } 1215 1216 return nr_absent; 1217 } 1218 1219 /* 1220 * Return the number of pages a zone spans in a node, including holes 1221 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 1222 */ 1223 static unsigned long __init zone_spanned_pages_in_node(int nid, 1224 unsigned long zone_type, 1225 unsigned long node_start_pfn, 1226 unsigned long node_end_pfn, 1227 unsigned long *zone_start_pfn, 1228 unsigned long *zone_end_pfn) 1229 { 1230 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 1231 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 1232 /* When hotadd a new node from cpu_up(), the node should be empty */ 1233 if (!node_start_pfn && !node_end_pfn) 1234 return 0; 1235 1236 /* Get the start and end of the zone */ 1237 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 1238 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 1239 adjust_zone_range_for_zone_movable(nid, zone_type, 1240 node_start_pfn, node_end_pfn, 1241 zone_start_pfn, zone_end_pfn); 1242 1243 /* Check that this node has pages within the zone's required range */ 1244 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn) 1245 return 0; 1246 1247 /* Move the zone boundaries inside the node if necessary */ 1248 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn); 1249 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); 1250 1251 /* Return the spanned pages */ 1252 return *zone_end_pfn - *zone_start_pfn; 1253 } 1254 1255 static void __init calculate_node_totalpages(struct pglist_data *pgdat, 1256 unsigned long node_start_pfn, 1257 unsigned long node_end_pfn) 1258 { 1259 unsigned long realtotalpages = 0, totalpages = 0; 1260 enum zone_type i; 1261 1262 for (i = 0; i < MAX_NR_ZONES; i++) { 1263 struct zone *zone = pgdat->node_zones + i; 1264 unsigned long zone_start_pfn, zone_end_pfn; 1265 unsigned long spanned, absent; 1266 unsigned long size, real_size; 1267 1268 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, 1269 node_start_pfn, 1270 node_end_pfn, 1271 &zone_start_pfn, 1272 &zone_end_pfn); 1273 absent = zone_absent_pages_in_node(pgdat->node_id, i, 1274 node_start_pfn, 1275 node_end_pfn); 1276 1277 size = spanned; 1278 real_size = size - absent; 1279 1280 if (size) 1281 zone->zone_start_pfn = zone_start_pfn; 1282 else 1283 zone->zone_start_pfn = 0; 1284 zone->spanned_pages = size; 1285 zone->present_pages = real_size; 1286 #if defined(CONFIG_MEMORY_HOTPLUG) 1287 zone->present_early_pages = real_size; 1288 #endif 1289 1290 totalpages += size; 1291 realtotalpages += real_size; 1292 } 1293 1294 pgdat->node_spanned_pages = totalpages; 1295 pgdat->node_present_pages = realtotalpages; 1296 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); 1297 } 1298 1299 static unsigned long __init calc_memmap_size(unsigned long spanned_pages, 1300 unsigned long present_pages) 1301 { 1302 unsigned long pages = spanned_pages; 1303 1304 /* 1305 * Provide a more accurate estimation if there are holes within 1306 * the zone and SPARSEMEM is in use. If there are holes within the 1307 * zone, each populated memory region may cost us one or two extra 1308 * memmap pages due to alignment because memmap pages for each 1309 * populated regions may not be naturally aligned on page boundary. 1310 * So the (present_pages >> 4) heuristic is a tradeoff for that. 1311 */ 1312 if (spanned_pages > present_pages + (present_pages >> 4) && 1313 IS_ENABLED(CONFIG_SPARSEMEM)) 1314 pages = present_pages; 1315 1316 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 1317 } 1318 1319 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1320 static void pgdat_init_split_queue(struct pglist_data *pgdat) 1321 { 1322 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; 1323 1324 spin_lock_init(&ds_queue->split_queue_lock); 1325 INIT_LIST_HEAD(&ds_queue->split_queue); 1326 ds_queue->split_queue_len = 0; 1327 } 1328 #else 1329 static void pgdat_init_split_queue(struct pglist_data *pgdat) {} 1330 #endif 1331 1332 #ifdef CONFIG_COMPACTION 1333 static void pgdat_init_kcompactd(struct pglist_data *pgdat) 1334 { 1335 init_waitqueue_head(&pgdat->kcompactd_wait); 1336 } 1337 #else 1338 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {} 1339 #endif 1340 1341 static void __meminit pgdat_init_internals(struct pglist_data *pgdat) 1342 { 1343 int i; 1344 1345 pgdat_resize_init(pgdat); 1346 pgdat_kswapd_lock_init(pgdat); 1347 1348 pgdat_init_split_queue(pgdat); 1349 pgdat_init_kcompactd(pgdat); 1350 1351 init_waitqueue_head(&pgdat->kswapd_wait); 1352 init_waitqueue_head(&pgdat->pfmemalloc_wait); 1353 1354 for (i = 0; i < NR_VMSCAN_THROTTLE; i++) 1355 init_waitqueue_head(&pgdat->reclaim_wait[i]); 1356 1357 pgdat_page_ext_init(pgdat); 1358 lruvec_init(&pgdat->__lruvec); 1359 } 1360 1361 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, 1362 unsigned long remaining_pages) 1363 { 1364 atomic_long_set(&zone->managed_pages, remaining_pages); 1365 zone_set_nid(zone, nid); 1366 zone->name = zone_names[idx]; 1367 zone->zone_pgdat = NODE_DATA(nid); 1368 spin_lock_init(&zone->lock); 1369 zone_seqlock_init(zone); 1370 zone_pcp_init(zone); 1371 } 1372 1373 static void __meminit zone_init_free_lists(struct zone *zone) 1374 { 1375 unsigned int order, t; 1376 for_each_migratetype_order(order, t) { 1377 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 1378 zone->free_area[order].nr_free = 0; 1379 } 1380 } 1381 1382 void __meminit init_currently_empty_zone(struct zone *zone, 1383 unsigned long zone_start_pfn, 1384 unsigned long size) 1385 { 1386 struct pglist_data *pgdat = zone->zone_pgdat; 1387 int zone_idx = zone_idx(zone) + 1; 1388 1389 if (zone_idx > pgdat->nr_zones) 1390 pgdat->nr_zones = zone_idx; 1391 1392 zone->zone_start_pfn = zone_start_pfn; 1393 1394 mminit_dprintk(MMINIT_TRACE, "memmap_init", 1395 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 1396 pgdat->node_id, 1397 (unsigned long)zone_idx(zone), 1398 zone_start_pfn, (zone_start_pfn + size)); 1399 1400 zone_init_free_lists(zone); 1401 zone->initialized = 1; 1402 } 1403 1404 #ifndef CONFIG_SPARSEMEM 1405 /* 1406 * Calculate the size of the zone->blockflags rounded to an unsigned long 1407 * Start by making sure zonesize is a multiple of pageblock_order by rounding 1408 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 1409 * round what is now in bits to nearest long in bits, then return it in 1410 * bytes. 1411 */ 1412 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 1413 { 1414 unsigned long usemapsize; 1415 1416 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 1417 usemapsize = roundup(zonesize, pageblock_nr_pages); 1418 usemapsize = usemapsize >> pageblock_order; 1419 usemapsize *= NR_PAGEBLOCK_BITS; 1420 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 1421 1422 return usemapsize / 8; 1423 } 1424 1425 static void __ref setup_usemap(struct zone *zone) 1426 { 1427 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, 1428 zone->spanned_pages); 1429 zone->pageblock_flags = NULL; 1430 if (usemapsize) { 1431 zone->pageblock_flags = 1432 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES, 1433 zone_to_nid(zone)); 1434 if (!zone->pageblock_flags) 1435 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", 1436 usemapsize, zone->name, zone_to_nid(zone)); 1437 } 1438 } 1439 #else 1440 static inline void setup_usemap(struct zone *zone) {} 1441 #endif /* CONFIG_SPARSEMEM */ 1442 1443 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 1444 1445 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 1446 void __init set_pageblock_order(void) 1447 { 1448 unsigned int order = MAX_ORDER; 1449 1450 /* Check that pageblock_nr_pages has not already been setup */ 1451 if (pageblock_order) 1452 return; 1453 1454 /* Don't let pageblocks exceed the maximum allocation granularity. */ 1455 if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order) 1456 order = HUGETLB_PAGE_ORDER; 1457 1458 /* 1459 * Assume the largest contiguous order of interest is a huge page. 1460 * This value may be variable depending on boot parameters on IA64 and 1461 * powerpc. 1462 */ 1463 pageblock_order = order; 1464 } 1465 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 1466 1467 /* 1468 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 1469 * is unused as pageblock_order is set at compile-time. See 1470 * include/linux/pageblock-flags.h for the values of pageblock_order based on 1471 * the kernel config 1472 */ 1473 void __init set_pageblock_order(void) 1474 { 1475 } 1476 1477 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 1478 1479 /* 1480 * Set up the zone data structures 1481 * - init pgdat internals 1482 * - init all zones belonging to this node 1483 * 1484 * NOTE: this function is only called during memory hotplug 1485 */ 1486 #ifdef CONFIG_MEMORY_HOTPLUG 1487 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) 1488 { 1489 int nid = pgdat->node_id; 1490 enum zone_type z; 1491 int cpu; 1492 1493 pgdat_init_internals(pgdat); 1494 1495 if (pgdat->per_cpu_nodestats == &boot_nodestats) 1496 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); 1497 1498 /* 1499 * Reset the nr_zones, order and highest_zoneidx before reuse. 1500 * Note that kswapd will init kswapd_highest_zoneidx properly 1501 * when it starts in the near future. 1502 */ 1503 pgdat->nr_zones = 0; 1504 pgdat->kswapd_order = 0; 1505 pgdat->kswapd_highest_zoneidx = 0; 1506 pgdat->node_start_pfn = 0; 1507 for_each_online_cpu(cpu) { 1508 struct per_cpu_nodestat *p; 1509 1510 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); 1511 memset(p, 0, sizeof(*p)); 1512 } 1513 1514 for (z = 0; z < MAX_NR_ZONES; z++) 1515 zone_init_internals(&pgdat->node_zones[z], z, nid, 0); 1516 } 1517 #endif 1518 1519 /* 1520 * Set up the zone data structures: 1521 * - mark all pages reserved 1522 * - mark all memory queues empty 1523 * - clear the memory bitmaps 1524 * 1525 * NOTE: pgdat should get zeroed by caller. 1526 * NOTE: this function is only called during early init. 1527 */ 1528 static void __init free_area_init_core(struct pglist_data *pgdat) 1529 { 1530 enum zone_type j; 1531 int nid = pgdat->node_id; 1532 1533 pgdat_init_internals(pgdat); 1534 pgdat->per_cpu_nodestats = &boot_nodestats; 1535 1536 for (j = 0; j < MAX_NR_ZONES; j++) { 1537 struct zone *zone = pgdat->node_zones + j; 1538 unsigned long size, freesize, memmap_pages; 1539 1540 size = zone->spanned_pages; 1541 freesize = zone->present_pages; 1542 1543 /* 1544 * Adjust freesize so that it accounts for how much memory 1545 * is used by this zone for memmap. This affects the watermark 1546 * and per-cpu initialisations 1547 */ 1548 memmap_pages = calc_memmap_size(size, freesize); 1549 if (!is_highmem_idx(j)) { 1550 if (freesize >= memmap_pages) { 1551 freesize -= memmap_pages; 1552 if (memmap_pages) 1553 pr_debug(" %s zone: %lu pages used for memmap\n", 1554 zone_names[j], memmap_pages); 1555 } else 1556 pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n", 1557 zone_names[j], memmap_pages, freesize); 1558 } 1559 1560 /* Account for reserved pages */ 1561 if (j == 0 && freesize > dma_reserve) { 1562 freesize -= dma_reserve; 1563 pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); 1564 } 1565 1566 if (!is_highmem_idx(j)) 1567 nr_kernel_pages += freesize; 1568 /* Charge for highmem memmap if there are enough kernel pages */ 1569 else if (nr_kernel_pages > memmap_pages * 2) 1570 nr_kernel_pages -= memmap_pages; 1571 nr_all_pages += freesize; 1572 1573 /* 1574 * Set an approximate value for lowmem here, it will be adjusted 1575 * when the bootmem allocator frees pages into the buddy system. 1576 * And all highmem pages will be managed by the buddy system. 1577 */ 1578 zone_init_internals(zone, j, nid, freesize); 1579 1580 if (!size) 1581 continue; 1582 1583 set_pageblock_order(); 1584 setup_usemap(zone); 1585 init_currently_empty_zone(zone, zone->zone_start_pfn, size); 1586 } 1587 } 1588 1589 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align, 1590 phys_addr_t min_addr, int nid, bool exact_nid) 1591 { 1592 void *ptr; 1593 1594 if (exact_nid) 1595 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr, 1596 MEMBLOCK_ALLOC_ACCESSIBLE, 1597 nid); 1598 else 1599 ptr = memblock_alloc_try_nid_raw(size, align, min_addr, 1600 MEMBLOCK_ALLOC_ACCESSIBLE, 1601 nid); 1602 1603 if (ptr && size > 0) 1604 page_init_poison(ptr, size); 1605 1606 return ptr; 1607 } 1608 1609 #ifdef CONFIG_FLATMEM 1610 static void __init alloc_node_mem_map(struct pglist_data *pgdat) 1611 { 1612 unsigned long __maybe_unused start = 0; 1613 unsigned long __maybe_unused offset = 0; 1614 1615 /* Skip empty nodes */ 1616 if (!pgdat->node_spanned_pages) 1617 return; 1618 1619 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 1620 offset = pgdat->node_start_pfn - start; 1621 /* ia64 gets its own node_mem_map, before this, without bootmem */ 1622 if (!pgdat->node_mem_map) { 1623 unsigned long size, end; 1624 struct page *map; 1625 1626 /* 1627 * The zone's endpoints aren't required to be MAX_ORDER 1628 * aligned but the node_mem_map endpoints must be in order 1629 * for the buddy allocator to function correctly. 1630 */ 1631 end = pgdat_end_pfn(pgdat); 1632 end = ALIGN(end, MAX_ORDER_NR_PAGES); 1633 size = (end - start) * sizeof(struct page); 1634 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, 1635 pgdat->node_id, false); 1636 if (!map) 1637 panic("Failed to allocate %ld bytes for node %d memory map\n", 1638 size, pgdat->node_id); 1639 pgdat->node_mem_map = map + offset; 1640 } 1641 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", 1642 __func__, pgdat->node_id, (unsigned long)pgdat, 1643 (unsigned long)pgdat->node_mem_map); 1644 #ifndef CONFIG_NUMA 1645 /* 1646 * With no DISCONTIG, the global mem_map is just set as node 0's 1647 */ 1648 if (pgdat == NODE_DATA(0)) { 1649 mem_map = NODE_DATA(0)->node_mem_map; 1650 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 1651 mem_map -= offset; 1652 } 1653 #endif 1654 } 1655 #else 1656 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } 1657 #endif /* CONFIG_FLATMEM */ 1658 1659 /** 1660 * get_pfn_range_for_nid - Return the start and end page frames for a node 1661 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 1662 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 1663 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 1664 * 1665 * It returns the start and end page frame of a node based on information 1666 * provided by memblock_set_node(). If called for a node 1667 * with no available memory, a warning is printed and the start and end 1668 * PFNs will be 0. 1669 */ 1670 void __init get_pfn_range_for_nid(unsigned int nid, 1671 unsigned long *start_pfn, unsigned long *end_pfn) 1672 { 1673 unsigned long this_start_pfn, this_end_pfn; 1674 int i; 1675 1676 *start_pfn = -1UL; 1677 *end_pfn = 0; 1678 1679 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 1680 *start_pfn = min(*start_pfn, this_start_pfn); 1681 *end_pfn = max(*end_pfn, this_end_pfn); 1682 } 1683 1684 if (*start_pfn == -1UL) 1685 *start_pfn = 0; 1686 } 1687 1688 static void __init free_area_init_node(int nid) 1689 { 1690 pg_data_t *pgdat = NODE_DATA(nid); 1691 unsigned long start_pfn = 0; 1692 unsigned long end_pfn = 0; 1693 1694 /* pg_data_t should be reset to zero when it's allocated */ 1695 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); 1696 1697 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 1698 1699 pgdat->node_id = nid; 1700 pgdat->node_start_pfn = start_pfn; 1701 pgdat->per_cpu_nodestats = NULL; 1702 1703 if (start_pfn != end_pfn) { 1704 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 1705 (u64)start_pfn << PAGE_SHIFT, 1706 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 1707 } else { 1708 pr_info("Initmem setup node %d as memoryless\n", nid); 1709 } 1710 1711 calculate_node_totalpages(pgdat, start_pfn, end_pfn); 1712 1713 alloc_node_mem_map(pgdat); 1714 pgdat_set_deferred_range(pgdat); 1715 1716 free_area_init_core(pgdat); 1717 lru_gen_init_pgdat(pgdat); 1718 } 1719 1720 /* Any regular or high memory on that node ? */ 1721 static void check_for_memory(pg_data_t *pgdat, int nid) 1722 { 1723 enum zone_type zone_type; 1724 1725 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 1726 struct zone *zone = &pgdat->node_zones[zone_type]; 1727 if (populated_zone(zone)) { 1728 if (IS_ENABLED(CONFIG_HIGHMEM)) 1729 node_set_state(nid, N_HIGH_MEMORY); 1730 if (zone_type <= ZONE_NORMAL) 1731 node_set_state(nid, N_NORMAL_MEMORY); 1732 break; 1733 } 1734 } 1735 } 1736 1737 #if MAX_NUMNODES > 1 1738 /* 1739 * Figure out the number of possible node ids. 1740 */ 1741 void __init setup_nr_node_ids(void) 1742 { 1743 unsigned int highest; 1744 1745 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 1746 nr_node_ids = highest + 1; 1747 } 1748 #endif 1749 1750 static void __init free_area_init_memoryless_node(int nid) 1751 { 1752 free_area_init_node(nid); 1753 } 1754 1755 /* 1756 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For 1757 * such cases we allow max_zone_pfn sorted in the descending order 1758 */ 1759 static bool arch_has_descending_max_zone_pfns(void) 1760 { 1761 return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40); 1762 } 1763 1764 /** 1765 * free_area_init - Initialise all pg_data_t and zone data 1766 * @max_zone_pfn: an array of max PFNs for each zone 1767 * 1768 * This will call free_area_init_node() for each active node in the system. 1769 * Using the page ranges provided by memblock_set_node(), the size of each 1770 * zone in each node and their holes is calculated. If the maximum PFN 1771 * between two adjacent zones match, it is assumed that the zone is empty. 1772 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 1773 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 1774 * starts where the previous one ended. For example, ZONE_DMA32 starts 1775 * at arch_max_dma_pfn. 1776 */ 1777 void __init free_area_init(unsigned long *max_zone_pfn) 1778 { 1779 unsigned long start_pfn, end_pfn; 1780 int i, nid, zone; 1781 bool descending; 1782 1783 /* Record where the zone boundaries are */ 1784 memset(arch_zone_lowest_possible_pfn, 0, 1785 sizeof(arch_zone_lowest_possible_pfn)); 1786 memset(arch_zone_highest_possible_pfn, 0, 1787 sizeof(arch_zone_highest_possible_pfn)); 1788 1789 start_pfn = PHYS_PFN(memblock_start_of_DRAM()); 1790 descending = arch_has_descending_max_zone_pfns(); 1791 1792 for (i = 0; i < MAX_NR_ZONES; i++) { 1793 if (descending) 1794 zone = MAX_NR_ZONES - i - 1; 1795 else 1796 zone = i; 1797 1798 if (zone == ZONE_MOVABLE) 1799 continue; 1800 1801 end_pfn = max(max_zone_pfn[zone], start_pfn); 1802 arch_zone_lowest_possible_pfn[zone] = start_pfn; 1803 arch_zone_highest_possible_pfn[zone] = end_pfn; 1804 1805 start_pfn = end_pfn; 1806 } 1807 1808 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 1809 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 1810 find_zone_movable_pfns_for_nodes(); 1811 1812 /* Print out the zone ranges */ 1813 pr_info("Zone ranges:\n"); 1814 for (i = 0; i < MAX_NR_ZONES; i++) { 1815 if (i == ZONE_MOVABLE) 1816 continue; 1817 pr_info(" %-8s ", zone_names[i]); 1818 if (arch_zone_lowest_possible_pfn[i] == 1819 arch_zone_highest_possible_pfn[i]) 1820 pr_cont("empty\n"); 1821 else 1822 pr_cont("[mem %#018Lx-%#018Lx]\n", 1823 (u64)arch_zone_lowest_possible_pfn[i] 1824 << PAGE_SHIFT, 1825 ((u64)arch_zone_highest_possible_pfn[i] 1826 << PAGE_SHIFT) - 1); 1827 } 1828 1829 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 1830 pr_info("Movable zone start for each node\n"); 1831 for (i = 0; i < MAX_NUMNODES; i++) { 1832 if (zone_movable_pfn[i]) 1833 pr_info(" Node %d: %#018Lx\n", i, 1834 (u64)zone_movable_pfn[i] << PAGE_SHIFT); 1835 } 1836 1837 /* 1838 * Print out the early node map, and initialize the 1839 * subsection-map relative to active online memory ranges to 1840 * enable future "sub-section" extensions of the memory map. 1841 */ 1842 pr_info("Early memory node ranges\n"); 1843 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 1844 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, 1845 (u64)start_pfn << PAGE_SHIFT, 1846 ((u64)end_pfn << PAGE_SHIFT) - 1); 1847 subsection_map_init(start_pfn, end_pfn - start_pfn); 1848 } 1849 1850 /* Initialise every node */ 1851 mminit_verify_pageflags_layout(); 1852 setup_nr_node_ids(); 1853 for_each_node(nid) { 1854 pg_data_t *pgdat; 1855 1856 if (!node_online(nid)) { 1857 pr_info("Initializing node %d as memoryless\n", nid); 1858 1859 /* Allocator not initialized yet */ 1860 pgdat = arch_alloc_nodedata(nid); 1861 if (!pgdat) 1862 panic("Cannot allocate %zuB for node %d.\n", 1863 sizeof(*pgdat), nid); 1864 arch_refresh_nodedata(nid, pgdat); 1865 free_area_init_memoryless_node(nid); 1866 1867 /* 1868 * We do not want to confuse userspace by sysfs 1869 * files/directories for node without any memory 1870 * attached to it, so this node is not marked as 1871 * N_MEMORY and not marked online so that no sysfs 1872 * hierarchy will be created via register_one_node for 1873 * it. The pgdat will get fully initialized by 1874 * hotadd_init_pgdat() when memory is hotplugged into 1875 * this node. 1876 */ 1877 continue; 1878 } 1879 1880 pgdat = NODE_DATA(nid); 1881 free_area_init_node(nid); 1882 1883 /* Any memory on that node */ 1884 if (pgdat->node_present_pages) 1885 node_set_state(nid, N_MEMORY); 1886 check_for_memory(pgdat, nid); 1887 } 1888 1889 memmap_init(); 1890 1891 /* disable hash distribution for systems with a single node */ 1892 fixup_hashdist(); 1893 } 1894 1895 /** 1896 * node_map_pfn_alignment - determine the maximum internode alignment 1897 * 1898 * This function should be called after node map is populated and sorted. 1899 * It calculates the maximum power of two alignment which can distinguish 1900 * all the nodes. 1901 * 1902 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 1903 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 1904 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 1905 * shifted, 1GiB is enough and this function will indicate so. 1906 * 1907 * This is used to test whether pfn -> nid mapping of the chosen memory 1908 * model has fine enough granularity to avoid incorrect mapping for the 1909 * populated node map. 1910 * 1911 * Return: the determined alignment in pfn's. 0 if there is no alignment 1912 * requirement (single node). 1913 */ 1914 unsigned long __init node_map_pfn_alignment(void) 1915 { 1916 unsigned long accl_mask = 0, last_end = 0; 1917 unsigned long start, end, mask; 1918 int last_nid = NUMA_NO_NODE; 1919 int i, nid; 1920 1921 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 1922 if (!start || last_nid < 0 || last_nid == nid) { 1923 last_nid = nid; 1924 last_end = end; 1925 continue; 1926 } 1927 1928 /* 1929 * Start with a mask granular enough to pin-point to the 1930 * start pfn and tick off bits one-by-one until it becomes 1931 * too coarse to separate the current node from the last. 1932 */ 1933 mask = ~((1 << __ffs(start)) - 1); 1934 while (mask && last_end <= (start & (mask << 1))) 1935 mask <<= 1; 1936 1937 /* accumulate all internode masks */ 1938 accl_mask |= mask; 1939 } 1940 1941 /* convert mask to number of pages */ 1942 return ~accl_mask + 1; 1943 } 1944 1945 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1946 static void __init deferred_free_range(unsigned long pfn, 1947 unsigned long nr_pages) 1948 { 1949 struct page *page; 1950 unsigned long i; 1951 1952 if (!nr_pages) 1953 return; 1954 1955 page = pfn_to_page(pfn); 1956 1957 /* Free a large naturally-aligned chunk if possible */ 1958 if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) { 1959 for (i = 0; i < nr_pages; i += pageblock_nr_pages) 1960 set_pageblock_migratetype(page + i, MIGRATE_MOVABLE); 1961 __free_pages_core(page, MAX_ORDER); 1962 return; 1963 } 1964 1965 for (i = 0; i < nr_pages; i++, page++, pfn++) { 1966 if (pageblock_aligned(pfn)) 1967 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 1968 __free_pages_core(page, 0); 1969 } 1970 } 1971 1972 /* Completion tracking for deferred_init_memmap() threads */ 1973 static atomic_t pgdat_init_n_undone __initdata; 1974 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); 1975 1976 static inline void __init pgdat_init_report_one_done(void) 1977 { 1978 if (atomic_dec_and_test(&pgdat_init_n_undone)) 1979 complete(&pgdat_init_all_done_comp); 1980 } 1981 1982 /* 1983 * Returns true if page needs to be initialized or freed to buddy allocator. 1984 * 1985 * We check if a current MAX_ORDER block is valid by only checking the validity 1986 * of the head pfn. 1987 */ 1988 static inline bool __init deferred_pfn_valid(unsigned long pfn) 1989 { 1990 if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn)) 1991 return false; 1992 return true; 1993 } 1994 1995 /* 1996 * Free pages to buddy allocator. Try to free aligned pages in 1997 * MAX_ORDER_NR_PAGES sizes. 1998 */ 1999 static void __init deferred_free_pages(unsigned long pfn, 2000 unsigned long end_pfn) 2001 { 2002 unsigned long nr_free = 0; 2003 2004 for (; pfn < end_pfn; pfn++) { 2005 if (!deferred_pfn_valid(pfn)) { 2006 deferred_free_range(pfn - nr_free, nr_free); 2007 nr_free = 0; 2008 } else if (IS_MAX_ORDER_ALIGNED(pfn)) { 2009 deferred_free_range(pfn - nr_free, nr_free); 2010 nr_free = 1; 2011 } else { 2012 nr_free++; 2013 } 2014 } 2015 /* Free the last block of pages to allocator */ 2016 deferred_free_range(pfn - nr_free, nr_free); 2017 } 2018 2019 /* 2020 * Initialize struct pages. We minimize pfn page lookups and scheduler checks 2021 * by performing it only once every MAX_ORDER_NR_PAGES. 2022 * Return number of pages initialized. 2023 */ 2024 static unsigned long __init deferred_init_pages(struct zone *zone, 2025 unsigned long pfn, 2026 unsigned long end_pfn) 2027 { 2028 int nid = zone_to_nid(zone); 2029 unsigned long nr_pages = 0; 2030 int zid = zone_idx(zone); 2031 struct page *page = NULL; 2032 2033 for (; pfn < end_pfn; pfn++) { 2034 if (!deferred_pfn_valid(pfn)) { 2035 page = NULL; 2036 continue; 2037 } else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) { 2038 page = pfn_to_page(pfn); 2039 } else { 2040 page++; 2041 } 2042 __init_single_page(page, pfn, zid, nid); 2043 nr_pages++; 2044 } 2045 return (nr_pages); 2046 } 2047 2048 /* 2049 * This function is meant to pre-load the iterator for the zone init. 2050 * Specifically it walks through the ranges until we are caught up to the 2051 * first_init_pfn value and exits there. If we never encounter the value we 2052 * return false indicating there are no valid ranges left. 2053 */ 2054 static bool __init 2055 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, 2056 unsigned long *spfn, unsigned long *epfn, 2057 unsigned long first_init_pfn) 2058 { 2059 u64 j; 2060 2061 /* 2062 * Start out by walking through the ranges in this zone that have 2063 * already been initialized. We don't need to do anything with them 2064 * so we just need to flush them out of the system. 2065 */ 2066 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { 2067 if (*epfn <= first_init_pfn) 2068 continue; 2069 if (*spfn < first_init_pfn) 2070 *spfn = first_init_pfn; 2071 *i = j; 2072 return true; 2073 } 2074 2075 return false; 2076 } 2077 2078 /* 2079 * Initialize and free pages. We do it in two loops: first we initialize 2080 * struct page, then free to buddy allocator, because while we are 2081 * freeing pages we can access pages that are ahead (computing buddy 2082 * page in __free_one_page()). 2083 * 2084 * In order to try and keep some memory in the cache we have the loop 2085 * broken along max page order boundaries. This way we will not cause 2086 * any issues with the buddy page computation. 2087 */ 2088 static unsigned long __init 2089 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, 2090 unsigned long *end_pfn) 2091 { 2092 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES); 2093 unsigned long spfn = *start_pfn, epfn = *end_pfn; 2094 unsigned long nr_pages = 0; 2095 u64 j = *i; 2096 2097 /* First we loop through and initialize the page values */ 2098 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { 2099 unsigned long t; 2100 2101 if (mo_pfn <= *start_pfn) 2102 break; 2103 2104 t = min(mo_pfn, *end_pfn); 2105 nr_pages += deferred_init_pages(zone, *start_pfn, t); 2106 2107 if (mo_pfn < *end_pfn) { 2108 *start_pfn = mo_pfn; 2109 break; 2110 } 2111 } 2112 2113 /* Reset values and now loop through freeing pages as needed */ 2114 swap(j, *i); 2115 2116 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { 2117 unsigned long t; 2118 2119 if (mo_pfn <= spfn) 2120 break; 2121 2122 t = min(mo_pfn, epfn); 2123 deferred_free_pages(spfn, t); 2124 2125 if (mo_pfn <= epfn) 2126 break; 2127 } 2128 2129 return nr_pages; 2130 } 2131 2132 static void __init 2133 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, 2134 void *arg) 2135 { 2136 unsigned long spfn, epfn; 2137 struct zone *zone = arg; 2138 u64 i; 2139 2140 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); 2141 2142 /* 2143 * Initialize and free pages in MAX_ORDER sized increments so that we 2144 * can avoid introducing any issues with the buddy allocator. 2145 */ 2146 while (spfn < end_pfn) { 2147 deferred_init_maxorder(&i, zone, &spfn, &epfn); 2148 cond_resched(); 2149 } 2150 } 2151 2152 /* An arch may override for more concurrency. */ 2153 __weak int __init 2154 deferred_page_init_max_threads(const struct cpumask *node_cpumask) 2155 { 2156 return 1; 2157 } 2158 2159 /* Initialise remaining memory on a node */ 2160 static int __init deferred_init_memmap(void *data) 2161 { 2162 pg_data_t *pgdat = data; 2163 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2164 unsigned long spfn = 0, epfn = 0; 2165 unsigned long first_init_pfn, flags; 2166 unsigned long start = jiffies; 2167 struct zone *zone; 2168 int zid, max_threads; 2169 u64 i; 2170 2171 /* Bind memory initialisation thread to a local node if possible */ 2172 if (!cpumask_empty(cpumask)) 2173 set_cpus_allowed_ptr(current, cpumask); 2174 2175 pgdat_resize_lock(pgdat, &flags); 2176 first_init_pfn = pgdat->first_deferred_pfn; 2177 if (first_init_pfn == ULONG_MAX) { 2178 pgdat_resize_unlock(pgdat, &flags); 2179 pgdat_init_report_one_done(); 2180 return 0; 2181 } 2182 2183 /* Sanity check boundaries */ 2184 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); 2185 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); 2186 pgdat->first_deferred_pfn = ULONG_MAX; 2187 2188 /* 2189 * Once we unlock here, the zone cannot be grown anymore, thus if an 2190 * interrupt thread must allocate this early in boot, zone must be 2191 * pre-grown prior to start of deferred page initialization. 2192 */ 2193 pgdat_resize_unlock(pgdat, &flags); 2194 2195 /* Only the highest zone is deferred so find it */ 2196 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 2197 zone = pgdat->node_zones + zid; 2198 if (first_init_pfn < zone_end_pfn(zone)) 2199 break; 2200 } 2201 2202 /* If the zone is empty somebody else may have cleared out the zone */ 2203 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2204 first_init_pfn)) 2205 goto zone_empty; 2206 2207 max_threads = deferred_page_init_max_threads(cpumask); 2208 2209 while (spfn < epfn) { 2210 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION); 2211 struct padata_mt_job job = { 2212 .thread_fn = deferred_init_memmap_chunk, 2213 .fn_arg = zone, 2214 .start = spfn, 2215 .size = epfn_align - spfn, 2216 .align = PAGES_PER_SECTION, 2217 .min_chunk = PAGES_PER_SECTION, 2218 .max_threads = max_threads, 2219 }; 2220 2221 padata_do_multithreaded(&job); 2222 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2223 epfn_align); 2224 } 2225 zone_empty: 2226 /* Sanity check that the next zone really is unpopulated */ 2227 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); 2228 2229 pr_info("node %d deferred pages initialised in %ums\n", 2230 pgdat->node_id, jiffies_to_msecs(jiffies - start)); 2231 2232 pgdat_init_report_one_done(); 2233 return 0; 2234 } 2235 2236 /* 2237 * If this zone has deferred pages, try to grow it by initializing enough 2238 * deferred pages to satisfy the allocation specified by order, rounded up to 2239 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments 2240 * of SECTION_SIZE bytes by initializing struct pages in increments of 2241 * PAGES_PER_SECTION * sizeof(struct page) bytes. 2242 * 2243 * Return true when zone was grown, otherwise return false. We return true even 2244 * when we grow less than requested, to let the caller decide if there are 2245 * enough pages to satisfy the allocation. 2246 * 2247 * Note: We use noinline because this function is needed only during boot, and 2248 * it is called from a __ref function _deferred_grow_zone. This way we are 2249 * making sure that it is not inlined into permanent text section. 2250 */ 2251 bool __init deferred_grow_zone(struct zone *zone, unsigned int order) 2252 { 2253 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); 2254 pg_data_t *pgdat = zone->zone_pgdat; 2255 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; 2256 unsigned long spfn, epfn, flags; 2257 unsigned long nr_pages = 0; 2258 u64 i; 2259 2260 /* Only the last zone may have deferred pages */ 2261 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) 2262 return false; 2263 2264 pgdat_resize_lock(pgdat, &flags); 2265 2266 /* 2267 * If someone grew this zone while we were waiting for spinlock, return 2268 * true, as there might be enough pages already. 2269 */ 2270 if (first_deferred_pfn != pgdat->first_deferred_pfn) { 2271 pgdat_resize_unlock(pgdat, &flags); 2272 return true; 2273 } 2274 2275 /* If the zone is empty somebody else may have cleared out the zone */ 2276 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2277 first_deferred_pfn)) { 2278 pgdat->first_deferred_pfn = ULONG_MAX; 2279 pgdat_resize_unlock(pgdat, &flags); 2280 /* Retry only once. */ 2281 return first_deferred_pfn != ULONG_MAX; 2282 } 2283 2284 /* 2285 * Initialize and free pages in MAX_ORDER sized increments so 2286 * that we can avoid introducing any issues with the buddy 2287 * allocator. 2288 */ 2289 while (spfn < epfn) { 2290 /* update our first deferred PFN for this section */ 2291 first_deferred_pfn = spfn; 2292 2293 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); 2294 touch_nmi_watchdog(); 2295 2296 /* We should only stop along section boundaries */ 2297 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) 2298 continue; 2299 2300 /* If our quota has been met we can stop here */ 2301 if (nr_pages >= nr_pages_needed) 2302 break; 2303 } 2304 2305 pgdat->first_deferred_pfn = spfn; 2306 pgdat_resize_unlock(pgdat, &flags); 2307 2308 return nr_pages > 0; 2309 } 2310 2311 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 2312 2313 #ifdef CONFIG_CMA 2314 void __init init_cma_reserved_pageblock(struct page *page) 2315 { 2316 unsigned i = pageblock_nr_pages; 2317 struct page *p = page; 2318 2319 do { 2320 __ClearPageReserved(p); 2321 set_page_count(p, 0); 2322 } while (++p, --i); 2323 2324 set_pageblock_migratetype(page, MIGRATE_CMA); 2325 set_page_refcounted(page); 2326 __free_pages(page, pageblock_order); 2327 2328 adjust_managed_page_count(page, pageblock_nr_pages); 2329 page_zone(page)->cma_pages += pageblock_nr_pages; 2330 } 2331 #endif 2332 2333 void set_zone_contiguous(struct zone *zone) 2334 { 2335 unsigned long block_start_pfn = zone->zone_start_pfn; 2336 unsigned long block_end_pfn; 2337 2338 block_end_pfn = pageblock_end_pfn(block_start_pfn); 2339 for (; block_start_pfn < zone_end_pfn(zone); 2340 block_start_pfn = block_end_pfn, 2341 block_end_pfn += pageblock_nr_pages) { 2342 2343 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); 2344 2345 if (!__pageblock_pfn_to_page(block_start_pfn, 2346 block_end_pfn, zone)) 2347 return; 2348 cond_resched(); 2349 } 2350 2351 /* We confirm that there is no hole */ 2352 zone->contiguous = true; 2353 } 2354 2355 void __init page_alloc_init_late(void) 2356 { 2357 struct zone *zone; 2358 int nid; 2359 2360 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 2361 2362 /* There will be num_node_state(N_MEMORY) threads */ 2363 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); 2364 for_each_node_state(nid, N_MEMORY) { 2365 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 2366 } 2367 2368 /* Block until all are initialised */ 2369 wait_for_completion(&pgdat_init_all_done_comp); 2370 2371 /* 2372 * We initialized the rest of the deferred pages. Permanently disable 2373 * on-demand struct page initialization. 2374 */ 2375 static_branch_disable(&deferred_pages); 2376 2377 /* Reinit limits that are based on free pages after the kernel is up */ 2378 files_maxfiles_init(); 2379 #endif 2380 2381 buffer_init(); 2382 2383 /* Discard memblock private memory */ 2384 memblock_discard(); 2385 2386 for_each_node_state(nid, N_MEMORY) 2387 shuffle_free_memory(NODE_DATA(nid)); 2388 2389 for_each_populated_zone(zone) 2390 set_zone_contiguous(zone); 2391 2392 /* Initialize page ext after all struct pages are initialized. */ 2393 if (deferred_struct_pages) 2394 page_ext_init(); 2395 2396 page_alloc_sysctl_init(); 2397 } 2398 2399 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES 2400 /* 2401 * Returns the number of pages that arch has reserved but 2402 * is not known to alloc_large_system_hash(). 2403 */ 2404 static unsigned long __init arch_reserved_kernel_pages(void) 2405 { 2406 return 0; 2407 } 2408 #endif 2409 2410 /* 2411 * Adaptive scale is meant to reduce sizes of hash tables on large memory 2412 * machines. As memory size is increased the scale is also increased but at 2413 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory 2414 * quadruples the scale is increased by one, which means the size of hash table 2415 * only doubles, instead of quadrupling as well. 2416 * Because 32-bit systems cannot have large physical memory, where this scaling 2417 * makes sense, it is disabled on such platforms. 2418 */ 2419 #if __BITS_PER_LONG > 32 2420 #define ADAPT_SCALE_BASE (64ul << 30) 2421 #define ADAPT_SCALE_SHIFT 2 2422 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT) 2423 #endif 2424 2425 /* 2426 * allocate a large system hash table from bootmem 2427 * - it is assumed that the hash table must contain an exact power-of-2 2428 * quantity of entries 2429 * - limit is the number of hash buckets, not the total allocation size 2430 */ 2431 void *__init alloc_large_system_hash(const char *tablename, 2432 unsigned long bucketsize, 2433 unsigned long numentries, 2434 int scale, 2435 int flags, 2436 unsigned int *_hash_shift, 2437 unsigned int *_hash_mask, 2438 unsigned long low_limit, 2439 unsigned long high_limit) 2440 { 2441 unsigned long long max = high_limit; 2442 unsigned long log2qty, size; 2443 void *table; 2444 gfp_t gfp_flags; 2445 bool virt; 2446 bool huge; 2447 2448 /* allow the kernel cmdline to have a say */ 2449 if (!numentries) { 2450 /* round applicable memory size up to nearest megabyte */ 2451 numentries = nr_kernel_pages; 2452 numentries -= arch_reserved_kernel_pages(); 2453 2454 /* It isn't necessary when PAGE_SIZE >= 1MB */ 2455 if (PAGE_SIZE < SZ_1M) 2456 numentries = round_up(numentries, SZ_1M / PAGE_SIZE); 2457 2458 #if __BITS_PER_LONG > 32 2459 if (!high_limit) { 2460 unsigned long adapt; 2461 2462 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; 2463 adapt <<= ADAPT_SCALE_SHIFT) 2464 scale++; 2465 } 2466 #endif 2467 2468 /* limit to 1 bucket per 2^scale bytes of low memory */ 2469 if (scale > PAGE_SHIFT) 2470 numentries >>= (scale - PAGE_SHIFT); 2471 else 2472 numentries <<= (PAGE_SHIFT - scale); 2473 2474 /* Make sure we've got at least a 0-order allocation.. */ 2475 if (unlikely(flags & HASH_SMALL)) { 2476 /* Makes no sense without HASH_EARLY */ 2477 WARN_ON(!(flags & HASH_EARLY)); 2478 if (!(numentries >> *_hash_shift)) { 2479 numentries = 1UL << *_hash_shift; 2480 BUG_ON(!numentries); 2481 } 2482 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 2483 numentries = PAGE_SIZE / bucketsize; 2484 } 2485 numentries = roundup_pow_of_two(numentries); 2486 2487 /* limit allocation size to 1/16 total memory by default */ 2488 if (max == 0) { 2489 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 2490 do_div(max, bucketsize); 2491 } 2492 max = min(max, 0x80000000ULL); 2493 2494 if (numentries < low_limit) 2495 numentries = low_limit; 2496 if (numentries > max) 2497 numentries = max; 2498 2499 log2qty = ilog2(numentries); 2500 2501 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC; 2502 do { 2503 virt = false; 2504 size = bucketsize << log2qty; 2505 if (flags & HASH_EARLY) { 2506 if (flags & HASH_ZERO) 2507 table = memblock_alloc(size, SMP_CACHE_BYTES); 2508 else 2509 table = memblock_alloc_raw(size, 2510 SMP_CACHE_BYTES); 2511 } else if (get_order(size) > MAX_ORDER || hashdist) { 2512 table = vmalloc_huge(size, gfp_flags); 2513 virt = true; 2514 if (table) 2515 huge = is_vm_area_hugepages(table); 2516 } else { 2517 /* 2518 * If bucketsize is not a power-of-two, we may free 2519 * some pages at the end of hash table which 2520 * alloc_pages_exact() automatically does 2521 */ 2522 table = alloc_pages_exact(size, gfp_flags); 2523 kmemleak_alloc(table, size, 1, gfp_flags); 2524 } 2525 } while (!table && size > PAGE_SIZE && --log2qty); 2526 2527 if (!table) 2528 panic("Failed to allocate %s hash table\n", tablename); 2529 2530 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", 2531 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, 2532 virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear"); 2533 2534 if (_hash_shift) 2535 *_hash_shift = log2qty; 2536 if (_hash_mask) 2537 *_hash_mask = (1 << log2qty) - 1; 2538 2539 return table; 2540 } 2541 2542 /** 2543 * set_dma_reserve - set the specified number of pages reserved in the first zone 2544 * @new_dma_reserve: The number of pages to mark reserved 2545 * 2546 * The per-cpu batchsize and zone watermarks are determined by managed_pages. 2547 * In the DMA zone, a significant percentage may be consumed by kernel image 2548 * and other unfreeable allocations which can skew the watermarks badly. This 2549 * function may optionally be used to account for unfreeable pages in the 2550 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 2551 * smaller per-cpu batchsize. 2552 */ 2553 void __init set_dma_reserve(unsigned long new_dma_reserve) 2554 { 2555 dma_reserve = new_dma_reserve; 2556 } 2557 2558 void __init memblock_free_pages(struct page *page, unsigned long pfn, 2559 unsigned int order) 2560 { 2561 if (!early_page_initialised(pfn)) 2562 return; 2563 if (!kmsan_memblock_free_pages(page, order)) { 2564 /* KMSAN will take care of these pages. */ 2565 return; 2566 } 2567 __free_pages_core(page, order); 2568 } 2569 2570 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); 2571 EXPORT_SYMBOL(init_on_alloc); 2572 2573 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); 2574 EXPORT_SYMBOL(init_on_free); 2575 2576 static bool _init_on_alloc_enabled_early __read_mostly 2577 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); 2578 static int __init early_init_on_alloc(char *buf) 2579 { 2580 2581 return kstrtobool(buf, &_init_on_alloc_enabled_early); 2582 } 2583 early_param("init_on_alloc", early_init_on_alloc); 2584 2585 static bool _init_on_free_enabled_early __read_mostly 2586 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON); 2587 static int __init early_init_on_free(char *buf) 2588 { 2589 return kstrtobool(buf, &_init_on_free_enabled_early); 2590 } 2591 early_param("init_on_free", early_init_on_free); 2592 2593 DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled); 2594 2595 /* 2596 * Enable static keys related to various memory debugging and hardening options. 2597 * Some override others, and depend on early params that are evaluated in the 2598 * order of appearance. So we need to first gather the full picture of what was 2599 * enabled, and then make decisions. 2600 */ 2601 static void __init mem_debugging_and_hardening_init(void) 2602 { 2603 bool page_poisoning_requested = false; 2604 bool want_check_pages = false; 2605 2606 #ifdef CONFIG_PAGE_POISONING 2607 /* 2608 * Page poisoning is debug page alloc for some arches. If 2609 * either of those options are enabled, enable poisoning. 2610 */ 2611 if (page_poisoning_enabled() || 2612 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && 2613 debug_pagealloc_enabled())) { 2614 static_branch_enable(&_page_poisoning_enabled); 2615 page_poisoning_requested = true; 2616 want_check_pages = true; 2617 } 2618 #endif 2619 2620 if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && 2621 page_poisoning_requested) { 2622 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " 2623 "will take precedence over init_on_alloc and init_on_free\n"); 2624 _init_on_alloc_enabled_early = false; 2625 _init_on_free_enabled_early = false; 2626 } 2627 2628 if (_init_on_alloc_enabled_early) { 2629 want_check_pages = true; 2630 static_branch_enable(&init_on_alloc); 2631 } else { 2632 static_branch_disable(&init_on_alloc); 2633 } 2634 2635 if (_init_on_free_enabled_early) { 2636 want_check_pages = true; 2637 static_branch_enable(&init_on_free); 2638 } else { 2639 static_branch_disable(&init_on_free); 2640 } 2641 2642 if (IS_ENABLED(CONFIG_KMSAN) && 2643 (_init_on_alloc_enabled_early || _init_on_free_enabled_early)) 2644 pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n"); 2645 2646 #ifdef CONFIG_DEBUG_PAGEALLOC 2647 if (debug_pagealloc_enabled()) { 2648 want_check_pages = true; 2649 static_branch_enable(&_debug_pagealloc_enabled); 2650 2651 if (debug_guardpage_minorder()) 2652 static_branch_enable(&_debug_guardpage_enabled); 2653 } 2654 #endif 2655 2656 /* 2657 * Any page debugging or hardening option also enables sanity checking 2658 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's 2659 * enabled already. 2660 */ 2661 if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages) 2662 static_branch_enable(&check_pages_enabled); 2663 } 2664 2665 /* Report memory auto-initialization states for this boot. */ 2666 static void __init report_meminit(void) 2667 { 2668 const char *stack; 2669 2670 if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN)) 2671 stack = "all(pattern)"; 2672 else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO)) 2673 stack = "all(zero)"; 2674 else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL)) 2675 stack = "byref_all(zero)"; 2676 else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)) 2677 stack = "byref(zero)"; 2678 else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)) 2679 stack = "__user(zero)"; 2680 else 2681 stack = "off"; 2682 2683 pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", 2684 stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off", 2685 want_init_on_free() ? "on" : "off"); 2686 if (want_init_on_free()) 2687 pr_info("mem auto-init: clearing system memory may take some time...\n"); 2688 } 2689 2690 static void __init mem_init_print_info(void) 2691 { 2692 unsigned long physpages, codesize, datasize, rosize, bss_size; 2693 unsigned long init_code_size, init_data_size; 2694 2695 physpages = get_num_physpages(); 2696 codesize = _etext - _stext; 2697 datasize = _edata - _sdata; 2698 rosize = __end_rodata - __start_rodata; 2699 bss_size = __bss_stop - __bss_start; 2700 init_data_size = __init_end - __init_begin; 2701 init_code_size = _einittext - _sinittext; 2702 2703 /* 2704 * Detect special cases and adjust section sizes accordingly: 2705 * 1) .init.* may be embedded into .data sections 2706 * 2) .init.text.* may be out of [__init_begin, __init_end], 2707 * please refer to arch/tile/kernel/vmlinux.lds.S. 2708 * 3) .rodata.* may be embedded into .text or .data sections. 2709 */ 2710 #define adj_init_size(start, end, size, pos, adj) \ 2711 do { \ 2712 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \ 2713 size -= adj; \ 2714 } while (0) 2715 2716 adj_init_size(__init_begin, __init_end, init_data_size, 2717 _sinittext, init_code_size); 2718 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); 2719 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); 2720 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); 2721 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); 2722 2723 #undef adj_init_size 2724 2725 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" 2726 #ifdef CONFIG_HIGHMEM 2727 ", %luK highmem" 2728 #endif 2729 ")\n", 2730 K(nr_free_pages()), K(physpages), 2731 codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K, 2732 (init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K, 2733 K(physpages - totalram_pages() - totalcma_pages), 2734 K(totalcma_pages) 2735 #ifdef CONFIG_HIGHMEM 2736 , K(totalhigh_pages()) 2737 #endif 2738 ); 2739 } 2740 2741 /* 2742 * Set up kernel memory allocators 2743 */ 2744 void __init mm_core_init(void) 2745 { 2746 /* Initializations relying on SMP setup */ 2747 build_all_zonelists(NULL); 2748 page_alloc_init_cpuhp(); 2749 2750 /* 2751 * page_ext requires contiguous pages, 2752 * bigger than MAX_ORDER unless SPARSEMEM. 2753 */ 2754 page_ext_init_flatmem(); 2755 mem_debugging_and_hardening_init(); 2756 kfence_alloc_pool(); 2757 report_meminit(); 2758 kmsan_init_shadow(); 2759 stack_depot_early_init(); 2760 mem_init(); 2761 mem_init_print_info(); 2762 kmem_cache_init(); 2763 /* 2764 * page_owner must be initialized after buddy is ready, and also after 2765 * slab is ready so that stack_depot_init() works properly 2766 */ 2767 page_ext_init_flatmem_late(); 2768 kmemleak_init(); 2769 ptlock_cache_init(); 2770 pgtable_cache_init(); 2771 debug_objects_mem_init(); 2772 vmalloc_init(); 2773 /* If no deferred init page_ext now, as vmap is fully initialized */ 2774 if (!deferred_struct_pages) 2775 page_ext_init(); 2776 /* Should be run before the first non-init thread is created */ 2777 init_espfix_bsp(); 2778 /* Should be run after espfix64 is set up. */ 2779 pti_init(); 2780 kmsan_init_runtime(); 2781 mm_cache_init(); 2782 } 2783