1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * mm_init.c - Memory initialisation verification and debugging
4 *
5 * Copyright 2008 IBM Corporation, 2008
6 * Author Mel Gorman <mel@csn.ul.ie>
7 *
8 */
9 #include <linux/kernel.h>
10 #include <linux/init.h>
11 #include <linux/kobject.h>
12 #include <linux/export.h>
13 #include <linux/memory.h>
14 #include <linux/notifier.h>
15 #include <linux/sched.h>
16 #include <linux/mman.h>
17 #include <linux/memblock.h>
18 #include <linux/page-isolation.h>
19 #include <linux/padata.h>
20 #include <linux/nmi.h>
21 #include <linux/buffer_head.h>
22 #include <linux/kmemleak.h>
23 #include <linux/kfence.h>
24 #include <linux/page_ext.h>
25 #include <linux/pti.h>
26 #include <linux/pgtable.h>
27 #include <linux/stackdepot.h>
28 #include <linux/swap.h>
29 #include <linux/cma.h>
30 #include <linux/crash_dump.h>
31 #include <linux/execmem.h>
32 #include <linux/vmstat.h>
33 #include <linux/kexec_handover.h>
34 #include <linux/hugetlb.h>
35 #include "internal.h"
36 #include "slab.h"
37 #include "shuffle.h"
38
39 #include <asm/setup.h>
40
41 #ifndef CONFIG_NUMA
42 unsigned long max_mapnr;
43 EXPORT_SYMBOL(max_mapnr);
44
45 struct page *mem_map;
46 EXPORT_SYMBOL(mem_map);
47 #endif
48
49 /*
 * high_memory defines the upper bound on direct map memory, i.e. the end
 * of ZONE_NORMAL.
52 */
53 void *high_memory;
54 EXPORT_SYMBOL(high_memory);
55
56 unsigned long zero_page_pfn __ro_after_init;
57 EXPORT_SYMBOL(zero_page_pfn);
58
59 #ifndef __HAVE_COLOR_ZERO_PAGE
60 uint8_t empty_zero_page[PAGE_SIZE] __page_aligned_bss;
61 EXPORT_SYMBOL(empty_zero_page);
62
63 struct page *__zero_page __ro_after_init;
64 EXPORT_SYMBOL(__zero_page);
65 #endif /* __HAVE_COLOR_ZERO_PAGE */
66
67 #ifdef CONFIG_DEBUG_MEMORY_INIT
68 int __meminitdata mminit_loglevel;
69
70 /* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
72 {
73 int nid;
74
75 if (mminit_loglevel < MMINIT_VERIFY)
76 return;
77
78 for_each_online_node(nid) {
79 pg_data_t *pgdat = NODE_DATA(nid);
80 struct zone *zone;
81 struct zoneref *z;
82 struct zonelist *zonelist;
83 int i, listid, zoneid;
84
85 for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
86
87 /* Identify the zone and nodelist */
88 zoneid = i % MAX_NR_ZONES;
89 listid = i / MAX_NR_ZONES;
90 zonelist = &pgdat->node_zonelists[listid];
91 zone = &pgdat->node_zones[zoneid];
92 if (!populated_zone(zone))
93 continue;
94
95 /* Print information about the zonelist */
96 printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
97 listid > 0 ? "thisnode" : "general", nid,
98 zone->name);
99
100 /* Iterate the zonelist */
101 for_each_zone_zonelist(zone, z, zonelist, zoneid)
102 pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
103 pr_cont("\n");
104 }
105 }
106 }
107
void __init mminit_verify_pageflags_layout(void)
109 {
110 int shift, width;
111 unsigned long or_mask, add_mask;
112
113 shift = BITS_PER_LONG;
114 width = shift - NR_NON_PAGEFLAG_BITS;
115 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
116 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
117 SECTIONS_WIDTH,
118 NODES_WIDTH,
119 ZONES_WIDTH,
120 LAST_CPUPID_WIDTH,
121 KASAN_TAG_WIDTH,
122 LRU_GEN_WIDTH,
123 LRU_REFS_WIDTH,
124 NR_PAGEFLAGS);
125 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
126 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
127 SECTIONS_SHIFT,
128 NODES_SHIFT,
129 ZONES_SHIFT,
130 LAST_CPUPID_SHIFT,
131 KASAN_TAG_WIDTH);
132 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
133 "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
134 (unsigned long)SECTIONS_PGSHIFT,
135 (unsigned long)NODES_PGSHIFT,
136 (unsigned long)ZONES_PGSHIFT,
137 (unsigned long)LAST_CPUPID_PGSHIFT,
138 (unsigned long)KASAN_TAG_PGSHIFT);
139 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
140 "Node/Zone ID: %lu -> %lu\n",
141 (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
142 (unsigned long)ZONEID_PGOFF);
143 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
144 "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
145 shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
146 #ifdef NODE_NOT_IN_PAGE_FLAGS
147 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
148 "Node not in page flags");
149 #endif
150 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
151 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
152 "Last cpupid not in page flags");
153 #endif
154
155 if (SECTIONS_WIDTH) {
156 shift -= SECTIONS_WIDTH;
157 BUG_ON(shift != SECTIONS_PGSHIFT);
158 }
159 if (NODES_WIDTH) {
160 shift -= NODES_WIDTH;
161 BUG_ON(shift != NODES_PGSHIFT);
162 }
163 if (ZONES_WIDTH) {
164 shift -= ZONES_WIDTH;
165 BUG_ON(shift != ZONES_PGSHIFT);
166 }
167
168 /* Check for bitmask overlaps */
169 or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
170 (NODES_MASK << NODES_PGSHIFT) |
171 (SECTIONS_MASK << SECTIONS_PGSHIFT);
172 add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
173 (NODES_MASK << NODES_PGSHIFT) +
174 (SECTIONS_MASK << SECTIONS_PGSHIFT);
175 BUG_ON(or_mask != add_mask);
176 }
177
static __init int set_mminit_loglevel(char *str)
179 {
180 get_option(&str, &mminit_loglevel);
181 return 0;
182 }
183 early_param("mminit_loglevel", set_mminit_loglevel);
184 #endif /* CONFIG_DEBUG_MEMORY_INIT */
185
186 struct kobject *mm_kobj;
187
188 #ifdef CONFIG_SMP
189 s32 vm_committed_as_batch = 32;
190
void mm_compute_batch(int overcommit_policy)
192 {
193 u64 memsized_batch;
194 s32 nr = num_present_cpus();
195 s32 batch = max_t(s32, nr*2, 32);
196 unsigned long ram_pages = totalram_pages();
197
198 /*
199 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
200 * (total memory/#cpus), and lift it to 25% for other policies
201 * to ease the possible lock contention for percpu_counter
202 * vm_committed_as, while the max limit is INT_MAX
203 */
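	/*
	 * Worked example (assuming 4KiB pages): with 16 CPUs and 64GiB of
	 * RAM (16M pages), OVERCOMMIT_NEVER yields 16M/16/256 = 4096 pages
	 * per batch, while the other policies yield 16M/16/4 = 262144; the
	 * final value is never allowed below max(nr * 2, 32).
	 */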
204 if (overcommit_policy == OVERCOMMIT_NEVER)
205 memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
206 else
207 memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);
208
209 vm_committed_as_batch = max_t(s32, memsized_batch, batch);
210 }
211
static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					       unsigned long action, void *arg)
214 {
215 switch (action) {
216 case MEM_ONLINE:
217 case MEM_OFFLINE:
218 mm_compute_batch(sysctl_overcommit_memory);
219 break;
220 default:
221 break;
222 }
223 return NOTIFY_OK;
224 }
225
static int __init mm_compute_batch_init(void)
227 {
228 mm_compute_batch(sysctl_overcommit_memory);
229 hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
230 return 0;
231 }
232
233 __initcall(mm_compute_batch_init);
234
235 #endif
236
static int __init mm_sysfs_init(void)
238 {
239 mm_kobj = kobject_create_and_add("mm", kernel_kobj);
240 if (!mm_kobj)
241 return -ENOMEM;
242
243 return 0;
244 }
245 postcore_initcall(mm_sysfs_init);
246
247 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
248 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
249 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
250
251 static unsigned long required_kernelcore __initdata;
252 static unsigned long required_kernelcore_percent __initdata;
253 static unsigned long required_movablecore __initdata;
254 static unsigned long required_movablecore_percent __initdata;
255
256 static unsigned long nr_kernel_pages __initdata;
257 static unsigned long nr_all_pages __initdata;
258
259 static bool deferred_struct_pages __meminitdata;
260
261 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
262
static int __init cmdline_parse_core(char *p, unsigned long *core,
				     unsigned long *percent)
265 {
266 unsigned long long coremem;
267 char *endptr;
268
269 if (!p)
270 return -EINVAL;
271
272 /* Value may be a percentage of total memory, otherwise bytes */
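	/*
	 * e.g. "kernelcore=512M" ends up as *core = 512M >> PAGE_SHIFT pages
	 * with *percent = 0, while "kernelcore=30%" sets *percent = 30 and
	 * leaves *core untouched.
	 */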
273 coremem = simple_strtoull(p, &endptr, 0);
274 if (*endptr == '%') {
275 /* Paranoid check for percent values greater than 100 */
276 WARN_ON(coremem > 100);
277
278 *percent = coremem;
279 } else {
280 coremem = memparse(p, &p);
281 /* Paranoid check that UL is enough for the coremem value */
282 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
283
284 *core = coremem >> PAGE_SHIFT;
285 *percent = 0UL;
286 }
287 return 0;
288 }
289
290 bool mirrored_kernelcore __initdata_memblock;
291
292 /*
293 * kernelcore=size sets the amount of memory for use for allocations that
294 * cannot be reclaimed or migrated.
295 */
static int __init cmdline_parse_kernelcore(char *p)
297 {
298 /* parse kernelcore=mirror */
299 if (parse_option_str(p, "mirror")) {
300 mirrored_kernelcore = true;
301 return 0;
302 }
303
304 return cmdline_parse_core(p, &required_kernelcore,
305 &required_kernelcore_percent);
306 }
307 early_param("kernelcore", cmdline_parse_kernelcore);
308
309 /*
310 * movablecore=size sets the amount of memory for use for allocations that
311 * can be reclaimed or migrated.
312 */
static int __init cmdline_parse_movablecore(char *p)
314 {
315 return cmdline_parse_core(p, &required_movablecore,
316 &required_movablecore_percent);
317 }
318 early_param("movablecore", cmdline_parse_movablecore);
319
320 /*
321 * early_calculate_totalpages()
322 * Sum pages in active regions for movable zone.
323 * Populate N_MEMORY for calculating usable_nodes.
324 */
static unsigned long __init early_calculate_totalpages(void)
326 {
327 unsigned long totalpages = 0;
328 unsigned long start_pfn, end_pfn;
329 int i, nid;
330
331 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
332 unsigned long pages = end_pfn - start_pfn;
333
334 totalpages += pages;
335 if (pages)
336 node_set_state(nid, N_MEMORY);
337 }
338 return totalpages;
339 }
340
341 /*
342 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered by monotonically
 * increasing memory addresses, so that the "highest" populated zone is used.
345 */
static void __init find_usable_zone_for_movable(void)
347 {
348 int zone_index;
349 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
350 if (zone_index == ZONE_MOVABLE)
351 continue;
352
353 if (arch_zone_highest_possible_pfn[zone_index] >
354 arch_zone_lowest_possible_pfn[zone_index])
355 break;
356 }
357
358 VM_BUG_ON(zone_index == -1);
359 movable_zone = zone_index;
360 }
361
362 /*
363 * Find the PFN the Movable zone begins in each node. Kernel memory
364 * is spread evenly between nodes as long as the nodes have enough
365 * memory. When they don't, some nodes will have more kernelcore than
366 * others
367 */
static void __init find_zone_movable_pfns_for_nodes(void)
369 {
370 int i, nid;
371 unsigned long usable_startpfn;
372 unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrowing the nodemask */
374 nodemask_t saved_node_state = node_states[N_MEMORY];
375 unsigned long totalpages = early_calculate_totalpages();
376 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
377 struct memblock_region *r;
378
379 /* Need to find movable_zone earlier when movable_node is specified. */
380 find_usable_zone_for_movable();
381
382 /*
383 * If movable_node is specified, ignore kernelcore and movablecore
384 * options.
385 */
386 if (movable_node_is_enabled()) {
387 for_each_mem_region(r) {
388 if (!memblock_is_hotpluggable(r))
389 continue;
390
391 nid = memblock_get_region_node(r);
392
393 usable_startpfn = memblock_region_memory_base_pfn(r);
394 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
395 min(usable_startpfn, zone_movable_pfn[nid]) :
396 usable_startpfn;
397 }
398
399 goto out2;
400 }
401
402 /*
403 * If kernelcore=mirror is specified, ignore movablecore option
404 */
405 if (mirrored_kernelcore) {
406 bool mem_below_4gb_not_mirrored = false;
407
408 if (!memblock_has_mirror()) {
409 pr_warn("The system has no mirror memory, ignore kernelcore=mirror.\n");
410 goto out;
411 }
412
413 if (is_kdump_kernel()) {
414 pr_warn("The system is under kdump, ignore kernelcore=mirror.\n");
415 goto out;
416 }
417
418 for_each_mem_region(r) {
419 if (memblock_is_mirror(r))
420 continue;
421
422 nid = memblock_get_region_node(r);
423
424 usable_startpfn = memblock_region_memory_base_pfn(r);
425
426 if (usable_startpfn < PHYS_PFN(SZ_4G)) {
427 mem_below_4gb_not_mirrored = true;
428 continue;
429 }
430
431 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
432 min(usable_startpfn, zone_movable_pfn[nid]) :
433 usable_startpfn;
434 }
435
436 if (mem_below_4gb_not_mirrored)
437 pr_warn("This configuration results in unmirrored kernel memory.\n");
438
439 goto out2;
440 }
441
442 /*
443 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
444 * amount of necessary memory.
445 */
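	/*
	 * (totalpages * 100 * percent) / 10000 is just totalpages * percent
	 * / 100, so e.g. kernelcore=25% requests a quarter of totalpages.
	 */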
446 if (required_kernelcore_percent)
447 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
448 10000UL;
449 if (required_movablecore_percent)
450 required_movablecore = (totalpages * 100 * required_movablecore_percent) /
451 10000UL;
452
453 /*
	 * If movablecore= was specified, calculate the corresponding size
	 * of kernelcore so that memory usable for
456 * any allocation type is evenly spread. If both kernelcore
457 * and movablecore are specified, then the value of kernelcore
458 * will be used for required_kernelcore if it's greater than
459 * what movablecore would have allowed.
460 */
461 if (required_movablecore) {
462 unsigned long corepages;
463
464 /*
465 * Round-up so that ZONE_MOVABLE is at least as large as what
466 * was requested by the user
467 */
468 required_movablecore =
469 round_up(required_movablecore, MAX_ORDER_NR_PAGES);
470 required_movablecore = min(totalpages, required_movablecore);
471 corepages = totalpages - required_movablecore;
472
473 required_kernelcore = max(required_kernelcore, corepages);
474 }
475
476 /*
477 * If kernelcore was not specified or kernelcore size is larger
478 * than totalpages, there is no ZONE_MOVABLE.
479 */
480 if (!required_kernelcore || required_kernelcore >= totalpages)
481 goto out;
482
483 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
484 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
485
486 restart:
487 /* Spread kernelcore memory as evenly as possible throughout nodes */
488 kernelcore_node = required_kernelcore / usable_nodes;
489 for_each_node_state(nid, N_MEMORY) {
490 unsigned long start_pfn, end_pfn;
491
492 /*
493 * Recalculate kernelcore_node if the division per node
494 * now exceeds what is necessary to satisfy the requested
495 * amount of memory for the kernel
496 */
497 if (required_kernelcore < kernelcore_node)
498 kernelcore_node = required_kernelcore / usable_nodes;
499
500 /*
501 * As the map is walked, we track how much memory is usable
502 * by the kernel using kernelcore_remaining. When it is
503 * 0, the rest of the node is usable by ZONE_MOVABLE
504 */
505 kernelcore_remaining = kernelcore_node;
506
507 /* Go through each range of PFNs within this node */
508 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
509 unsigned long size_pages;
510
511 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
512 if (start_pfn >= end_pfn)
513 continue;
514
515 /* Account for what is only usable for kernelcore */
516 if (start_pfn < usable_startpfn) {
517 unsigned long kernel_pages;
518 kernel_pages = min(end_pfn, usable_startpfn)
519 - start_pfn;
520
521 kernelcore_remaining -= min(kernel_pages,
522 kernelcore_remaining);
523 required_kernelcore -= min(kernel_pages,
524 required_kernelcore);
525
526 /* Continue if range is now fully accounted */
527 if (end_pfn <= usable_startpfn) {
528
529 /*
530 * Push zone_movable_pfn to the end so
531 * that if we have to rebalance
532 * kernelcore across nodes, we will
533 * not double account here
534 */
535 zone_movable_pfn[nid] = end_pfn;
536 continue;
537 }
538 start_pfn = usable_startpfn;
539 }
540
541 /*
542 * The usable PFN range for ZONE_MOVABLE is from
543 * start_pfn->end_pfn. Calculate size_pages as the
544 * number of pages used as kernelcore
545 */
546 size_pages = end_pfn - start_pfn;
547 if (size_pages > kernelcore_remaining)
548 size_pages = kernelcore_remaining;
549 zone_movable_pfn[nid] = start_pfn + size_pages;
550
551 /*
552 * Some kernelcore has been met, update counts and
553 * break if the kernelcore for this node has been
554 * satisfied
555 */
556 required_kernelcore -= min(required_kernelcore,
557 size_pages);
558 kernelcore_remaining -= size_pages;
559 if (!kernelcore_remaining)
560 break;
561 }
562 }
563
564 /*
565 * If there is still required_kernelcore, we do another pass with one
566 * less node in the count. This will push zone_movable_pfn[nid] further
567 * along on the nodes that still have memory until kernelcore is
568 * satisfied
569 */
570 usable_nodes--;
571 if (usable_nodes && required_kernelcore > usable_nodes)
572 goto restart;
573
574 out2:
575 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
576 for_each_node_state(nid, N_MEMORY) {
577 unsigned long start_pfn, end_pfn;
578
579 zone_movable_pfn[nid] =
580 round_up(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
581
582 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
583 if (zone_movable_pfn[nid] >= end_pfn)
584 zone_movable_pfn[nid] = 0;
585 }
586
587 out:
588 /* restore the node_state */
589 node_states[N_MEMORY] = saved_node_state;
590 }
591
void __meminit __init_single_page(struct page *page, unsigned long pfn,
				  unsigned long zone, int nid)
594 {
595 mm_zero_struct_page(page);
596 set_page_links(page, zone, nid, pfn);
597 init_page_count(page);
598 atomic_set(&page->_mapcount, -1);
599 page_cpupid_reset_last(page);
600 page_kasan_tag_reset(page);
601
602 INIT_LIST_HEAD(&page->lru);
603 #ifdef WANT_PAGE_VIRTUAL
604 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
605 if (!is_highmem_idx(zone))
606 set_page_address(page, __va(pfn << PAGE_SHIFT));
607 #endif
608 }
609
610 #ifdef CONFIG_NUMA
611 /*
612 * During memory init memblocks map pfns to nids. The search is expensive and
613 * this caches recent lookups. The implementation of __early_pfn_to_nid
614 * treats start/end as pfns.
615 */
616 struct mminit_pfnnid_cache {
617 unsigned long last_start;
618 unsigned long last_end;
619 int last_nid;
620 };
621
622 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
623
624 /*
625 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
626 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
629 {
630 unsigned long start_pfn, end_pfn;
631 int nid;
632
633 if (state->last_start <= pfn && pfn < state->last_end)
634 return state->last_nid;
635
636 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
637 if (nid != NUMA_NO_NODE) {
638 state->last_start = start_pfn;
639 state->last_end = end_pfn;
640 state->last_nid = nid;
641 }
642
643 return nid;
644 }
645
int __meminit early_pfn_to_nid(unsigned long pfn)
647 {
648 static DEFINE_SPINLOCK(early_pfn_lock);
649 int nid;
650
651 spin_lock(&early_pfn_lock);
652 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
653 if (nid < 0)
654 nid = first_online_node;
655 spin_unlock(&early_pfn_lock);
656
657 return nid;
658 }
659
660 bool hashdist = HASHDIST_DEFAULT;
661
static int __init set_hashdist(char *str)
663 {
664 return kstrtobool(str, &hashdist) == 0;
665 }
666 __setup("hashdist=", set_hashdist);
667
static inline void fixup_hashdist(void)
669 {
670 if (num_node_state(N_MEMORY) == 1)
671 hashdist = false;
672 }
673 #else
static inline void fixup_hashdist(void) {}
675 #endif /* CONFIG_NUMA */
676
677 /*
678 * Initialize a reserved page unconditionally, finding its zone first.
679 */
void __meminit __init_page_from_nid(unsigned long pfn, int nid)
681 {
682 pg_data_t *pgdat;
683 int zid;
684
685 pgdat = NODE_DATA(nid);
686
687 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
688 struct zone *zone = &pgdat->node_zones[zid];
689
690 if (zone_spans_pfn(zone, pfn))
691 break;
692 }
693 __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
694
695 if (pageblock_aligned(pfn))
696 init_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_MOVABLE,
697 false);
698 }
699
700 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
702 {
703 pgdat->first_deferred_pfn = ULONG_MAX;
704 }
705
706 /* Returns true if the struct page for the pfn is initialised */
static inline bool __meminit early_page_initialised(unsigned long pfn, int nid)
708 {
709 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
710 return false;
711
712 return true;
713 }
714
715 /*
716 * Returns true when the remaining initialisation should be deferred until
717 * later in the boot cycle when it can be parallelised.
718 */
719 static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
721 {
722 static unsigned long prev_end_pfn, nr_initialised;
723
724 if (early_page_ext_enabled())
725 return false;
726
727 /* Always populate low zones for address-constrained allocations */
728 if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
729 return false;
730
731 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
732 return true;
733
734 /*
	 * The prev_end_pfn static holds the end of the previous zone.
	 * No locking is needed because this runs very early in boot,
	 * before smp_init().
737 */
738 if (prev_end_pfn != end_pfn) {
739 prev_end_pfn = end_pfn;
740 nr_initialised = 0;
741 }
742
743 /*
744 * We start only with one section of pages, more pages are added as
745 * needed until the rest of deferred pages are initialized.
746 */
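	/*
	 * Deferral only ever kicks in for the zone that reaches the end of
	 * the node (lower zones were already handled above). With the common
	 * 128MiB sections and 4KiB pages that means roughly one section
	 * (32768 pages) of it is initialised here; everything from
	 * first_deferred_pfn onwards is handled later by
	 * deferred_init_memmap().
	 */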
747 nr_initialised++;
748 if ((nr_initialised > PAGES_PER_SECTION) &&
749 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
750 NODE_DATA(nid)->first_deferred_pfn = pfn;
751 return true;
752 }
753 return false;
754 }
755
static void __meminit __init_deferred_page(unsigned long pfn, int nid)
757 {
758 if (early_page_initialised(pfn, nid))
759 return;
760
761 __init_page_from_nid(pfn, nid);
762 }
763 #else
static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
765
static inline bool early_page_initialised(unsigned long pfn, int nid)
767 {
768 return true;
769 }
770
static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
772 {
773 return false;
774 }
775
static inline void __init_deferred_page(unsigned long pfn, int nid)
777 {
778 }
779 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
780
void __meminit init_deferred_page(unsigned long pfn, int nid)
782 {
783 __init_deferred_page(pfn, nid);
784 }
785
786 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
787 static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
789 {
790 static struct memblock_region *r __meminitdata;
791
792 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
793 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
794 for_each_mem_region(r) {
795 if (*pfn < memblock_region_memory_end_pfn(r))
796 break;
797 }
798 }
799 if (*pfn >= memblock_region_memory_base_pfn(r) &&
800 memblock_is_mirror(r)) {
801 *pfn = memblock_region_memory_end_pfn(r);
802 return true;
803 }
804 }
805 return false;
806 }
807
808 /*
809 * Only struct pages that correspond to ranges defined by memblock.memory
810 * are zeroed and initialized by going through __init_single_page() during
811 * memmap_init_zone_range().
812 *
813 * But, there could be struct pages that correspond to holes in
814 * memblock.memory. This can happen because of the following reasons:
815 * - physical memory bank size is not necessarily the exact multiple of the
816 * arbitrary section size
817 * - early reserved memory may not be listed in memblock.memory
818 * - non-memory regions covered by the contiguous flatmem mapping
819 * - memory layouts defined with memmap= kernel parameter may not align
820 * nicely with memmap sections
821 *
822 * Explicitly initialize those struct pages so that:
823 * - PG_Reserved is set
824 * - zone and node links point to zone and node that span the page if the
825 * hole is in the middle of a zone
826 * - zone and node links point to adjacent zone/node if the hole falls on
827 * the zone boundary; the pages in such holes will be prepended to the
828 * zone/node above the hole except for the trailing pages in the last
829 * section that will be appended to the zone/node below.
830 */
static void __init init_unavailable_range(unsigned long spfn,
					  unsigned long epfn,
					  int zone, int node)
834 {
835 unsigned long pfn;
836 u64 pgcnt = 0;
837
838 for_each_valid_pfn(pfn, spfn, epfn) {
839 __init_single_page(pfn_to_page(pfn), pfn, zone, node);
840 __SetPageReserved(pfn_to_page(pfn));
841 pgcnt++;
842 }
843
844 if (pgcnt)
845 pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
846 node, zone_names[zone], pgcnt);
847 }
848
849 /*
850 * Initially all pages are reserved - free ones are freed
851 * up by memblock_free_all() once the early boot process is
852 * done. Non-atomic initialization, single-pass.
853 *
854 * All aligned pageblocks are initialized to the specified migratetype
855 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
856 * zone stats (e.g., nr_isolate_pageblock) are touched.
857 */
void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, unsigned long zone_end_pfn,
		enum meminit_context context,
		struct vmem_altmap *altmap, int migratetype,
		bool isolate_pageblock)
863 {
864 unsigned long pfn, end_pfn = start_pfn + size;
865 struct page *page;
866
867 if (highest_memmap_pfn < end_pfn - 1)
868 highest_memmap_pfn = end_pfn - 1;
869
870 #ifdef CONFIG_ZONE_DEVICE
871 /*
872 * Honor reservation requested by the driver for this ZONE_DEVICE
873 * memory. We limit the total number of pages to initialize to just
874 * those that might contain the memory mapping. We will defer the
875 * ZONE_DEVICE page initialization until after we have released
876 * the hotplug lock.
877 */
878 if (zone == ZONE_DEVICE) {
879 if (!altmap)
880 return;
881
882 if (start_pfn == altmap->base_pfn)
883 start_pfn += altmap->reserve;
884 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
885 }
886 #endif
887
888 for (pfn = start_pfn; pfn < end_pfn; ) {
889 /*
890 * There can be holes in boot-time mem_map[]s handed to this
891 * function. They do not exist on hotplugged memory.
892 */
893 if (context == MEMINIT_EARLY) {
894 if (overlap_memmap_init(zone, &pfn))
895 continue;
896 if (defer_init(nid, pfn, zone_end_pfn)) {
897 deferred_struct_pages = true;
898 break;
899 }
900 }
901
902 page = pfn_to_page(pfn);
903 __init_single_page(page, pfn, zone, nid);
904 if (context == MEMINIT_HOTPLUG) {
905 #ifdef CONFIG_ZONE_DEVICE
906 if (zone == ZONE_DEVICE)
907 __SetPageReserved(page);
908 else
909 #endif
910 __SetPageOffline(page);
911 }
912
913 /*
914 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
915 * such that unmovable allocations won't be scattered all
916 * over the place during system boot.
917 */
918 if (pageblock_aligned(pfn)) {
919 init_pageblock_migratetype(page, migratetype,
920 isolate_pageblock);
921 cond_resched();
922 }
923 pfn++;
924 }
925 }
926
static void __init memmap_init_zone_range(struct zone *zone,
					  unsigned long start_pfn,
					  unsigned long end_pfn,
					  unsigned long *hole_pfn)
931 {
932 unsigned long zone_start_pfn = zone->zone_start_pfn;
933 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
934 int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
935
936 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
937 end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
938
939 if (start_pfn >= end_pfn)
940 return;
941
942 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
943 zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE,
944 false);
945
946 if (*hole_pfn < start_pfn)
947 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
948
949 *hole_pfn = end_pfn;
950 }
951
static void __init memmap_init(void)
953 {
954 unsigned long start_pfn, end_pfn;
955 unsigned long hole_pfn = 0;
956 int i, j, zone_id = 0, nid;
957
958 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
959 struct pglist_data *node = NODE_DATA(nid);
960
961 for (j = 0; j < MAX_NR_ZONES; j++) {
962 struct zone *zone = node->node_zones + j;
963
964 if (!populated_zone(zone))
965 continue;
966
967 memmap_init_zone_range(zone, start_pfn, end_pfn,
968 &hole_pfn);
969 zone_id = j;
970 }
971 }
972
973 /*
	 * Initialize the memory map for the hole in the range [memory_end,
975 * section_end] for SPARSEMEM and in the range [memory_end, memmap_end]
976 * for FLATMEM.
977 * Append the pages in this hole to the highest zone in the last
978 * node.
979 */
980 #ifdef CONFIG_SPARSEMEM
981 end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
982 #else
983 end_pfn = round_up(end_pfn, MAX_ORDER_NR_PAGES);
984 #endif
985 if (hole_pfn < end_pfn)
986 init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
987 }
988
989 #ifdef CONFIG_ZONE_DEVICE
static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
					  unsigned long zone_idx, int nid,
					  struct dev_pagemap *pgmap)
993 {
994
995 __init_single_page(page, pfn, zone_idx, nid);
996
997 /*
998 * Mark page reserved as it will need to wait for onlining
999 * phase for it to be fully associated with a zone.
1000 *
1001 * We can use the non-atomic __set_bit operation for setting
1002 * the flag as we are still initializing the pages.
1003 */
1004 __SetPageReserved(page);
1005
1006 /*
1007 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
1008 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
1009 * ever freed or placed on a driver-private list.
1010 */
1011 page_folio(page)->pgmap = pgmap;
1012 page->zone_device_data = NULL;
1013
1014 /*
1015 * Mark the block movable so that blocks are reserved for
1016 * movable at startup. This will force kernel allocations
1017 * to reserve their blocks rather than leaking throughout
1018 * the address space during boot when many long-lived
1019 * kernel allocations are made.
1020 *
1021 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
1022 * because this is done early in section_activate()
1023 */
1024 if (pageblock_aligned(pfn)) {
1025 init_pageblock_migratetype(page, MIGRATE_MOVABLE, false);
1026 cond_resched();
1027 }
1028
1029 /*
	 * ZONE_DEVICE pages other than MEMORY_DEVICE_GENERIC are released
1031 * directly to the driver page allocator which will set the page count
1032 * to 1 when allocating the page.
1033 *
	 * MEMORY_DEVICE_GENERIC and MEMORY_DEVICE_FS_DAX pages automatically have
1035 * their refcount reset to one whenever they are freed (ie. after
1036 * their refcount drops to 0).
1037 */
1038 switch (pgmap->type) {
1039 case MEMORY_DEVICE_FS_DAX:
1040 case MEMORY_DEVICE_PRIVATE:
1041 case MEMORY_DEVICE_COHERENT:
1042 case MEMORY_DEVICE_PCI_P2PDMA:
1043 set_page_count(page, 0);
1044 break;
1045
1046 case MEMORY_DEVICE_GENERIC:
1047 break;
1048 }
1049 }
1050
1051 /*
1052 * With compound page geometry and when struct pages are stored in ram most
1053 * tail pages are reused. Consequently, the amount of unique struct pages to
 * initialize is a lot smaller than the total amount of struct pages being
1055 * mapped. This is a paired / mild layering violation with explicit knowledge
1056 * of how the sparse_vmemmap internals handle compound pages in the lack
1057 * of an altmap. See vmemmap_populate_compound_pages().
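 *
 * E.g. with 4KiB pages and a 64-byte struct page, the optimised case below
 * comes to VMEMMAP_RESERVE_NR * 64 unique struct pages per compound device
 * page (128 when VMEMMAP_RESERVE_NR is 2), regardless of how large
 * pgmap_vmemmap_nr(pgmap) is.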
1058 */
static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
					      struct dev_pagemap *pgmap)
1061 {
1062 if (!vmemmap_can_optimize(altmap, pgmap))
1063 return pgmap_vmemmap_nr(pgmap);
1064
1065 return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
1066 }
1067
static void __ref memmap_init_compound(struct page *head,
				       unsigned long head_pfn,
				       unsigned long zone_idx, int nid,
				       struct dev_pagemap *pgmap,
				       unsigned long nr_pages)
1073 {
1074 unsigned long pfn, end_pfn = head_pfn + nr_pages;
1075 unsigned int order = pgmap->vmemmap_shift;
1076
1077 /*
1078 * We have to initialize the pages, including setting up page links.
1079 * prep_compound_page() does not take care of that, so instead we
1080 * open-code prep_compound_page() so we can take care of initializing
1081 * the pages in the same go.
1082 */
1083 __SetPageHead(head);
1084 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
1085 struct page *page = pfn_to_page(pfn);
1086
1087 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1088 prep_compound_tail(page, head, order);
1089 set_page_count(page, 0);
1090 }
1091 prep_compound_head(head, order);
1092 }
1093
void __ref memmap_init_zone_device(struct zone *zone,
				   unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct dev_pagemap *pgmap)
1098 {
1099 unsigned long pfn, end_pfn = start_pfn + nr_pages;
1100 struct pglist_data *pgdat = zone->zone_pgdat;
1101 struct vmem_altmap *altmap = pgmap_altmap(pgmap);
1102 unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
1103 unsigned long zone_idx = zone_idx(zone);
1104 unsigned long start = jiffies;
1105 int nid = pgdat->node_id;
1106
1107 if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
1108 return;
1109
1110 /*
1111 * The call to memmap_init should have already taken care
1112 * of the pages reserved for the memmap, so we can just jump to
1113 * the end of that region and start processing the device pages.
1114 */
1115 if (altmap) {
1116 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
1117 nr_pages = end_pfn - start_pfn;
1118 }
1119
1120 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
1121 struct page *page = pfn_to_page(pfn);
1122
1123 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1124
1125 if (pfns_per_compound == 1)
1126 continue;
1127
1128 memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
1129 compound_nr_pages(altmap, pgmap));
1130 }
1131
1132 pr_debug("%s initialised %lu pages in %ums\n", __func__,
1133 nr_pages, jiffies_to_msecs(jiffies - start));
1134 }
1135 #endif
1136
1137 /*
1138 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
1139 * because it is sized independent of architecture. Unlike the other zones,
1140 * the starting point for ZONE_MOVABLE is not fixed. It may be different
1141 * in each node depending on the size of each node and how evenly kernelcore
1142 * is distributed. This helper function adjusts the zone ranges
1143 * provided by the architecture for a given node by using the end of the
1144 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory addresses.
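 *
 * For example (without kernelcore=mirror): if a node spans PFNs
 * 0x100000-0x200000 and zone_movable_pfn[nid] is 0x180000, the highest
 * kernel zone on that node is clipped to end at 0x180000 and ZONE_MOVABLE
 * covers 0x180000-0x200000.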
1146 */
static void __init adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
1152 {
1153 /* Only adjust if ZONE_MOVABLE is on this node */
1154 if (zone_movable_pfn[nid]) {
1155 /* Size ZONE_MOVABLE */
1156 if (zone_type == ZONE_MOVABLE) {
1157 *zone_start_pfn = zone_movable_pfn[nid];
1158 *zone_end_pfn = min(node_end_pfn,
1159 arch_zone_highest_possible_pfn[movable_zone]);
1160
1161 /* Adjust for ZONE_MOVABLE starting within this range */
1162 } else if (!mirrored_kernelcore &&
1163 *zone_start_pfn < zone_movable_pfn[nid] &&
1164 *zone_end_pfn > zone_movable_pfn[nid]) {
1165 *zone_end_pfn = zone_movable_pfn[nid];
1166
1167 /* Check if this whole range is within ZONE_MOVABLE */
1168 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
1169 *zone_start_pfn = *zone_end_pfn;
1170 }
1171 }
1172
1173 /*
1174 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
1175 * then all holes in the requested range will be accounted for.
1176 */
static unsigned long __init __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
1180 {
1181 unsigned long nr_absent = range_end_pfn - range_start_pfn;
1182 unsigned long start_pfn, end_pfn;
1183 int i;
1184
1185 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
1186 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
1187 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
1188 nr_absent -= end_pfn - start_pfn;
1189 }
1190 return nr_absent;
1191 }
1192
1193 /**
1194 * absent_pages_in_range - Return number of page frames in holes within a range
1195 * @start_pfn: The start PFN to start searching for holes
1196 * @end_pfn: The end PFN to stop searching for holes
1197 *
 * Return: the number of page frames in memory holes within a range.
1199 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
					   unsigned long end_pfn)
1202 {
1203 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
1204 }
1205
1206 /* Return the number of page frames in holes in a zone on a node */
static unsigned long __init zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long zone_start_pfn,
					unsigned long zone_end_pfn)
1211 {
1212 unsigned long nr_absent;
1213
1214 /* zone is empty, we don't have any absent pages */
1215 if (zone_start_pfn == zone_end_pfn)
1216 return 0;
1217
1218 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
1219
1220 /*
1221 * ZONE_MOVABLE handling.
1222 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
1223 * and vice versa.
1224 */
1225 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
1226 unsigned long start_pfn, end_pfn;
1227 struct memblock_region *r;
1228
1229 for_each_mem_region(r) {
1230 start_pfn = clamp(memblock_region_memory_base_pfn(r),
1231 zone_start_pfn, zone_end_pfn);
1232 end_pfn = clamp(memblock_region_memory_end_pfn(r),
1233 zone_start_pfn, zone_end_pfn);
1234
1235 if (zone_type == ZONE_MOVABLE &&
1236 memblock_is_mirror(r))
1237 nr_absent += end_pfn - start_pfn;
1238
1239 if (zone_type == ZONE_NORMAL &&
1240 !memblock_is_mirror(r))
1241 nr_absent += end_pfn - start_pfn;
1242 }
1243 }
1244
1245 return nr_absent;
1246 }
1247
1248 /*
1249 * Return the number of pages a zone spans in a node, including holes
1250 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
1251 */
static unsigned long __init zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
1258 {
1259 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
1260 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
1261
1262 /* Get the start and end of the zone */
1263 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
1264 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
1265 adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn,
1266 zone_start_pfn, zone_end_pfn);
1267
1268 /* Check that this node has pages within the zone's required range */
1269 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
1270 return 0;
1271
1272 /* Move the zone boundaries inside the node if necessary */
1273 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
1274 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
1275
1276 /* Return the spanned pages */
1277 return *zone_end_pfn - *zone_start_pfn;
1278 }
1279
static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat)
1281 {
1282 struct zone *z;
1283
1284 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
1285 z->zone_start_pfn = 0;
1286 z->spanned_pages = 0;
1287 z->present_pages = 0;
1288 #if defined(CONFIG_MEMORY_HOTPLUG)
1289 z->present_early_pages = 0;
1290 #endif
1291 }
1292
1293 pgdat->node_spanned_pages = 0;
1294 pgdat->node_present_pages = 0;
1295 pr_debug("On node %d totalpages: 0\n", pgdat->node_id);
1296 }
1297
static void __init calc_nr_kernel_pages(void)
1299 {
1300 unsigned long start_pfn, end_pfn;
1301 phys_addr_t start_addr, end_addr;
1302 u64 u;
1303 #ifdef CONFIG_HIGHMEM
1304 unsigned long high_zone_low = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM];
1305 #endif
1306
1307 for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
1308 start_pfn = PFN_UP(start_addr);
1309 end_pfn = PFN_DOWN(end_addr);
1310
1311 if (start_pfn < end_pfn) {
1312 nr_all_pages += end_pfn - start_pfn;
1313 #ifdef CONFIG_HIGHMEM
1314 start_pfn = clamp(start_pfn, 0, high_zone_low);
1315 end_pfn = clamp(end_pfn, 0, high_zone_low);
1316 #endif
1317 nr_kernel_pages += end_pfn - start_pfn;
1318 }
1319 }
1320 }
1321
static void __init calculate_node_totalpages(struct pglist_data *pgdat,
					     unsigned long node_start_pfn,
					     unsigned long node_end_pfn)
1325 {
1326 unsigned long realtotalpages = 0, totalpages = 0;
1327 enum zone_type i;
1328
1329 for (i = 0; i < MAX_NR_ZONES; i++) {
1330 struct zone *zone = pgdat->node_zones + i;
1331 unsigned long zone_start_pfn, zone_end_pfn;
1332 unsigned long spanned, absent;
1333 unsigned long real_size;
1334
1335 spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
1336 node_start_pfn,
1337 node_end_pfn,
1338 &zone_start_pfn,
1339 &zone_end_pfn);
1340 absent = zone_absent_pages_in_node(pgdat->node_id, i,
1341 zone_start_pfn,
1342 zone_end_pfn);
1343
1344 real_size = spanned - absent;
1345
1346 if (spanned)
1347 zone->zone_start_pfn = zone_start_pfn;
1348 else
1349 zone->zone_start_pfn = 0;
1350 zone->spanned_pages = spanned;
1351 zone->present_pages = real_size;
1352 #if defined(CONFIG_MEMORY_HOTPLUG)
1353 zone->present_early_pages = real_size;
1354 #endif
1355
1356 totalpages += spanned;
1357 realtotalpages += real_size;
1358 }
1359
1360 pgdat->node_spanned_pages = totalpages;
1361 pgdat->node_present_pages = realtotalpages;
1362 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
1363 }
1364
1365 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
1367 {
1368 struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
1369
1370 spin_lock_init(&ds_queue->split_queue_lock);
1371 INIT_LIST_HEAD(&ds_queue->split_queue);
1372 ds_queue->split_queue_len = 0;
1373 }
1374 #else
static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
1376 #endif
1377
1378 #ifdef CONFIG_COMPACTION
static void pgdat_init_kcompactd(struct pglist_data *pgdat)
1380 {
1381 init_waitqueue_head(&pgdat->kcompactd_wait);
1382 }
1383 #else
static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
1385 #endif
1386
static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
1388 {
1389 int i;
1390
1391 pgdat_resize_init(pgdat);
1392 pgdat_kswapd_lock_init(pgdat);
1393
1394 pgdat_init_split_queue(pgdat);
1395 pgdat_init_kcompactd(pgdat);
1396
1397 init_waitqueue_head(&pgdat->kswapd_wait);
1398 init_waitqueue_head(&pgdat->pfmemalloc_wait);
1399
1400 for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
1401 init_waitqueue_head(&pgdat->reclaim_wait[i]);
1402
1403 pgdat_page_ext_init(pgdat);
1404 lruvec_init(&pgdat->__lruvec);
1405 }
1406
static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
					  unsigned long remaining_pages)
1409 {
1410 atomic_long_set(&zone->managed_pages, remaining_pages);
1411 zone_set_nid(zone, nid);
1412 zone->name = zone_names[idx];
1413 zone->zone_pgdat = NODE_DATA(nid);
1414 spin_lock_init(&zone->lock);
1415 zone_seqlock_init(zone);
1416 zone_pcp_init(zone);
1417 }
1418
static void __meminit zone_init_free_lists(struct zone *zone)
1420 {
1421 unsigned int order, t;
1422 for_each_migratetype_order(order, t) {
1423 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1424 zone->free_area[order].nr_free = 0;
1425 }
1426
1427 #ifdef CONFIG_UNACCEPTED_MEMORY
1428 INIT_LIST_HEAD(&zone->unaccepted_pages);
1429 #endif
1430 }
1431
void __meminit init_currently_empty_zone(struct zone *zone,
					 unsigned long zone_start_pfn,
					 unsigned long size)
1435 {
1436 struct pglist_data *pgdat = zone->zone_pgdat;
1437 int zone_idx = zone_idx(zone) + 1;
1438
1439 if (zone_idx > pgdat->nr_zones)
1440 pgdat->nr_zones = zone_idx;
1441
1442 zone->zone_start_pfn = zone_start_pfn;
1443
1444 mminit_dprintk(MMINIT_TRACE, "memmap_init",
1445 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
1446 pgdat->node_id,
1447 (unsigned long)zone_idx(zone),
1448 zone_start_pfn, (zone_start_pfn + size));
1449
1450 zone_init_free_lists(zone);
1451 zone->initialized = 1;
1452 }
1453
1454 #ifndef CONFIG_SPARSEMEM
1455 /*
1456 * Calculate the size of the zone->pageblock_flags rounded to an unsigned long
1457 * Start by making sure zonesize is a multiple of pageblock_order by rounding
1458 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
1459 * round what is now in bits to nearest long in bits, then return it in
1460 * bytes.
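 *
 * For example, a 1GiB zone with 4KiB pages and pageblock_order == 9 has
 * 262144 pages, i.e. 512 pageblocks; at NR_PAGEBLOCK_BITS == 4 that is
 * 2048 bits, rounded and returned as 256 bytes.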
1461 */
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
1463 {
1464 unsigned long usemapsize;
1465
1466 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
1467 usemapsize = round_up(zonesize, pageblock_nr_pages);
1468 usemapsize = usemapsize >> pageblock_order;
1469 usemapsize *= NR_PAGEBLOCK_BITS;
1470 usemapsize = round_up(usemapsize, BITS_PER_LONG);
1471
1472 return usemapsize / BITS_PER_BYTE;
1473 }
1474
static void __ref setup_usemap(struct zone *zone)
1476 {
1477 unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
1478 zone->spanned_pages);
1479 zone->pageblock_flags = NULL;
1480 if (usemapsize) {
1481 zone->pageblock_flags =
1482 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
1483 zone_to_nid(zone));
1484 if (!zone->pageblock_flags)
1485 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
1486 usemapsize, zone->name, zone_to_nid(zone));
1487 }
1488 }
1489 #else
static inline void setup_usemap(struct zone *zone) {}
1491 #endif /* CONFIG_SPARSEMEM */
1492
1493 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
1494
1495 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __init set_pageblock_order(void)
1497 {
1498 unsigned int order = PAGE_BLOCK_MAX_ORDER;
1499
1500 /* Check that pageblock_nr_pages has not already been setup */
1501 if (pageblock_order)
1502 return;
1503
1504 /* Don't let pageblocks exceed the maximum allocation granularity. */
1505 if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
1506 order = HUGETLB_PAGE_ORDER;
1507
1508 /*
1509 * Assume the largest contiguous order of interest is a huge page.
1510 * This value may be variable depending on boot parameters on powerpc.
1511 */
1512 pageblock_order = order;
1513 }
1514 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
1515
1516 /*
1517 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
1518 * is unused as pageblock_order is set at compile-time. See
1519 * include/linux/pageblock-flags.h for the values of pageblock_order based on
1520 * the kernel config
1521 */
void __init set_pageblock_order(void)
1523 {
1524 }
1525
1526 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
1527
1528 /*
1529 * Set up the zone data structures
1530 * - init pgdat internals
1531 * - init all zones belonging to this node
1532 *
1533 * NOTE: this function is only called during memory hotplug
1534 */
1535 #ifdef CONFIG_MEMORY_HOTPLUG
void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
1537 {
1538 int nid = pgdat->node_id;
1539 enum zone_type z;
1540 int cpu;
1541
1542 pgdat_init_internals(pgdat);
1543
1544 if (pgdat->per_cpu_nodestats == &boot_nodestats)
1545 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
1546
1547 /*
1548 * Reset the nr_zones, order and highest_zoneidx before reuse.
1549 * Note that kswapd will init kswapd_highest_zoneidx properly
1550 * when it starts in the near future.
1551 */
1552 pgdat->nr_zones = 0;
1553 pgdat->kswapd_order = 0;
1554 pgdat->kswapd_highest_zoneidx = 0;
1555 pgdat->node_start_pfn = 0;
1556 pgdat->node_present_pages = 0;
1557
1558 for_each_online_cpu(cpu) {
1559 struct per_cpu_nodestat *p;
1560
1561 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
1562 memset(p, 0, sizeof(*p));
1563 }
1564
1565 /*
1566 * When memory is hot-added, all the memory is in offline state. So
1567 * clear all zones' present_pages and managed_pages because they will
1568 * be updated in online_pages() and offline_pages().
1569 */
1570 for (z = 0; z < MAX_NR_ZONES; z++) {
1571 struct zone *zone = pgdat->node_zones + z;
1572
1573 zone->present_pages = 0;
1574 zone_init_internals(zone, z, nid, 0);
1575 }
1576 }
1577 #endif
1578
static void __init free_area_init_core(struct pglist_data *pgdat)
1580 {
1581 enum zone_type j;
1582 int nid = pgdat->node_id;
1583
1584 pgdat_init_internals(pgdat);
1585 pgdat->per_cpu_nodestats = &boot_nodestats;
1586
1587 for (j = 0; j < MAX_NR_ZONES; j++) {
1588 struct zone *zone = pgdat->node_zones + j;
1589 unsigned long size = zone->spanned_pages;
1590
1591 /*
		 * Initialize zone->managed_pages as 0; it will be reset
		 * when the memblock allocator frees pages into the buddy system.
1594 */
1595 zone_init_internals(zone, j, nid, zone->present_pages);
1596
1597 if (!size)
1598 continue;
1599
1600 setup_usemap(zone);
1601 init_currently_empty_zone(zone, zone->zone_start_pfn, size);
1602 }
1603 }
1604
void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr, int nid, bool exact_nid)
1607 {
1608 void *ptr;
1609
1610 /*
1611 * Kmemleak will explicitly scan mem_map by traversing all valid
	 * `struct page`, so memblock does not need to be added to the scan list.
1613 */
1614 if (exact_nid)
1615 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
1616 MEMBLOCK_ALLOC_NOLEAKTRACE,
1617 nid);
1618 else
1619 ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
1620 MEMBLOCK_ALLOC_NOLEAKTRACE,
1621 nid);
1622
1623 if (ptr && size > 0)
1624 page_init_poison(ptr, size);
1625
1626 return ptr;
1627 }
1628
1629 #ifdef CONFIG_FLATMEM
static void __init alloc_node_mem_map(struct pglist_data *pgdat)
1631 {
1632 unsigned long start, offset, size, end;
1633 struct page *map;
1634
1635 /* Skip empty nodes */
1636 if (!pgdat->node_spanned_pages)
1637 return;
1638
1639 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
1640 offset = pgdat->node_start_pfn - start;
1641 /*
1642 * The zone's endpoints aren't required to be MAX_PAGE_ORDER
	 * aligned, but the node_mem_map endpoints must be, in order
1644 * for the buddy allocator to function correctly.
1645 */
1646 end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES);
1647 size = (end - start) * sizeof(struct page);
1648 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
1649 pgdat->node_id, false);
1650 if (!map)
1651 panic("Failed to allocate %ld bytes for node %d memory map\n",
1652 size, pgdat->node_id);
1653 pgdat->node_mem_map = map + offset;
1654 memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
1655 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
1656 __func__, pgdat->node_id, (unsigned long)pgdat,
1657 (unsigned long)pgdat->node_mem_map);
1658
1659 /* the global mem_map is just set as node 0's */
1660 WARN_ON(pgdat != NODE_DATA(0));
1661
1662 mem_map = pgdat->node_mem_map;
1663 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
1664 mem_map -= offset;
1665
1666 max_mapnr = end - start;
1667 }
1668 #else
static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
1670 #endif /* CONFIG_FLATMEM */
1671
1672 /**
1673 * get_pfn_range_for_nid - Return the start and end page frames for a node
1674 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
1675 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
1676 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
1677 *
1678 * It returns the start and end page frame of a node based on information
1679 * provided by memblock_set_node(). If called for a node
1680 * with no available memory, the start and end PFNs will be 0.
1681 */
void __init get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
1684 {
1685 unsigned long this_start_pfn, this_end_pfn;
1686 int i;
1687
1688 *start_pfn = -1UL;
1689 *end_pfn = 0;
1690
1691 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
1692 *start_pfn = min(*start_pfn, this_start_pfn);
1693 *end_pfn = max(*end_pfn, this_end_pfn);
1694 }
1695
1696 if (*start_pfn == -1UL)
1697 *start_pfn = 0;
1698 }
1699
static void __init free_area_init_node(int nid)
1701 {
1702 pg_data_t *pgdat = NODE_DATA(nid);
1703 unsigned long start_pfn = 0;
1704 unsigned long end_pfn = 0;
1705
1706 /* pg_data_t should be reset to zero when it's allocated */
1707 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
1708
1709 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1710
1711 pgdat->node_id = nid;
1712 pgdat->node_start_pfn = start_pfn;
1713 pgdat->per_cpu_nodestats = NULL;
1714
1715 if (start_pfn != end_pfn) {
1716 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
1717 (u64)start_pfn << PAGE_SHIFT,
1718 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
1719
1720 calculate_node_totalpages(pgdat, start_pfn, end_pfn);
1721 } else {
1722 pr_info("Initmem setup node %d as memoryless\n", nid);
1723
1724 reset_memoryless_node_totalpages(pgdat);
1725 }
1726
1727 alloc_node_mem_map(pgdat);
1728 pgdat_set_deferred_range(pgdat);
1729
1730 free_area_init_core(pgdat);
1731 lru_gen_init_pgdat(pgdat);
1732 }
1733
1734 /* Any regular or high memory on that node? */
1735 static void __init check_for_memory(pg_data_t *pgdat)
1736 {
1737 enum zone_type zone_type;
1738
1739 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
1740 struct zone *zone = &pgdat->node_zones[zone_type];
1741 if (populated_zone(zone)) {
1742 if (IS_ENABLED(CONFIG_HIGHMEM))
1743 node_set_state(pgdat->node_id, N_HIGH_MEMORY);
1744 if (zone_type <= ZONE_NORMAL)
1745 node_set_state(pgdat->node_id, N_NORMAL_MEMORY);
1746 break;
1747 }
1748 }
1749 }
1750
1751 #if MAX_NUMNODES > 1
1752 /*
1753 * Figure out the number of possible node ids.
1754 */
1755 void __init setup_nr_node_ids(void)
1756 {
1757 unsigned int highest;
1758
1759 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
1760 nr_node_ids = highest + 1;
1761 }
1762 #endif
1763
1764 /*
1765 * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
1766 * such cases we allow max_zone_pfn to be sorted in descending order.
1767 */
1768 static bool arch_has_descending_max_zone_pfns(void)
1769 {
1770 return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
1771 }
1772
1773 static void __init set_high_memory(void)
1774 {
1775 phys_addr_t highmem = memblock_end_of_DRAM();
1776
1777 /*
1778 * Some architectures (e.g. ARM) set high_memory very early and
1779 * use it in arch setup code.
1780 * If an architecture already set high_memory, don't overwrite it.
1781 */
1782 if (high_memory)
1783 return;
1784
1785 #ifdef CONFIG_HIGHMEM
1786 if (arch_has_descending_max_zone_pfns() ||
1787 highmem > PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]))
1788 highmem = PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]);
1789 #endif
1790
1791 high_memory = phys_to_virt(highmem - 1) + 1;
1792 }
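/*
 * Illustrative sketch (hypothetical address): if DRAM ends at physical
 * 0x80000000, the assignment above evaluates to
 *
 *	high_memory = phys_to_virt(0x7fffffff) + 1;
 *
 * i.e. the last DRAM-backed byte is translated and then stepped past,
 * rather than translating the (unbacked) end address itself.
 */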
1793
1794 /**
1795 * free_area_init - Initialise all pg_data_t and zone data
1796 *
1797 * This will call free_area_init_node() for each active node in the system.
1798 * Using the page ranges provided by memblock_set_node(), the size of each
1799 * zone in each node and their holes are calculated. If the maximum PFNs
1800 * of two adjacent zones match, the higher zone is assumed to be empty.
1801 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
1802 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
1803 * starts where the previous one ended. For example, ZONE_DMA32 starts
1804 * at arch_max_dma_pfn.
1805 */
1806 static void __init free_area_init(void)
1807 {
1808 unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
1809 unsigned long start_pfn, end_pfn;
1810 int i, nid, zone;
1811 bool descending;
1812
1813 arch_zone_limits_init(max_zone_pfn);
1814 sparse_init();
1815
1816 start_pfn = PHYS_PFN(memblock_start_of_DRAM());
1817 descending = arch_has_descending_max_zone_pfns();
1818
1819 for (i = 0; i < MAX_NR_ZONES; i++) {
1820 if (descending)
1821 zone = MAX_NR_ZONES - i - 1;
1822 else
1823 zone = i;
1824
1825 if (zone == ZONE_MOVABLE)
1826 continue;
1827
1828 end_pfn = max(max_zone_pfn[zone], start_pfn);
1829 arch_zone_lowest_possible_pfn[zone] = start_pfn;
1830 arch_zone_highest_possible_pfn[zone] = end_pfn;
1831
1832 start_pfn = end_pfn;
1833 }
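	/*
	 * Worked example (hypothetical values): with start_pfn = 0x100 and
	 * max_zone_pfn = { [ZONE_DMA] = 0x1000, [ZONE_DMA32] = 0x100000,
	 * [ZONE_NORMAL] = 0x400000 }, the loop above yields
	 *
	 *	ZONE_DMA:    [0x100,    0x1000)
	 *	ZONE_DMA32:  [0x1000,   0x100000)
	 *	ZONE_NORMAL: [0x100000, 0x400000)
	 *
	 * i.e. each zone starts where the previous one ended, and a zone whose
	 * max_zone_pfn does not exceed the running start_pfn ends up empty.
	 */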
1834
1835 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
1836 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
1837 find_zone_movable_pfns_for_nodes();
1838
1839 /* Print out the zone ranges */
1840 pr_info("Zone ranges:\n");
1841 for (i = 0; i < MAX_NR_ZONES; i++) {
1842 if (i == ZONE_MOVABLE)
1843 continue;
1844 pr_info(" %-8s ", zone_names[i]);
1845 if (arch_zone_lowest_possible_pfn[i] ==
1846 arch_zone_highest_possible_pfn[i])
1847 pr_cont("empty\n");
1848 else
1849 pr_cont("[mem %#018Lx-%#018Lx]\n",
1850 (u64)arch_zone_lowest_possible_pfn[i]
1851 << PAGE_SHIFT,
1852 ((u64)arch_zone_highest_possible_pfn[i]
1853 << PAGE_SHIFT) - 1);
1854 }
1855
1856 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
1857 pr_info("Movable zone start for each node\n");
1858 for (i = 0; i < MAX_NUMNODES; i++) {
1859 if (zone_movable_pfn[i])
1860 pr_info(" Node %d: %#018Lx\n", i,
1861 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
1862 }
1863
1864 /*
1865 * Print out the early node map, and initialize the
1866 * subsection-map relative to active online memory ranges to
1867 * enable future "sub-section" extensions of the memory map.
1868 */
1869 pr_info("Early memory node ranges\n");
1870 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
1871 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
1872 (u64)start_pfn << PAGE_SHIFT,
1873 ((u64)end_pfn << PAGE_SHIFT) - 1);
1874 sparse_init_subsection_map(start_pfn, end_pfn - start_pfn);
1875 }
1876
1877 /* Initialise every node */
1878 mminit_verify_pageflags_layout();
1879 setup_nr_node_ids();
1880 set_pageblock_order();
1881
1882 for_each_node(nid) {
1883 pg_data_t *pgdat;
1884
1885 /*
1886 * If an architecture has not allocated node data for
1887 * this node, presume the node is memoryless or offline.
1888 */
1889 if (!NODE_DATA(nid))
1890 alloc_offline_node_data(nid);
1891
1892 pgdat = NODE_DATA(nid);
1893 free_area_init_node(nid);
1894
1895 /*
1896 * No sysfs hierarchy will be created via register_node()
1897 * for a memory-less node because it is not marked as N_MEMORY
1898 * here and won't be set online later. The benefit is that
1899 * userspace programs won't be confused by sysfs files/directories
1900 * of a memory-less node. The pgdat will get fully initialized by
1901 * hotadd_init_pgdat() when memory is hotplugged into this node.
1902 */
1903 if (pgdat->node_present_pages) {
1904 node_set_state(nid, N_MEMORY);
1905 check_for_memory(pgdat);
1906 }
1907 }
1908
1909 for_each_node_state(nid, N_MEMORY)
1910 sparse_vmemmap_init_nid_late(nid);
1911
1912 calc_nr_kernel_pages();
1913 memmap_init();
1914
1915 /* disable hash distribution for systems with a single node */
1916 fixup_hashdist();
1917
1918 set_high_memory();
1919 }
1920
1921 /**
1922 * node_map_pfn_alignment - determine the maximum internode alignment
1923 *
1924 * This function should be called after node map is populated and sorted.
1925 * It calculates the maximum power of two alignment which can distinguish
1926 * all the nodes.
1927 *
1928 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
1929 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
1930 * nodes are shifted by 256MiB, 256MiB alignment is indicated. Note that if only the last node is
1931 * shifted, 1GiB is enough and this function will indicate so.
1932 *
1933 * This is used to test whether pfn -> nid mapping of the chosen memory
1934 * model has fine enough granularity to avoid incorrect mapping for the
1935 * populated node map.
1936 *
1937 * Return: the determined alignment in pfn's. 0 if there is no alignment
1938 * requirement (single node).
1939 */
1940 unsigned long __init node_map_pfn_alignment(void)
1941 {
1942 unsigned long accl_mask = 0, last_end = 0;
1943 unsigned long start, end, mask;
1944 int last_nid = NUMA_NO_NODE;
1945 int i, nid;
1946
1947 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1948 if (!start || last_nid < 0 || last_nid == nid) {
1949 last_nid = nid;
1950 last_end = end;
1951 continue;
1952 }
1953
1954 /*
1955 * Start with a mask granular enough to pin-point to the
1956 * start pfn and tick off bits one-by-one until it becomes
1957 * too coarse to separate the current node from the last.
1958 */
1959 mask = ~((1 << __ffs(start)) - 1);
1960 while (mask && last_end <= (start & (mask << 1)))
1961 mask <<= 1;
1962
1963 /* accumulate all internode masks */
1964 accl_mask |= mask;
1965 }
1966
1967 /* convert mask to number of pages */
1968 return ~accl_mask + 1;
1969 }
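/*
 * Worked example (hypothetical layout, 4KiB pages): node 0 spans pfns
 * [0, 0x40000) and node 1 starts at pfn 0x40000 (the 1GiB boundary).
 * For the node 1 range, __ffs(0x40000) is 18, so the initial mask is
 * ~0x3ffff; last_end (0x40000) is not covered by any coarser mask, so
 * accl_mask stays ~0x3ffff and the function returns 0x40000 pfns,
 * i.e. 1GiB alignment is enough to tell the two nodes apart.
 */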
1970
1971 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1972 static void __init deferred_free_pages(unsigned long pfn,
1973 unsigned long nr_pages)
1974 {
1975 struct page *page;
1976 unsigned long i;
1977
1978 if (!nr_pages)
1979 return;
1980
1981 page = pfn_to_page(pfn);
1982
1983 /* Free a large naturally-aligned chunk if possible */
1984 if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
1985 for (i = 0; i < nr_pages; i += pageblock_nr_pages)
1986 init_pageblock_migratetype(page + i, MIGRATE_MOVABLE,
1987 false);
1988 __free_pages_core(page, MAX_PAGE_ORDER, MEMINIT_EARLY);
1989 return;
1990 }
1991
1992 /* Accept chunks smaller than MAX_PAGE_ORDER upfront */
1993 accept_memory(PFN_PHYS(pfn), nr_pages * PAGE_SIZE);
1994
1995 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1996 if (pageblock_aligned(pfn))
1997 init_pageblock_migratetype(page, MIGRATE_MOVABLE,
1998 false);
1999 __free_pages_core(page, 0, MEMINIT_EARLY);
2000 }
2001 }
2002
2003 /* Completion tracking for deferred_init_memmap() threads */
2004 static atomic_t pgdat_init_n_undone __initdata;
2005 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
2006
2007 static inline void __init pgdat_init_report_one_done(void)
2008 {
2009 if (atomic_dec_and_test(&pgdat_init_n_undone))
2010 complete(&pgdat_init_all_done_comp);
2011 }
2012
2013 /*
2014 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
2015 * by performing it only once every MAX_ORDER_NR_PAGES.
2016 * Return number of pages initialized.
2017 */
2018 static unsigned long __init deferred_init_pages(struct zone *zone,
2019 unsigned long pfn, unsigned long end_pfn)
2020 {
2021 int nid = zone_to_nid(zone);
2022 unsigned long nr_pages = end_pfn - pfn;
2023 int zid = zone_idx(zone);
2024 struct page *page = pfn_to_page(pfn);
2025
2026 for (; pfn < end_pfn; pfn++, page++)
2027 __init_single_page(page, pfn, zid, nid);
2028 return nr_pages;
2029 }
2030
2031 /*
2032 * Initialize and free pages.
2033 *
2034 * At this point reserved pages and struct pages that correspond to holes in
2035 * memblock.memory are already initialized so every free range has a valid
2036 * memory map around it.
2037 * This ensures that access of pages that are ahead of the range being
2038 * initialized (computing buddy page in __free_one_page()) always reads a valid
2039 * struct page.
2040 *
2041 * To try to improve CPU cache locality, the loop is broken along
2042 * max page order boundaries.
2043 */
2044 static unsigned long __init
2045 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2046 struct zone *zone, bool can_resched)
2047 {
2048 int nid = zone_to_nid(zone);
2049 unsigned long nr_pages = 0;
2050 phys_addr_t start, end;
2051 u64 i = 0;
2052
2053 for_each_free_mem_range(i, nid, 0, &start, &end, NULL) {
2054 unsigned long spfn = PFN_UP(start);
2055 unsigned long epfn = PFN_DOWN(end);
2056
2057 if (spfn >= end_pfn)
2058 break;
2059
2060 spfn = max(spfn, start_pfn);
2061 epfn = min(epfn, end_pfn);
2062
2063 while (spfn < epfn) {
2064 unsigned long mo_pfn = ALIGN(spfn + 1, MAX_ORDER_NR_PAGES);
2065 unsigned long chunk_end = min(mo_pfn, epfn);
2066
2067 nr_pages += deferred_init_pages(zone, spfn, chunk_end);
2068 deferred_free_pages(spfn, chunk_end - spfn);
2069
2070 spfn = chunk_end;
2071
2072 if (can_resched)
2073 cond_resched();
2074 else
2075 touch_nmi_watchdog();
2076 }
2077 }
2078
2079 return nr_pages;
2080 }
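/*
 * Illustrative sketch (assuming MAX_ORDER_NR_PAGES == 0x400): a free range
 * covering pfns [0x3f0, 0x1000) is walked by the inner loop above as
 *
 *	[0x3f0, 0x400)	- partial chunk up to the first MAX_ORDER boundary
 *	[0x400, 0x800)
 *	[0x800, 0xc00)
 *	[0xc00, 0x1000)
 *
 * with cond_resched() or touch_nmi_watchdog() called after each chunk.
 */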
2081
2082 static void __init
2083 deferred_init_memmap_job(unsigned long start_pfn, unsigned long end_pfn,
2084 void *arg)
2085 {
2086 struct zone *zone = arg;
2087
2088 deferred_init_memmap_chunk(start_pfn, end_pfn, zone, true);
2089 }
2090
2091 static unsigned int __init
2092 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2093 {
2094 return max(cpumask_weight(node_cpumask), 1U);
2095 }
2096
2097 /* Initialise remaining memory on a node */
2098 static int __init deferred_init_memmap(void *data)
2099 {
2100 pg_data_t *pgdat = data;
2101 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2102 int max_threads = deferred_page_init_max_threads(cpumask);
2103 unsigned long first_init_pfn, last_pfn, flags;
2104 unsigned long start = jiffies;
2105 struct zone *zone;
2106
2107 /* Bind memory initialisation thread to a local node if possible */
2108 if (!cpumask_empty(cpumask))
2109 set_cpus_allowed_ptr(current, cpumask);
2110
2111 pgdat_resize_lock(pgdat, &flags);
2112 first_init_pfn = pgdat->first_deferred_pfn;
2113 if (first_init_pfn == ULONG_MAX) {
2114 pgdat_resize_unlock(pgdat, &flags);
2115 pgdat_init_report_one_done();
2116 return 0;
2117 }
2118
2119 /* Sanity check boundaries */
2120 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2121 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2122 pgdat->first_deferred_pfn = ULONG_MAX;
2123
2124 /*
2125 * Once we unlock here, the zone cannot be grown anymore, thus if an
2126 * interrupt thread must allocate this early in boot, the zone must be
2127 * pre-grown prior to the start of deferred page initialization.
2128 */
2129 pgdat_resize_unlock(pgdat, &flags);
2130
2131 /* Only the highest zone is deferred */
2132 zone = pgdat->node_zones + pgdat->nr_zones - 1;
2133 last_pfn = SECTION_ALIGN_UP(zone_end_pfn(zone));
2134
2135 struct padata_mt_job job = {
2136 .thread_fn = deferred_init_memmap_job,
2137 .fn_arg = zone,
2138 .start = first_init_pfn,
2139 .size = last_pfn - first_init_pfn,
2140 .align = PAGES_PER_SECTION,
2141 .min_chunk = PAGES_PER_SECTION,
2142 .max_threads = max_threads,
2143 .numa_aware = false,
2144 };
2145
2146 padata_do_multithreaded(&job);
2147
2148 /* Sanity check that the next zone really is unpopulated */
2149 WARN_ON(pgdat->nr_zones < MAX_NR_ZONES && populated_zone(++zone));
2150
2151 pr_info("node %d deferred pages initialised in %ums\n",
2152 pgdat->node_id, jiffies_to_msecs(jiffies - start));
2153
2154 pgdat_init_report_one_done();
2155 return 0;
2156 }
2157
2158 /*
2159 * If this zone has deferred pages, try to grow it by initializing enough
2160 * deferred pages to satisfy the allocation specified by order, rounded up to
2161 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments
2162 * of SECTION_SIZE bytes by initializing struct pages in increments of
2163 * PAGES_PER_SECTION * sizeof(struct page) bytes.
2164 *
2165 * Return true when zone was grown, otherwise return false. We return true even
2166 * when we grow less than requested, to let the caller decide if there are
2167 * enough pages to satisfy the allocation.
2168 */
2169 bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
2170 {
2171 unsigned long nr_pages_needed = SECTION_ALIGN_UP(1 << order);
2172 pg_data_t *pgdat = zone->zone_pgdat;
2173 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2174 unsigned long spfn, epfn, flags;
2175 unsigned long nr_pages = 0;
2176
2177 /* Only the last zone may have deferred pages */
2178 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2179 return false;
2180
2181 pgdat_resize_lock(pgdat, &flags);
2182
2183 /*
2184 * If someone grew this zone while we were waiting for spinlock, return
2185 * true, as there might be enough pages already.
2186 */
2187 if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2188 pgdat_resize_unlock(pgdat, &flags);
2189 return true;
2190 }
2191
2192 /*
2193 * Initialize at least nr_pages_needed in section chunks.
2194 * If a section has less free memory than nr_pages_needed, the next
2195 * section will also be initialized.
2196 * Note, that it still does not guarantee that allocation of order can
2197 * be satisfied if the sections are fragmented because of memblock
2198 * allocations.
2199 */
2200 for (spfn = first_deferred_pfn, epfn = SECTION_ALIGN_UP(spfn + 1);
2201 nr_pages < nr_pages_needed && spfn < zone_end_pfn(zone);
2202 spfn = epfn, epfn += PAGES_PER_SECTION) {
2203 nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone, false);
2204 }
2205
2206 /*
2207 * There were no pages to initialize and free which means the zone's
2208 * memory map is completely initialized.
2209 */
2210 pgdat->first_deferred_pfn = nr_pages ? spfn : ULONG_MAX;
2211
2212 pgdat_resize_unlock(pgdat, &flags);
2213
2214 return nr_pages > 0;
2215 }
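/*
 * Illustrative sketch (assuming 4KiB pages and 128MiB sections, i.e.
 * PAGES_PER_SECTION == 32768): a failing order-9 allocation requests
 * SECTION_ALIGN_UP(1 << 9) = 32768 pages, so the loop above initializes
 * at least one full section's worth of struct pages per call, starting at
 * first_deferred_pfn and advancing section by section until enough pages
 * were initialized or the zone ends.
 */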
2216
2217 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2218
2219 #ifdef CONFIG_CMA
2220 void __init init_cma_reserved_pageblock(struct page *page)
2221 {
2222 unsigned i = pageblock_nr_pages;
2223 struct page *p = page;
2224
2225 do {
2226 __ClearPageReserved(p);
2227 set_page_count(p, 0);
2228 } while (++p, --i);
2229
2230 init_pageblock_migratetype(page, MIGRATE_CMA, false);
2231 set_page_refcounted(page);
2232 /* pages were reserved and not allocated */
2233 clear_page_tag_ref(page);
2234 __free_pages(page, pageblock_order);
2235
2236 adjust_managed_page_count(page, pageblock_nr_pages);
2237 page_zone(page)->cma_pages += pageblock_nr_pages;
2238 }
2239 /*
2240 * Similar to above, but only set the migrate type and stats.
2241 */
2242 void __init init_cma_pageblock(struct page *page)
2243 {
2244 init_pageblock_migratetype(page, MIGRATE_CMA, false);
2245 adjust_managed_page_count(page, pageblock_nr_pages);
2246 page_zone(page)->cma_pages += pageblock_nr_pages;
2247 }
2248 #endif
2249
2250 void set_zone_contiguous(struct zone *zone)
2251 {
2252 unsigned long block_start_pfn = zone->zone_start_pfn;
2253 unsigned long block_end_pfn;
2254
2255 block_end_pfn = pageblock_end_pfn(block_start_pfn);
2256 for (; block_start_pfn < zone_end_pfn(zone);
2257 block_start_pfn = block_end_pfn,
2258 block_end_pfn += pageblock_nr_pages) {
2259
2260 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
2261
2262 if (!__pageblock_pfn_to_page(block_start_pfn,
2263 block_end_pfn, zone))
2264 return;
2265 cond_resched();
2266 }
2267
2268 /* We confirm that there is no hole */
2269 zone->contiguous = true;
2270 }
2271
2272 /*
2273 * Check if a PFN range intersects multiple zones on one or more
2274 * NUMA nodes. Specify the @nid argument if it is known that this
2275 * PFN range is on one node, NUMA_NO_NODE otherwise.
2276 */
2277 bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
2278 unsigned long nr_pages)
2279 {
2280 struct zone *zone, *izone = NULL;
2281
2282 for_each_zone(zone) {
2283 if (nid != NUMA_NO_NODE && zone_to_nid(zone) != nid)
2284 continue;
2285
2286 if (zone_intersects(zone, start_pfn, nr_pages)) {
2287 if (izone != NULL)
2288 return true;
2289 izone = zone;
2290 }
2291
2292 }
2293
2294 return false;
2295 }
2296
2297 static void __init mem_init_print_info(void);
2298 void __init page_alloc_init_late(void)
2299 {
2300 struct zone *zone;
2301 int nid;
2302
2303 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2304
2305 /* There will be num_node_state(N_MEMORY) threads */
2306 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2307 for_each_node_state(nid, N_MEMORY) {
2308 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2309 }
2310
2311 /* Block until all are initialised */
2312 wait_for_completion(&pgdat_init_all_done_comp);
2313
2314 /*
2315 * We initialized the rest of the deferred pages. Permanently disable
2316 * on-demand struct page initialization.
2317 */
2318 static_branch_disable(&deferred_pages);
2319
2320 /* Reinit limits that are based on free pages after the kernel is up */
2321 files_maxfiles_init();
2322 #endif
2323
2324 /* Accounting of total+free memory is stable at this point. */
2325 mem_init_print_info();
2326 buffer_init();
2327
2328 /* Discard memblock private memory */
2329 memblock_discard();
2330
2331 for_each_node_state(nid, N_MEMORY)
2332 shuffle_free_memory(NODE_DATA(nid));
2333
2334 for_each_populated_zone(zone)
2335 set_zone_contiguous(zone);
2336
2337 /* Initialize page ext after all struct pages are initialized. */
2338 if (deferred_struct_pages)
2339 page_ext_init();
2340
2341 page_alloc_sysctl_init();
2342 }
2343
2344 /*
2345 * Adaptive scale is meant to reduce sizes of hash tables on large memory
2346 * machines. As memory size is increased the scale is also increased but at
2347 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
2348 * quadruples the scale is increased by one, which means the size of hash table
2349 * only doubles, instead of quadrupling as well.
2350 * Because 32-bit systems cannot have the large amounts of physical memory
2351 * where this scaling makes sense, it is disabled on such platforms.
2352 */
2353 #if __BITS_PER_LONG > 32
2354 #define ADAPT_SCALE_BASE (64ul << 30)
2355 #define ADAPT_SCALE_SHIFT 2
2356 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
2357 #endif
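/*
 * Worked example (hypothetical machine sizes): with ADAPT_SCALE_BASE at 64G,
 * a 256G machine bumps the scale once and a 1T machine bumps it twice, so
 * quadrupling memory from 64G to 256G only doubles the default size of a
 * hash table computed below instead of quadrupling it.
 */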
2358
2359 /*
2360 * allocate a large system hash table from bootmem
2361 * - it is assumed that the hash table must contain an exact power-of-2
2362 * quantity of entries
2363 * - limit is the number of hash buckets, not the total allocation size
2364 */
2365 void *__init alloc_large_system_hash(const char *tablename,
2366 unsigned long bucketsize,
2367 unsigned long numentries,
2368 int scale,
2369 int flags,
2370 unsigned int *_hash_shift,
2371 unsigned int *_hash_mask,
2372 unsigned long low_limit,
2373 unsigned long high_limit)
2374 {
2375 unsigned long long max = high_limit;
2376 unsigned long log2qty, size;
2377 void *table;
2378 gfp_t gfp_flags;
2379 bool virt;
2380 bool huge;
2381
2382 /* allow the kernel cmdline to have a say */
2383 if (!numentries) {
2384 /* round applicable memory size up to nearest megabyte */
2385 numentries = nr_kernel_pages;
2386
2387 /* It isn't necessary when PAGE_SIZE >= 1MB */
2388 if (PAGE_SIZE < SZ_1M)
2389 numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
2390
2391 #if __BITS_PER_LONG > 32
2392 if (!high_limit) {
2393 unsigned long adapt;
2394
2395 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
2396 adapt <<= ADAPT_SCALE_SHIFT)
2397 scale++;
2398 }
2399 #endif
2400
2401 /* limit to 1 bucket per 2^scale bytes of low memory */
2402 if (scale > PAGE_SHIFT)
2403 numentries >>= (scale - PAGE_SHIFT);
2404 else
2405 numentries <<= (PAGE_SHIFT - scale);
2406
2407 if (unlikely((numentries * bucketsize) < PAGE_SIZE))
2408 numentries = PAGE_SIZE / bucketsize;
2409 }
2410 numentries = roundup_pow_of_two(numentries);
2411
2412 /* limit allocation size to 1/16 total memory by default */
2413 if (max == 0) {
2414 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
2415 do_div(max, bucketsize);
2416 }
2417 max = min(max, 0x80000000ULL);
2418
2419 if (numentries < low_limit)
2420 numentries = low_limit;
2421 if (numentries > max)
2422 numentries = max;
2423
2424 log2qty = ilog2(numentries);
2425
2426 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
2427 do {
2428 virt = false;
2429 size = bucketsize << log2qty;
2430 if (flags & HASH_EARLY) {
2431 if (flags & HASH_ZERO)
2432 table = memblock_alloc(size, SMP_CACHE_BYTES);
2433 else
2434 table = memblock_alloc_raw(size,
2435 SMP_CACHE_BYTES);
2436 } else if (get_order(size) > MAX_PAGE_ORDER || hashdist) {
2437 table = vmalloc_huge(size, gfp_flags);
2438 virt = true;
2439 if (table)
2440 huge = is_vm_area_hugepages(table);
2441 } else {
2442 /*
2443 * If bucketsize is not a power-of-two, we may free
2444 * some pages at the end of the hash table, which
2445 * alloc_pages_exact() does automatically
2446 */
2447 table = alloc_pages_exact(size, gfp_flags);
2448 kmemleak_alloc(table, size, 1, gfp_flags);
2449 }
2450 } while (!table && size > PAGE_SIZE && --log2qty);
2451
2452 if (!table)
2453 panic("Failed to allocate %s hash table\n", tablename);
2454
2455 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
2456 tablename, 1UL << log2qty, get_order(size), size,
2457 virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
2458
2459 if (_hash_shift)
2460 *_hash_shift = log2qty;
2461 if (_hash_mask)
2462 *_hash_mask = (1 << log2qty) - 1;
2463
2464 return table;
2465 }
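/*
 * Usage sketch (modelled on callers such as the inode cache; the values
 * shown are illustrative, not prescriptive):
 *
 *	table = alloc_large_system_hash("Inode-cache",
 *					sizeof(struct hlist_head),
 *					ihash_entries,
 *					14,		- one bucket per 16KB of low memory
 *					HASH_ZERO,
 *					&i_hash_shift,
 *					&i_hash_mask,
 *					0, 0);
 */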
2466
2467 void __init memblock_free_pages(unsigned long pfn, unsigned int order)
2468 {
2469 struct page *page = pfn_to_page(pfn);
2470
2471 if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
2472 int nid = early_pfn_to_nid(pfn);
2473
2474 if (!early_page_initialised(pfn, nid))
2475 return;
2476 }
2477
2478 if (!kmsan_memblock_free_pages(page, order)) {
2479 /* KMSAN will take care of these pages. */
2480 return;
2481 }
2482
2483 /* pages were reserved and not allocated */
2484 clear_page_tag_ref(page);
2485 __free_pages_core(page, order, MEMINIT_EARLY);
2486 }
2487
2488 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
2489 EXPORT_SYMBOL(init_on_alloc);
2490
2491 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
2492 EXPORT_SYMBOL(init_on_free);
2493
2494 static bool _init_on_alloc_enabled_early __read_mostly
2495 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
2496 static int __init early_init_on_alloc(char *buf)
2497 {
2498
2499 return kstrtobool(buf, &_init_on_alloc_enabled_early);
2500 }
2501 early_param("init_on_alloc", early_init_on_alloc);
2502
2503 static bool _init_on_free_enabled_early __read_mostly
2504 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
2505 static int __init early_init_on_free(char *buf)
2506 {
2507 return kstrtobool(buf, &_init_on_free_enabled_early);
2508 }
2509 early_param("init_on_free", early_init_on_free);
2510
2511 DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
2512
2513 static bool check_pages_enabled_early __initdata;
2514
2515 static int __init early_check_pages(char *buf)
2516 {
2517 return kstrtobool(buf, &check_pages_enabled_early);
2518 }
2519 early_param("check_pages", early_check_pages);
2520
2521 /*
2522 * Enable static keys related to various memory debugging and hardening options.
2523 * Some override others, and depend on early params that are evaluated in the
2524 * order of appearance. So we need to first gather the full picture of what was
2525 * enabled, and then make decisions.
2526 */
2527 static void __init mem_debugging_and_hardening_init(void)
2528 {
2529 bool page_poisoning_requested = false;
2530 bool want_check_pages = check_pages_enabled_early;
2531
2532 #ifdef CONFIG_PAGE_POISONING
2533 /*
2534 * On architectures without ARCH_SUPPORTS_DEBUG_PAGEALLOC, page poisoning
2535 * stands in for debug page alloc. If either option is enabled, enable poisoning.
2536 */
2537 if (page_poisoning_enabled() ||
2538 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
2539 debug_pagealloc_enabled())) {
2540 static_branch_enable(&_page_poisoning_enabled);
2541 page_poisoning_requested = true;
2542 want_check_pages = true;
2543 }
2544 #endif
2545
2546 if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
2547 page_poisoning_requested) {
2548 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
2549 "will take precedence over init_on_alloc and init_on_free\n");
2550 _init_on_alloc_enabled_early = false;
2551 _init_on_free_enabled_early = false;
2552 }
2553
2554 if (_init_on_alloc_enabled_early) {
2555 want_check_pages = true;
2556 static_branch_enable(&init_on_alloc);
2557 } else {
2558 static_branch_disable(&init_on_alloc);
2559 }
2560
2561 if (_init_on_free_enabled_early) {
2562 want_check_pages = true;
2563 static_branch_enable(&init_on_free);
2564 } else {
2565 static_branch_disable(&init_on_free);
2566 }
2567
2568 if (IS_ENABLED(CONFIG_KMSAN) &&
2569 (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
2570 pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
2571
2572 #ifdef CONFIG_DEBUG_PAGEALLOC
2573 if (debug_pagealloc_enabled()) {
2574 want_check_pages = true;
2575 static_branch_enable(&_debug_pagealloc_enabled);
2576
2577 if (debug_guardpage_minorder())
2578 static_branch_enable(&_debug_guardpage_enabled);
2579 }
2580 #endif
2581
2582 /*
2583 * Any page debugging or hardening option also enables sanity checking
2584 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
2585 * enabled already.
2586 */
2587 if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
2588 static_branch_enable(&check_pages_enabled);
2589 }
2590
2591 /* Report memory auto-initialization states for this boot. */
2592 static void __init report_meminit(void)
2593 {
2594 const char *stack;
2595
2596 if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
2597 stack = "all(pattern)";
2598 else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
2599 stack = "all(zero)";
2600 else
2601 stack = "off";
2602
2603 pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
2604 stack, str_on_off(want_init_on_alloc(GFP_KERNEL)),
2605 str_on_off(want_init_on_free()));
2606 if (want_init_on_free())
2607 pr_info("mem auto-init: clearing system memory may take some time...\n");
2608 }
2609
2610 static void __init mem_init_print_info(void)
2611 {
2612 unsigned long physpages, codesize, datasize, rosize, bss_size;
2613 unsigned long init_code_size, init_data_size;
2614
2615 physpages = get_num_physpages();
2616 codesize = _etext - _stext;
2617 datasize = _edata - _sdata;
2618 rosize = __end_rodata - __start_rodata;
2619 bss_size = __bss_stop - __bss_start;
2620 init_data_size = __init_end - __init_begin;
2621 init_code_size = _einittext - _sinittext;
2622
2623 /*
2624 * Detect special cases and adjust section sizes accordingly:
2625 * 1) .init.* may be embedded into .data sections
2626 * 2) .init.text.* may be out of [__init_begin, __init_end],
2627 * please refer to arch/tile/kernel/vmlinux.lds.S.
2628 * 3) .rodata.* may be embedded into .text or .data sections.
2629 */
2630 #define adj_init_size(start, end, size, pos, adj) \
2631 do { \
2632 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
2633 size -= adj; \
2634 } while (0)
2635
2636 adj_init_size(__init_begin, __init_end, init_data_size,
2637 _sinittext, init_code_size);
2638 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
2639 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
2640 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
2641 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
2642
2643 #undef adj_init_size
2644
2645 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
2646 #ifdef CONFIG_HIGHMEM
2647 ", %luK highmem"
2648 #endif
2649 ")\n",
2650 K(nr_free_pages()), K(physpages),
2651 codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
2652 (init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
2653 K(physpages - totalram_pages() - totalcma_pages),
2654 K(totalcma_pages)
2655 #ifdef CONFIG_HIGHMEM
2656 , K(totalhigh_pages())
2657 #endif
2658 );
2659 }
2660
2661 #ifndef __HAVE_COLOR_ZERO_PAGE
2662 /*
2663 * Architectures that define __HAVE_COLOR_ZERO_PAGE must provide this function themselves.
2664 */
2665 void __init __weak arch_setup_zero_pages(void)
2666 {
2667 __zero_page = virt_to_page(empty_zero_page);
2668 }
2669 #endif
2670
2671 static void __init init_zero_page_pfn(void)
2672 {
2673 arch_setup_zero_pages();
2674 zero_page_pfn = page_to_pfn(ZERO_PAGE(0));
2675 }
2676
2677 void __init __weak arch_mm_preinit(void)
2678 {
2679 }
2680
2681 void __init __weak mem_init(void)
2682 {
2683 }
2684
2685 void __init mm_core_init_early(void)
2686 {
2687 hugetlb_cma_reserve();
2688 hugetlb_bootmem_alloc();
2689
2690 free_area_init();
2691 }
2692
2693 /*
2694 * Set up kernel memory allocators
2695 */
2696 void __init mm_core_init(void)
2697 {
2698 arch_mm_preinit();
2699 init_zero_page_pfn();
2700
2701 /* Initializations relying on SMP setup */
2702 BUILD_BUG_ON(MAX_ZONELISTS > 2);
2703 build_all_zonelists(NULL);
2704 page_alloc_init_cpuhp();
2705 alloc_tag_sec_init();
2706 /*
2707 * page_ext requires contiguous pages,
2708 * bigger than MAX_PAGE_ORDER, unless SPARSEMEM is used.
2709 */
2710 page_ext_init_flatmem();
2711 mem_debugging_and_hardening_init();
2712 kfence_alloc_pool_and_metadata();
2713 report_meminit();
2714 kmsan_init_shadow();
2715 stack_depot_early_init();
2716
2717 /*
2718 * KHO memory setup must happen while memblock is still active, but
2719 * as close as possible to buddy initialization
2720 */
2721 kho_memory_init();
2722
2723 memblock_free_all();
2724 mem_init();
2725 kmem_cache_init();
2726 /*
2727 * page_owner must be initialized after buddy is ready, and also after
2728 * slab is ready so that stack_depot_init() works properly
2729 */
2730 page_ext_init_flatmem_late();
2731 kmemleak_init();
2732 ptlock_cache_init();
2733 pgtable_cache_init();
2734 debug_objects_mem_init();
2735 vmalloc_init();
2736 /* If struct page init was not deferred, initialize page_ext now, as vmap is fully initialized */
2737 if (!deferred_struct_pages)
2738 page_ext_init();
2739 /* Should be run before the first non-init thread is created */
2740 init_espfix_bsp();
2741 /* Should be run after espfix64 is set up. */
2742 pti_init();
2743 kmsan_init_runtime();
2744 mm_cache_init();
2745 execmem_init();
2746 }
2747