xref: /linux/mm/mm_init.c (revision 100c85421b52e41269ada88f7d71a6b8a06c7a11)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
26b74ab97SMel Gorman /*
36b74ab97SMel Gorman  * mm_init.c - Memory initialisation verification and debugging
46b74ab97SMel Gorman  *
56b74ab97SMel Gorman  * Copyright 2008 IBM Corporation
66b74ab97SMel Gorman  * Author Mel Gorman <mel@csn.ul.ie>
76b74ab97SMel Gorman  *
86b74ab97SMel Gorman  */
96b74ab97SMel Gorman #include <linux/kernel.h>
106b74ab97SMel Gorman #include <linux/init.h>
11ff7ea79cSNishanth Aravamudan #include <linux/kobject.h>
12b95f1b31SPaul Gortmaker #include <linux/export.h>
13917d9290STim Chen #include <linux/memory.h>
14917d9290STim Chen #include <linux/notifier.h>
157e18adb4SMel Gorman #include <linux/sched.h>
1656f3547bSFeng Tang #include <linux/mman.h>
179420f89dSMike Rapoport (IBM) #include <linux/memblock.h>
189420f89dSMike Rapoport (IBM) #include <linux/page-isolation.h>
199420f89dSMike Rapoport (IBM) #include <linux/padata.h>
209420f89dSMike Rapoport (IBM) #include <linux/nmi.h>
219420f89dSMike Rapoport (IBM) #include <linux/buffer_head.h>
229420f89dSMike Rapoport (IBM) #include <linux/kmemleak.h>
23b7ec1bf3SMike Rapoport (IBM) #include <linux/kfence.h>
24b7ec1bf3SMike Rapoport (IBM) #include <linux/page_ext.h>
25b7ec1bf3SMike Rapoport (IBM) #include <linux/pti.h>
26b7ec1bf3SMike Rapoport (IBM) #include <linux/pgtable.h>
27eb8589b4SMike Rapoport (IBM) #include <linux/swap.h>
28eb8589b4SMike Rapoport (IBM) #include <linux/cma.h>
297ea6ec4cSMa Wupeng #include <linux/crash_dump.h>
30708614e6SMel Gorman #include "internal.h"
31d5d2c02aSMike Rapoport (IBM) #include "slab.h"
329420f89dSMike Rapoport (IBM) #include "shuffle.h"
336b74ab97SMel Gorman 
34b7ec1bf3SMike Rapoport (IBM) #include <asm/setup.h>
35b7ec1bf3SMike Rapoport (IBM) 
365e9426abSNishanth Aravamudan #ifdef CONFIG_DEBUG_MEMORY_INIT
37194e8151SRasmus Villemoes int __meminitdata mminit_loglevel;
386b74ab97SMel Gorman 
3968ad8df4SMel Gorman /* The zonelists are simply reported, validation is manual. */
400e2342c7SRasmus Villemoes void __init mminit_verify_zonelist(void)
4168ad8df4SMel Gorman {
4268ad8df4SMel Gorman 	int nid;
4368ad8df4SMel Gorman 
4468ad8df4SMel Gorman 	if (mminit_loglevel < MMINIT_VERIFY)
4568ad8df4SMel Gorman 		return;
4668ad8df4SMel Gorman 
4768ad8df4SMel Gorman 	for_each_online_node(nid) {
4868ad8df4SMel Gorman 		pg_data_t *pgdat = NODE_DATA(nid);
4968ad8df4SMel Gorman 		struct zone *zone;
5068ad8df4SMel Gorman 		struct zoneref *z;
5168ad8df4SMel Gorman 		struct zonelist *zonelist;
5268ad8df4SMel Gorman 		int i, listid, zoneid;
5368ad8df4SMel Gorman 
54e46b893dSMateusz Nosek 		BUILD_BUG_ON(MAX_ZONELISTS > 2);
5568ad8df4SMel Gorman 		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
5668ad8df4SMel Gorman 
5768ad8df4SMel Gorman 			/* Identify the zone and nodelist */
5868ad8df4SMel Gorman 			zoneid = i % MAX_NR_ZONES;
5968ad8df4SMel Gorman 			listid = i / MAX_NR_ZONES;
6068ad8df4SMel Gorman 			zonelist = &pgdat->node_zonelists[listid];
6168ad8df4SMel Gorman 			zone = &pgdat->node_zones[zoneid];
6268ad8df4SMel Gorman 			if (!populated_zone(zone))
6368ad8df4SMel Gorman 				continue;
6468ad8df4SMel Gorman 
6568ad8df4SMel Gorman 			/* Print information about the zonelist */
6668ad8df4SMel Gorman 			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
6768ad8df4SMel Gorman 				listid > 0 ? "thisnode" : "general", nid,
6868ad8df4SMel Gorman 				zone->name);
6968ad8df4SMel Gorman 
7068ad8df4SMel Gorman 			/* Iterate the zonelist */
71c1093b74SPavel Tatashin 			for_each_zone_zonelist(zone, z, zonelist, zoneid)
72c1093b74SPavel Tatashin 				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
731170532bSJoe Perches 			pr_cont("\n");
7468ad8df4SMel Gorman 		}
7568ad8df4SMel Gorman 	}
7668ad8df4SMel Gorman }
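
/*
 * Illustrative output, assuming a hypothetical two-node NUMA machine
 * (actual node IDs and zone names depend on the configuration):
 *
 *   mminit::zonelist general 0:Normal = 0:Normal 0:DMA32 0:DMA 1:Normal
 *   mminit::zonelist thisnode 0:Normal = 0:Normal 0:DMA32 0:DMA
 *
 * Each line lists the fallback order of zones that an allocation
 * targeting the named zone on the named node may use.
 */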
7768ad8df4SMel Gorman 
78708614e6SMel Gorman void __init mminit_verify_pageflags_layout(void)
79708614e6SMel Gorman {
80708614e6SMel Gorman 	int shift, width;
81708614e6SMel Gorman 	unsigned long or_mask, add_mask;
82708614e6SMel Gorman 
83daee07bfSMiaohe Lin 	shift = BITS_PER_LONG;
8486fea8b4SJing Xia 	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
85ec1c86b2SYu Zhao 		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
86708614e6SMel Gorman 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
87ec1c86b2SYu Zhao 		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
88708614e6SMel Gorman 		SECTIONS_WIDTH,
89708614e6SMel Gorman 		NODES_WIDTH,
90708614e6SMel Gorman 		ZONES_WIDTH,
9190572890SPeter Zijlstra 		LAST_CPUPID_WIDTH,
9286fea8b4SJing Xia 		KASAN_TAG_WIDTH,
93ec1c86b2SYu Zhao 		LRU_GEN_WIDTH,
94ec1c86b2SYu Zhao 		LRU_REFS_WIDTH,
95708614e6SMel Gorman 		NR_PAGEFLAGS);
96708614e6SMel Gorman 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
9786fea8b4SJing Xia 		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
98708614e6SMel Gorman 		SECTIONS_SHIFT,
99708614e6SMel Gorman 		NODES_SHIFT,
100a4e1b4c6SMel Gorman 		ZONES_SHIFT,
10186fea8b4SJing Xia 		LAST_CPUPID_SHIFT,
10286fea8b4SJing Xia 		KASAN_TAG_WIDTH);
103a4e1b4c6SMel Gorman 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
10486fea8b4SJing Xia 		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
105708614e6SMel Gorman 		(unsigned long)SECTIONS_PGSHIFT,
106708614e6SMel Gorman 		(unsigned long)NODES_PGSHIFT,
107a4e1b4c6SMel Gorman 		(unsigned long)ZONES_PGSHIFT,
10886fea8b4SJing Xia 		(unsigned long)LAST_CPUPID_PGSHIFT,
10986fea8b4SJing Xia 		(unsigned long)KASAN_TAG_PGSHIFT);
110a4e1b4c6SMel Gorman 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
111a4e1b4c6SMel Gorman 		"Node/Zone ID: %lu -> %lu\n",
112a4e1b4c6SMel Gorman 		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
113a4e1b4c6SMel Gorman 		(unsigned long)ZONEID_PGOFF);
114708614e6SMel Gorman 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
115a4e1b4c6SMel Gorman 		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
116708614e6SMel Gorman 		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
117708614e6SMel Gorman #ifdef NODE_NOT_IN_PAGE_FLAGS
118708614e6SMel Gorman 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
119708614e6SMel Gorman 		"Node not in page flags");
120708614e6SMel Gorman #endif
12190572890SPeter Zijlstra #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
122a4e1b4c6SMel Gorman 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
12390572890SPeter Zijlstra 		"Last cpupid not in page flags");
124a4e1b4c6SMel Gorman #endif
125708614e6SMel Gorman 
126708614e6SMel Gorman 	if (SECTIONS_WIDTH) {
127708614e6SMel Gorman 		shift -= SECTIONS_WIDTH;
128708614e6SMel Gorman 		BUG_ON(shift != SECTIONS_PGSHIFT);
129708614e6SMel Gorman 	}
130708614e6SMel Gorman 	if (NODES_WIDTH) {
131708614e6SMel Gorman 		shift -= NODES_WIDTH;
132708614e6SMel Gorman 		BUG_ON(shift != NODES_PGSHIFT);
133708614e6SMel Gorman 	}
134708614e6SMel Gorman 	if (ZONES_WIDTH) {
135708614e6SMel Gorman 		shift -= ZONES_WIDTH;
136708614e6SMel Gorman 		BUG_ON(shift != ZONES_PGSHIFT);
137708614e6SMel Gorman 	}
138708614e6SMel Gorman 
139708614e6SMel Gorman 	/* Check for bitmask overlaps */
140708614e6SMel Gorman 	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
141708614e6SMel Gorman 			(NODES_MASK << NODES_PGSHIFT) |
142708614e6SMel Gorman 			(SECTIONS_MASK << SECTIONS_PGSHIFT);
143708614e6SMel Gorman 	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
144708614e6SMel Gorman 			(NODES_MASK << NODES_PGSHIFT) +
145708614e6SMel Gorman 			(SECTIONS_MASK << SECTIONS_PGSHIFT);
146708614e6SMel Gorman 	BUG_ON(or_mask != add_mask);
147708614e6SMel Gorman }
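
/*
 * Sketch of the layout being verified above, assuming a 64-bit kernel
 * (the actual widths are config-dependent, and a zero-width field
 * simply vanishes):
 *
 *   bit 63                                                      bit 0
 *   | SECTION | NODE | ZONE | LAST_CPUPID | KASAN_TAG | ... | FLAGS |
 *
 * Fields are carved from the top of page->flags downwards, which is
 * why the BUG_ON()s can recompute each *_PGSHIFT by subtracting the
 * widths from shift = BITS_PER_LONG.
 */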
148708614e6SMel Gorman 
1496b74ab97SMel Gorman static __init int set_mminit_loglevel(char *str)
1506b74ab97SMel Gorman {
1516b74ab97SMel Gorman 	get_option(&str, &mminit_loglevel);
1526b74ab97SMel Gorman 	return 0;
1536b74ab97SMel Gorman }
1546b74ab97SMel Gorman early_param("mminit_loglevel", set_mminit_loglevel);
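
/*
 * Usage sketch: booting with "mminit_loglevel=2" (or higher) enables
 * both the MMINIT_VERIFY checks and the MMINIT_TRACE output of the
 * functions above; the exact level values come from enum mminit_level
 * in mm/internal.h.
 */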
1555e9426abSNishanth Aravamudan #endif /* CONFIG_DEBUG_MEMORY_INIT */
156ff7ea79cSNishanth Aravamudan 
157ff7ea79cSNishanth Aravamudan struct kobject *mm_kobj;
158ff7ea79cSNishanth Aravamudan 
159917d9290STim Chen #ifdef CONFIG_SMP
160917d9290STim Chen s32 vm_committed_as_batch = 32;
161917d9290STim Chen 
16256f3547bSFeng Tang void mm_compute_batch(int overcommit_policy)
163917d9290STim Chen {
164917d9290STim Chen 	u64 memsized_batch;
165917d9290STim Chen 	s32 nr = num_present_cpus();
166917d9290STim Chen 	s32 batch = max_t(s32, nr*2, 32);
16756f3547bSFeng Tang 	unsigned long ram_pages = totalram_pages();
168917d9290STim Chen 
16956f3547bSFeng Tang 	/*
17056f3547bSFeng Tang 	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
17156f3547bSFeng Tang 	 * (total memory/#cpus), and lift it to 25% for other policies
17256f3547bSFeng Tang 	 * to easy the possible lock contention for percpu_counter
17356f3547bSFeng Tang 	 * to ease the possible lock contention for percpu_counter
17456f3547bSFeng Tang 	 */
17556f3547bSFeng Tang 	if (overcommit_policy == OVERCOMMIT_NEVER)
17656f3547bSFeng Tang 		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
17756f3547bSFeng Tang 	else
17856f3547bSFeng Tang 		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);
179917d9290STim Chen 
180917d9290STim Chen 	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
181917d9290STim Chen }
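
/*
 * Worked example on a hypothetical box with 16 present CPUs and 64 GiB
 * of RAM (totalram_pages() == 16777216 with 4 KiB pages):
 *
 *   OVERCOMMIT_NEVER: 16777216 / 16 / 256 = 4096 pages per batch
 *   other policies:   16777216 / 16 / 4   = 262144 pages per batch
 *
 * Both exceed the floor of max_t(s32, nr * 2, 32) = 32, so the
 * memory-sized batch wins.
 */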
182917d9290STim Chen 
183917d9290STim Chen static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
184917d9290STim Chen 					unsigned long action, void *arg)
185917d9290STim Chen {
186917d9290STim Chen 	switch (action) {
187917d9290STim Chen 	case MEM_ONLINE:
188917d9290STim Chen 	case MEM_OFFLINE:
18956f3547bSFeng Tang 		mm_compute_batch(sysctl_overcommit_memory);
19001359eb2SGustavo A. R. Silva 		break;
191917d9290STim Chen 	default:
192917d9290STim Chen 		break;
193917d9290STim Chen 	}
194917d9290STim Chen 	return NOTIFY_OK;
195917d9290STim Chen }
196917d9290STim Chen 
197917d9290STim Chen static int __init mm_compute_batch_init(void)
198917d9290STim Chen {
19956f3547bSFeng Tang 	mm_compute_batch(sysctl_overcommit_memory);
2001eeaa4fdSLiu Shixin 	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
201917d9290STim Chen 	return 0;
202917d9290STim Chen }
203917d9290STim Chen 
204917d9290STim Chen __initcall(mm_compute_batch_init);
205917d9290STim Chen 
206917d9290STim Chen #endif
207917d9290STim Chen 
208ff7ea79cSNishanth Aravamudan static int __init mm_sysfs_init(void)
209ff7ea79cSNishanth Aravamudan {
210ff7ea79cSNishanth Aravamudan 	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
211ff7ea79cSNishanth Aravamudan 	if (!mm_kobj)
212ff7ea79cSNishanth Aravamudan 		return -ENOMEM;
213ff7ea79cSNishanth Aravamudan 
214ff7ea79cSNishanth Aravamudan 	return 0;
215ff7ea79cSNishanth Aravamudan }
216e82cb95dSHugh Dickins postcore_initcall(mm_sysfs_init);
2179420f89dSMike Rapoport (IBM) 
2189420f89dSMike Rapoport (IBM) static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
2199420f89dSMike Rapoport (IBM) static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
2209420f89dSMike Rapoport (IBM) static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
2219420f89dSMike Rapoport (IBM) 
2229420f89dSMike Rapoport (IBM) static unsigned long required_kernelcore __initdata;
2239420f89dSMike Rapoport (IBM) static unsigned long required_kernelcore_percent __initdata;
2249420f89dSMike Rapoport (IBM) static unsigned long required_movablecore __initdata;
2259420f89dSMike Rapoport (IBM) static unsigned long required_movablecore_percent __initdata;
2269420f89dSMike Rapoport (IBM) 
2279420f89dSMike Rapoport (IBM) static unsigned long nr_kernel_pages __initdata;
2289420f89dSMike Rapoport (IBM) static unsigned long nr_all_pages __initdata;
2299420f89dSMike Rapoport (IBM) static unsigned long dma_reserve __initdata;
2309420f89dSMike Rapoport (IBM) 
231de57807eSMike Rapoport (IBM) static bool deferred_struct_pages __meminitdata;
2329420f89dSMike Rapoport (IBM) 
2339420f89dSMike Rapoport (IBM) static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
2349420f89dSMike Rapoport (IBM) 
2359420f89dSMike Rapoport (IBM) static int __init cmdline_parse_core(char *p, unsigned long *core,
2369420f89dSMike Rapoport (IBM) 				     unsigned long *percent)
2379420f89dSMike Rapoport (IBM) {
2389420f89dSMike Rapoport (IBM) 	unsigned long long coremem;
2399420f89dSMike Rapoport (IBM) 	char *endptr;
2409420f89dSMike Rapoport (IBM) 
2419420f89dSMike Rapoport (IBM) 	if (!p)
2429420f89dSMike Rapoport (IBM) 		return -EINVAL;
2439420f89dSMike Rapoport (IBM) 
2449420f89dSMike Rapoport (IBM) 	/* Value may be a percentage of total memory, otherwise bytes */
2459420f89dSMike Rapoport (IBM) 	coremem = simple_strtoull(p, &endptr, 0);
2469420f89dSMike Rapoport (IBM) 	if (*endptr == '%') {
2479420f89dSMike Rapoport (IBM) 		/* Paranoid check for percent values greater than 100 */
2489420f89dSMike Rapoport (IBM) 		WARN_ON(coremem > 100);
2499420f89dSMike Rapoport (IBM) 
2509420f89dSMike Rapoport (IBM) 		*percent = coremem;
2519420f89dSMike Rapoport (IBM) 	} else {
2529420f89dSMike Rapoport (IBM) 		coremem = memparse(p, &p);
2539420f89dSMike Rapoport (IBM) 		/* Paranoid check that UL is enough for the coremem value */
2549420f89dSMike Rapoport (IBM) 		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
2559420f89dSMike Rapoport (IBM) 
2569420f89dSMike Rapoport (IBM) 		*core = coremem >> PAGE_SHIFT;
2579420f89dSMike Rapoport (IBM) 		*percent = 0UL;
2589420f89dSMike Rapoport (IBM) 	}
2599420f89dSMike Rapoport (IBM) 	return 0;
2609420f89dSMike Rapoport (IBM) }
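
/*
 * Parsing sketch for hypothetical command lines (4 KiB pages assumed):
 *
 *   kernelcore=512M -> memparse() returns 536870912, so
 *                      *core = 536870912 >> PAGE_SHIFT = 131072 pages
 *                      and *percent = 0
 *   kernelcore=30%  -> *percent = 30 and *core is left untouched
 */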
2619420f89dSMike Rapoport (IBM) 
262072ba380SKefeng Wang bool mirrored_kernelcore __initdata_memblock;
263072ba380SKefeng Wang 
2649420f89dSMike Rapoport (IBM) /*
2659420f89dSMike Rapoport (IBM)  * kernelcore=size sets the amount of memory for use for allocations that
2669420f89dSMike Rapoport (IBM)  * cannot be reclaimed or migrated.
2679420f89dSMike Rapoport (IBM)  */
2689420f89dSMike Rapoport (IBM) static int __init cmdline_parse_kernelcore(char *p)
2699420f89dSMike Rapoport (IBM) {
2709420f89dSMike Rapoport (IBM) 	/* parse kernelcore=mirror */
2719420f89dSMike Rapoport (IBM) 	if (parse_option_str(p, "mirror")) {
2729420f89dSMike Rapoport (IBM) 		mirrored_kernelcore = true;
2739420f89dSMike Rapoport (IBM) 		return 0;
2749420f89dSMike Rapoport (IBM) 	}
2759420f89dSMike Rapoport (IBM) 
2769420f89dSMike Rapoport (IBM) 	return cmdline_parse_core(p, &required_kernelcore,
2779420f89dSMike Rapoport (IBM) 				  &required_kernelcore_percent);
2789420f89dSMike Rapoport (IBM) }
2799420f89dSMike Rapoport (IBM) early_param("kernelcore", cmdline_parse_kernelcore);
2809420f89dSMike Rapoport (IBM) 
2819420f89dSMike Rapoport (IBM) /*
2829420f89dSMike Rapoport (IBM)  * movablecore=size sets the amount of memory for use for allocations that
2839420f89dSMike Rapoport (IBM)  * can be reclaimed or migrated.
2849420f89dSMike Rapoport (IBM)  */
2859420f89dSMike Rapoport (IBM) static int __init cmdline_parse_movablecore(char *p)
2869420f89dSMike Rapoport (IBM) {
2879420f89dSMike Rapoport (IBM) 	return cmdline_parse_core(p, &required_movablecore,
2889420f89dSMike Rapoport (IBM) 				  &required_movablecore_percent);
2899420f89dSMike Rapoport (IBM) }
2909420f89dSMike Rapoport (IBM) early_param("movablecore", cmdline_parse_movablecore);
2919420f89dSMike Rapoport (IBM) 
2929420f89dSMike Rapoport (IBM) /*
2939420f89dSMike Rapoport (IBM)  * early_calculate_totalpages()
2949420f89dSMike Rapoport (IBM)  * Sum pages in active regions for movable zone.
2959420f89dSMike Rapoport (IBM)  * Populate N_MEMORY for calculating usable_nodes.
2969420f89dSMike Rapoport (IBM)  */
2979420f89dSMike Rapoport (IBM) static unsigned long __init early_calculate_totalpages(void)
2989420f89dSMike Rapoport (IBM) {
2999420f89dSMike Rapoport (IBM) 	unsigned long totalpages = 0;
3009420f89dSMike Rapoport (IBM) 	unsigned long start_pfn, end_pfn;
3019420f89dSMike Rapoport (IBM) 	int i, nid;
3029420f89dSMike Rapoport (IBM) 
3039420f89dSMike Rapoport (IBM) 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
3049420f89dSMike Rapoport (IBM) 		unsigned long pages = end_pfn - start_pfn;
3059420f89dSMike Rapoport (IBM) 
3069420f89dSMike Rapoport (IBM) 		totalpages += pages;
3079420f89dSMike Rapoport (IBM) 		if (pages)
3089420f89dSMike Rapoport (IBM) 			node_set_state(nid, N_MEMORY);
3099420f89dSMike Rapoport (IBM) 	}
3109420f89dSMike Rapoport (IBM) 	return totalpages;
3119420f89dSMike Rapoport (IBM) }
3129420f89dSMike Rapoport (IBM) 
3139420f89dSMike Rapoport (IBM) /*
3149420f89dSMike Rapoport (IBM)  * This finds a zone that can be used for ZONE_MOVABLE pages. The
3159420f89dSMike Rapoport (IBM)  * assumption is made that zones within a node are ordered in monotonically
3169420f89dSMike Rapoport (IBM)  * increasing memory addresses so that the "highest" populated zone is used.
3179420f89dSMike Rapoport (IBM)  */
3189420f89dSMike Rapoport (IBM) static void __init find_usable_zone_for_movable(void)
3199420f89dSMike Rapoport (IBM) {
3209420f89dSMike Rapoport (IBM) 	int zone_index;
3219420f89dSMike Rapoport (IBM) 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3229420f89dSMike Rapoport (IBM) 		if (zone_index == ZONE_MOVABLE)
3239420f89dSMike Rapoport (IBM) 			continue;
3249420f89dSMike Rapoport (IBM) 
3259420f89dSMike Rapoport (IBM) 		if (arch_zone_highest_possible_pfn[zone_index] >
3269420f89dSMike Rapoport (IBM) 				arch_zone_lowest_possible_pfn[zone_index])
3279420f89dSMike Rapoport (IBM) 			break;
3289420f89dSMike Rapoport (IBM) 	}
3299420f89dSMike Rapoport (IBM) 
3309420f89dSMike Rapoport (IBM) 	VM_BUG_ON(zone_index == -1);
3319420f89dSMike Rapoport (IBM) 	movable_zone = zone_index;
3329420f89dSMike Rapoport (IBM) }
3339420f89dSMike Rapoport (IBM) 
3349420f89dSMike Rapoport (IBM) /*
3359420f89dSMike Rapoport (IBM)  * Find the PFN the Movable zone begins in each node. Kernel memory
3369420f89dSMike Rapoport (IBM)  * is spread evenly between nodes as long as the nodes have enough
3379420f89dSMike Rapoport (IBM)  * memory. When they don't, some nodes will have more kernelcore than
3389420f89dSMike Rapoport (IBM)  * others.
3399420f89dSMike Rapoport (IBM)  */
3409420f89dSMike Rapoport (IBM) static void __init find_zone_movable_pfns_for_nodes(void)
3419420f89dSMike Rapoport (IBM) {
3429420f89dSMike Rapoport (IBM) 	int i, nid;
3439420f89dSMike Rapoport (IBM) 	unsigned long usable_startpfn;
3449420f89dSMike Rapoport (IBM) 	unsigned long kernelcore_node, kernelcore_remaining;
3459420f89dSMike Rapoport (IBM) 	/* save the state before borrowing the nodemask */
3469420f89dSMike Rapoport (IBM) 	nodemask_t saved_node_state = node_states[N_MEMORY];
3479420f89dSMike Rapoport (IBM) 	unsigned long totalpages = early_calculate_totalpages();
3489420f89dSMike Rapoport (IBM) 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
3499420f89dSMike Rapoport (IBM) 	struct memblock_region *r;
3509420f89dSMike Rapoport (IBM) 
3519420f89dSMike Rapoport (IBM) 	/* Need to find movable_zone earlier when movable_node is specified. */
3529420f89dSMike Rapoport (IBM) 	find_usable_zone_for_movable();
3539420f89dSMike Rapoport (IBM) 
3549420f89dSMike Rapoport (IBM) 	/*
3559420f89dSMike Rapoport (IBM) 	 * If movable_node is specified, ignore kernelcore and movablecore
3569420f89dSMike Rapoport (IBM) 	 * options.
3579420f89dSMike Rapoport (IBM) 	 */
3589420f89dSMike Rapoport (IBM) 	if (movable_node_is_enabled()) {
3599420f89dSMike Rapoport (IBM) 		for_each_mem_region(r) {
3609420f89dSMike Rapoport (IBM) 			if (!memblock_is_hotpluggable(r))
3619420f89dSMike Rapoport (IBM) 				continue;
3629420f89dSMike Rapoport (IBM) 
3639420f89dSMike Rapoport (IBM) 			nid = memblock_get_region_node(r);
3649420f89dSMike Rapoport (IBM) 
3659420f89dSMike Rapoport (IBM) 			usable_startpfn = PFN_DOWN(r->base);
3669420f89dSMike Rapoport (IBM) 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
3679420f89dSMike Rapoport (IBM) 				min(usable_startpfn, zone_movable_pfn[nid]) :
3689420f89dSMike Rapoport (IBM) 				usable_startpfn;
3699420f89dSMike Rapoport (IBM) 		}
3709420f89dSMike Rapoport (IBM) 
3719420f89dSMike Rapoport (IBM) 		goto out2;
3729420f89dSMike Rapoport (IBM) 	}
3739420f89dSMike Rapoport (IBM) 
3749420f89dSMike Rapoport (IBM) 	/*
3759420f89dSMike Rapoport (IBM) 	 * If kernelcore=mirror is specified, ignore movablecore option
3769420f89dSMike Rapoport (IBM) 	 */
3779420f89dSMike Rapoport (IBM) 	if (mirrored_kernelcore) {
3789420f89dSMike Rapoport (IBM) 		bool mem_below_4gb_not_mirrored = false;
3799420f89dSMike Rapoport (IBM) 
3800db31d63SMa Wupeng 		if (!memblock_has_mirror()) {
3810db31d63SMa Wupeng 			pr_warn("The system has no mirror memory, ignore kernelcore=mirror.\n");
3820db31d63SMa Wupeng 			goto out;
3830db31d63SMa Wupeng 		}
3840db31d63SMa Wupeng 
3857ea6ec4cSMa Wupeng 		if (is_kdump_kernel()) {
3867ea6ec4cSMa Wupeng 			pr_warn("The system is under kdump, ignore kernelcore=mirror.\n");
3877ea6ec4cSMa Wupeng 			goto out;
3887ea6ec4cSMa Wupeng 		}
3897ea6ec4cSMa Wupeng 
3909420f89dSMike Rapoport (IBM) 		for_each_mem_region(r) {
3919420f89dSMike Rapoport (IBM) 			if (memblock_is_mirror(r))
3929420f89dSMike Rapoport (IBM) 				continue;
3939420f89dSMike Rapoport (IBM) 
3949420f89dSMike Rapoport (IBM) 			nid = memblock_get_region_node(r);
3959420f89dSMike Rapoport (IBM) 
3969420f89dSMike Rapoport (IBM) 			usable_startpfn = memblock_region_memory_base_pfn(r);
3979420f89dSMike Rapoport (IBM) 
3989420f89dSMike Rapoport (IBM) 			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
3999420f89dSMike Rapoport (IBM) 				mem_below_4gb_not_mirrored = true;
4009420f89dSMike Rapoport (IBM) 				continue;
4019420f89dSMike Rapoport (IBM) 			}
4029420f89dSMike Rapoport (IBM) 
4039420f89dSMike Rapoport (IBM) 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
4049420f89dSMike Rapoport (IBM) 				min(usable_startpfn, zone_movable_pfn[nid]) :
4059420f89dSMike Rapoport (IBM) 				usable_startpfn;
4069420f89dSMike Rapoport (IBM) 		}
4079420f89dSMike Rapoport (IBM) 
4089420f89dSMike Rapoport (IBM) 		if (mem_below_4gb_not_mirrored)
4099420f89dSMike Rapoport (IBM) 			pr_warn("This configuration results in unmirrored kernel memory.\n");
4109420f89dSMike Rapoport (IBM) 
4119420f89dSMike Rapoport (IBM) 		goto out2;
4129420f89dSMike Rapoport (IBM) 	}
4139420f89dSMike Rapoport (IBM) 
4149420f89dSMike Rapoport (IBM) 	/*
4159420f89dSMike Rapoport (IBM) 	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
4169420f89dSMike Rapoport (IBM) 	 * amount of necessary memory.
4179420f89dSMike Rapoport (IBM) 	 */
4189420f89dSMike Rapoport (IBM) 	if (required_kernelcore_percent)
4199420f89dSMike Rapoport (IBM) 		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
4209420f89dSMike Rapoport (IBM) 				       10000UL;
4219420f89dSMike Rapoport (IBM) 	if (required_movablecore_percent)
4229420f89dSMike Rapoport (IBM) 		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
4239420f89dSMike Rapoport (IBM) 					10000UL;
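
	/*
	 * Example: with a hypothetical totalpages of 1048576 (4 GiB in
	 * 4 KiB pages) and kernelcore=25%, the computation above yields
	 * required_kernelcore = (1048576 * 100 * 25) / 10000 = 262144
	 * pages, i.e. exactly totalpages * percent / 100.
	 */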
4249420f89dSMike Rapoport (IBM) 
4259420f89dSMike Rapoport (IBM) 	/*
4269420f89dSMike Rapoport (IBM) 	 * If movablecore= was specified, calculate what size of
4279420f89dSMike Rapoport (IBM) 	 * kernelcore that corresponds so that memory usable for
4289420f89dSMike Rapoport (IBM) 	 * any allocation type is evenly spread. If both kernelcore
4299420f89dSMike Rapoport (IBM) 	 * and movablecore are specified, then the value of kernelcore
4309420f89dSMike Rapoport (IBM) 	 * will be used for required_kernelcore if it's greater than
4319420f89dSMike Rapoport (IBM) 	 * what movablecore would have allowed.
4329420f89dSMike Rapoport (IBM) 	 */
4339420f89dSMike Rapoport (IBM) 	if (required_movablecore) {
4349420f89dSMike Rapoport (IBM) 		unsigned long corepages;
4359420f89dSMike Rapoport (IBM) 
4369420f89dSMike Rapoport (IBM) 		/*
4379420f89dSMike Rapoport (IBM) 		 * Round-up so that ZONE_MOVABLE is at least as large as what
4389420f89dSMike Rapoport (IBM) 		 * was requested by the user
4399420f89dSMike Rapoport (IBM) 		 */
4409420f89dSMike Rapoport (IBM) 		required_movablecore =
4419420f89dSMike Rapoport (IBM) 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4429420f89dSMike Rapoport (IBM) 		required_movablecore = min(totalpages, required_movablecore);
4439420f89dSMike Rapoport (IBM) 		corepages = totalpages - required_movablecore;
4449420f89dSMike Rapoport (IBM) 
4459420f89dSMike Rapoport (IBM) 		required_kernelcore = max(required_kernelcore, corepages);
4469420f89dSMike Rapoport (IBM) 	}
4479420f89dSMike Rapoport (IBM) 
4489420f89dSMike Rapoport (IBM) 	/*
4499420f89dSMike Rapoport (IBM) 	 * If kernelcore was not specified or kernelcore size is larger
4509420f89dSMike Rapoport (IBM) 	 * than totalpages, there is no ZONE_MOVABLE.
4519420f89dSMike Rapoport (IBM) 	 */
4529420f89dSMike Rapoport (IBM) 	if (!required_kernelcore || required_kernelcore >= totalpages)
4539420f89dSMike Rapoport (IBM) 		goto out;
4549420f89dSMike Rapoport (IBM) 
4559420f89dSMike Rapoport (IBM) 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4569420f89dSMike Rapoport (IBM) 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4579420f89dSMike Rapoport (IBM) 
4589420f89dSMike Rapoport (IBM) restart:
4599420f89dSMike Rapoport (IBM) 	/* Spread kernelcore memory as evenly as possible throughout nodes */
4609420f89dSMike Rapoport (IBM) 	kernelcore_node = required_kernelcore / usable_nodes;
4619420f89dSMike Rapoport (IBM) 	for_each_node_state(nid, N_MEMORY) {
4629420f89dSMike Rapoport (IBM) 		unsigned long start_pfn, end_pfn;
4639420f89dSMike Rapoport (IBM) 
4649420f89dSMike Rapoport (IBM) 		/*
4659420f89dSMike Rapoport (IBM) 		 * Recalculate kernelcore_node if the division per node
4669420f89dSMike Rapoport (IBM) 		 * now exceeds what is necessary to satisfy the requested
4679420f89dSMike Rapoport (IBM) 		 * amount of memory for the kernel
4689420f89dSMike Rapoport (IBM) 		 */
4699420f89dSMike Rapoport (IBM) 		if (required_kernelcore < kernelcore_node)
4709420f89dSMike Rapoport (IBM) 			kernelcore_node = required_kernelcore / usable_nodes;
4719420f89dSMike Rapoport (IBM) 
4729420f89dSMike Rapoport (IBM) 		/*
4739420f89dSMike Rapoport (IBM) 		 * As the map is walked, we track how much memory is usable
4749420f89dSMike Rapoport (IBM) 		 * by the kernel using kernelcore_remaining. When it is
4759420f89dSMike Rapoport (IBM) 		 * 0, the rest of the node is usable by ZONE_MOVABLE
4769420f89dSMike Rapoport (IBM) 		 */
4779420f89dSMike Rapoport (IBM) 		kernelcore_remaining = kernelcore_node;
4789420f89dSMike Rapoport (IBM) 
4799420f89dSMike Rapoport (IBM) 		/* Go through each range of PFNs within this node */
4809420f89dSMike Rapoport (IBM) 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4819420f89dSMike Rapoport (IBM) 			unsigned long size_pages;
4829420f89dSMike Rapoport (IBM) 
4839420f89dSMike Rapoport (IBM) 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
4849420f89dSMike Rapoport (IBM) 			if (start_pfn >= end_pfn)
4859420f89dSMike Rapoport (IBM) 				continue;
4869420f89dSMike Rapoport (IBM) 
4879420f89dSMike Rapoport (IBM) 			/* Account for what is only usable for kernelcore */
4889420f89dSMike Rapoport (IBM) 			if (start_pfn < usable_startpfn) {
4899420f89dSMike Rapoport (IBM) 				unsigned long kernel_pages;
4909420f89dSMike Rapoport (IBM) 				kernel_pages = min(end_pfn, usable_startpfn)
4919420f89dSMike Rapoport (IBM) 								- start_pfn;
4929420f89dSMike Rapoport (IBM) 
4939420f89dSMike Rapoport (IBM) 				kernelcore_remaining -= min(kernel_pages,
4949420f89dSMike Rapoport (IBM) 							kernelcore_remaining);
4959420f89dSMike Rapoport (IBM) 				required_kernelcore -= min(kernel_pages,
4969420f89dSMike Rapoport (IBM) 							required_kernelcore);
4979420f89dSMike Rapoport (IBM) 
4989420f89dSMike Rapoport (IBM) 				/* Continue if range is now fully accounted */
4999420f89dSMike Rapoport (IBM) 				if (end_pfn <= usable_startpfn) {
5009420f89dSMike Rapoport (IBM) 
5019420f89dSMike Rapoport (IBM) 					/*
5029420f89dSMike Rapoport (IBM) 					 * Push zone_movable_pfn to the end so
5039420f89dSMike Rapoport (IBM) 					 * that if we have to rebalance
5049420f89dSMike Rapoport (IBM) 					 * kernelcore across nodes, we will
5059420f89dSMike Rapoport (IBM) 					 * not double account here
5069420f89dSMike Rapoport (IBM) 					 */
5079420f89dSMike Rapoport (IBM) 					zone_movable_pfn[nid] = end_pfn;
5089420f89dSMike Rapoport (IBM) 					continue;
5099420f89dSMike Rapoport (IBM) 				}
5109420f89dSMike Rapoport (IBM) 				start_pfn = usable_startpfn;
5119420f89dSMike Rapoport (IBM) 			}
5129420f89dSMike Rapoport (IBM) 
5139420f89dSMike Rapoport (IBM) 			/*
5149420f89dSMike Rapoport (IBM) 			 * The usable PFN range for ZONE_MOVABLE is from
5159420f89dSMike Rapoport (IBM) 			 * start_pfn->end_pfn. Calculate size_pages as the
5169420f89dSMike Rapoport (IBM) 			 * number of pages used as kernelcore
5179420f89dSMike Rapoport (IBM) 			 */
5189420f89dSMike Rapoport (IBM) 			size_pages = end_pfn - start_pfn;
5199420f89dSMike Rapoport (IBM) 			if (size_pages > kernelcore_remaining)
5209420f89dSMike Rapoport (IBM) 				size_pages = kernelcore_remaining;
5219420f89dSMike Rapoport (IBM) 			zone_movable_pfn[nid] = start_pfn + size_pages;
5229420f89dSMike Rapoport (IBM) 
5239420f89dSMike Rapoport (IBM) 			/*
5249420f89dSMike Rapoport (IBM) 			 * Some kernelcore has been met, update counts and
5259420f89dSMike Rapoport (IBM) 			 * break if the kernelcore for this node has been
5269420f89dSMike Rapoport (IBM) 			 * satisfied
5279420f89dSMike Rapoport (IBM) 			 */
5289420f89dSMike Rapoport (IBM) 			required_kernelcore -= min(required_kernelcore,
5299420f89dSMike Rapoport (IBM) 								size_pages);
5309420f89dSMike Rapoport (IBM) 			kernelcore_remaining -= size_pages;
5319420f89dSMike Rapoport (IBM) 			if (!kernelcore_remaining)
5329420f89dSMike Rapoport (IBM) 				break;
5339420f89dSMike Rapoport (IBM) 		}
5349420f89dSMike Rapoport (IBM) 	}
5359420f89dSMike Rapoport (IBM) 
5369420f89dSMike Rapoport (IBM) 	/*
5379420f89dSMike Rapoport (IBM) 	 * If there is still required_kernelcore, we do another pass with one
5389420f89dSMike Rapoport (IBM) 	 * less node in the count. This will push zone_movable_pfn[nid] further
5399420f89dSMike Rapoport (IBM) 	 * along on the nodes that still have memory until kernelcore is
5409420f89dSMike Rapoport (IBM) 	 * satisfied
5419420f89dSMike Rapoport (IBM) 	 */
5429420f89dSMike Rapoport (IBM) 	usable_nodes--;
5439420f89dSMike Rapoport (IBM) 	if (usable_nodes && required_kernelcore > usable_nodes)
5449420f89dSMike Rapoport (IBM) 		goto restart;
5459420f89dSMike Rapoport (IBM) 
5469420f89dSMike Rapoport (IBM) out2:
5479420f89dSMike Rapoport (IBM) 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
5489420f89dSMike Rapoport (IBM) 	for (nid = 0; nid < MAX_NUMNODES; nid++) {
5499420f89dSMike Rapoport (IBM) 		unsigned long start_pfn, end_pfn;
5509420f89dSMike Rapoport (IBM) 
5519420f89dSMike Rapoport (IBM) 		zone_movable_pfn[nid] =
5529420f89dSMike Rapoport (IBM) 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
5539420f89dSMike Rapoport (IBM) 
5549420f89dSMike Rapoport (IBM) 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
5559420f89dSMike Rapoport (IBM) 		if (zone_movable_pfn[nid] >= end_pfn)
5569420f89dSMike Rapoport (IBM) 			zone_movable_pfn[nid] = 0;
5579420f89dSMike Rapoport (IBM) 	}
5589420f89dSMike Rapoport (IBM) 
5599420f89dSMike Rapoport (IBM) out:
5609420f89dSMike Rapoport (IBM) 	/* restore the node_state */
5619420f89dSMike Rapoport (IBM) 	node_states[N_MEMORY] = saved_node_state;
5629420f89dSMike Rapoport (IBM) }
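
/*
 * Spreading sketch (hypothetical numbers): a request for 3000 pages of
 * kernelcore across two nodes with memory starts each node at
 * kernelcore_node = 1500. If node 0 can only supply 1000 pages below
 * ZONE_MOVABLE, 500 pages of required_kernelcore survive the first
 * pass; usable_nodes then drops to 1 and the restart pushes node 1's
 * zone_movable_pfn further along until the remainder is satisfied.
 */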
5639420f89dSMike Rapoport (IBM) 
564fde1c4ecSUsama Arif void __meminit __init_single_page(struct page *page, unsigned long pfn,
5659420f89dSMike Rapoport (IBM) 				unsigned long zone, int nid)
5669420f89dSMike Rapoport (IBM) {
5679420f89dSMike Rapoport (IBM) 	mm_zero_struct_page(page);
5689420f89dSMike Rapoport (IBM) 	set_page_links(page, zone, nid, pfn);
5699420f89dSMike Rapoport (IBM) 	init_page_count(page);
5709420f89dSMike Rapoport (IBM) 	page_mapcount_reset(page);
5719420f89dSMike Rapoport (IBM) 	page_cpupid_reset_last(page);
5729420f89dSMike Rapoport (IBM) 	page_kasan_tag_reset(page);
5739420f89dSMike Rapoport (IBM) 
5749420f89dSMike Rapoport (IBM) 	INIT_LIST_HEAD(&page->lru);
5759420f89dSMike Rapoport (IBM) #ifdef WANT_PAGE_VIRTUAL
5769420f89dSMike Rapoport (IBM) 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
5779420f89dSMike Rapoport (IBM) 	if (!is_highmem_idx(zone))
5789420f89dSMike Rapoport (IBM) 		set_page_address(page, __va(pfn << PAGE_SHIFT));
5799420f89dSMike Rapoport (IBM) #endif
5809420f89dSMike Rapoport (IBM) }
5819420f89dSMike Rapoport (IBM) 
5829420f89dSMike Rapoport (IBM) #ifdef CONFIG_NUMA
5839420f89dSMike Rapoport (IBM) /*
5849420f89dSMike Rapoport (IBM)  * During memory init memblocks map pfns to nids. The search is expensive and
5859420f89dSMike Rapoport (IBM)  * this caches recent lookups. The implementation of __early_pfn_to_nid
5869420f89dSMike Rapoport (IBM)  * treats start/end as pfns.
5879420f89dSMike Rapoport (IBM)  */
5889420f89dSMike Rapoport (IBM) struct mminit_pfnnid_cache {
5899420f89dSMike Rapoport (IBM) 	unsigned long last_start;
5909420f89dSMike Rapoport (IBM) 	unsigned long last_end;
5919420f89dSMike Rapoport (IBM) 	int last_nid;
5929420f89dSMike Rapoport (IBM) };
5939420f89dSMike Rapoport (IBM) 
5949420f89dSMike Rapoport (IBM) static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
5959420f89dSMike Rapoport (IBM) 
5969420f89dSMike Rapoport (IBM) /*
5979420f89dSMike Rapoport (IBM)  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
5989420f89dSMike Rapoport (IBM)  */
5999420f89dSMike Rapoport (IBM) static int __meminit __early_pfn_to_nid(unsigned long pfn,
6009420f89dSMike Rapoport (IBM) 					struct mminit_pfnnid_cache *state)
6019420f89dSMike Rapoport (IBM) {
6029420f89dSMike Rapoport (IBM) 	unsigned long start_pfn, end_pfn;
6039420f89dSMike Rapoport (IBM) 	int nid;
6049420f89dSMike Rapoport (IBM) 
6059420f89dSMike Rapoport (IBM) 	if (state->last_start <= pfn && pfn < state->last_end)
6069420f89dSMike Rapoport (IBM) 		return state->last_nid;
6079420f89dSMike Rapoport (IBM) 
6089420f89dSMike Rapoport (IBM) 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
6099420f89dSMike Rapoport (IBM) 	if (nid != NUMA_NO_NODE) {
6109420f89dSMike Rapoport (IBM) 		state->last_start = start_pfn;
6119420f89dSMike Rapoport (IBM) 		state->last_end = end_pfn;
6129420f89dSMike Rapoport (IBM) 		state->last_nid = nid;
6139420f89dSMike Rapoport (IBM) 	}
6149420f89dSMike Rapoport (IBM) 
6159420f89dSMike Rapoport (IBM) 	return nid;
6169420f89dSMike Rapoport (IBM) }
6179420f89dSMike Rapoport (IBM) 
6189420f89dSMike Rapoport (IBM) int __meminit early_pfn_to_nid(unsigned long pfn)
6199420f89dSMike Rapoport (IBM) {
6209420f89dSMike Rapoport (IBM) 	static DEFINE_SPINLOCK(early_pfn_lock);
6219420f89dSMike Rapoport (IBM) 	int nid;
6229420f89dSMike Rapoport (IBM) 
6239420f89dSMike Rapoport (IBM) 	spin_lock(&early_pfn_lock);
6249420f89dSMike Rapoport (IBM) 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
6259420f89dSMike Rapoport (IBM) 	if (nid < 0)
6269420f89dSMike Rapoport (IBM) 		nid = first_online_node;
6279420f89dSMike Rapoport (IBM) 	spin_unlock(&early_pfn_lock);
6289420f89dSMike Rapoport (IBM) 
6299420f89dSMike Rapoport (IBM) 	return nid;
6309420f89dSMike Rapoport (IBM) }
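
/*
 * Access-pattern sketch: early memmap initialisation walks PFNs mostly
 * in ascending order, so consecutive early_pfn_to_nid() calls tend to
 * fall inside the cached [last_start, last_end) window and skip the
 * memblock search. E.g. (hypothetical), once pfn 0x10000 resolves to
 * nid 0 with range [0x10000, 0x20000), lookups up to 0x1ffff are hits.
 */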
631534ef4e1SMike Rapoport (IBM) 
632534ef4e1SMike Rapoport (IBM) int hashdist = HASHDIST_DEFAULT;
633534ef4e1SMike Rapoport (IBM) 
634534ef4e1SMike Rapoport (IBM) static int __init set_hashdist(char *str)
635534ef4e1SMike Rapoport (IBM) {
636534ef4e1SMike Rapoport (IBM) 	if (!str)
637534ef4e1SMike Rapoport (IBM) 		return 0;
638534ef4e1SMike Rapoport (IBM) 	hashdist = simple_strtoul(str, &str, 0);
639534ef4e1SMike Rapoport (IBM) 	return 1;
640534ef4e1SMike Rapoport (IBM) }
641534ef4e1SMike Rapoport (IBM) __setup("hashdist=", set_hashdist);
642534ef4e1SMike Rapoport (IBM) 
643534ef4e1SMike Rapoport (IBM) static inline void fixup_hashdist(void)
644534ef4e1SMike Rapoport (IBM) {
645534ef4e1SMike Rapoport (IBM) 	if (num_node_state(N_MEMORY) == 1)
646534ef4e1SMike Rapoport (IBM) 		hashdist = 0;
647534ef4e1SMike Rapoport (IBM) }
648534ef4e1SMike Rapoport (IBM) #else
649534ef4e1SMike Rapoport (IBM) static inline void fixup_hashdist(void) {}
6509420f89dSMike Rapoport (IBM) #endif /* CONFIG_NUMA */
6519420f89dSMike Rapoport (IBM) 
6529420f89dSMike Rapoport (IBM) #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
6539420f89dSMike Rapoport (IBM) static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
6549420f89dSMike Rapoport (IBM) {
6559420f89dSMike Rapoport (IBM) 	pgdat->first_deferred_pfn = ULONG_MAX;
6569420f89dSMike Rapoport (IBM) }
6579420f89dSMike Rapoport (IBM) 
6589420f89dSMike Rapoport (IBM) /* Returns true if the struct page for the pfn is initialised */
65961167ad5SYajun Deng static inline bool __meminit early_page_initialised(unsigned long pfn, int nid)
6609420f89dSMike Rapoport (IBM) {
6619420f89dSMike Rapoport (IBM) 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
6629420f89dSMike Rapoport (IBM) 		return false;
6639420f89dSMike Rapoport (IBM) 
6649420f89dSMike Rapoport (IBM) 	return true;
6659420f89dSMike Rapoport (IBM) }
6669420f89dSMike Rapoport (IBM) 
6679420f89dSMike Rapoport (IBM) /*
6689420f89dSMike Rapoport (IBM)  * Returns true when the remaining initialisation should be deferred until
6699420f89dSMike Rapoport (IBM)  * later in the boot cycle when it can be parallelised.
6709420f89dSMike Rapoport (IBM)  */
6719420f89dSMike Rapoport (IBM) static bool __meminit
6729420f89dSMike Rapoport (IBM) defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
6739420f89dSMike Rapoport (IBM) {
6749420f89dSMike Rapoport (IBM) 	static unsigned long prev_end_pfn, nr_initialised;
6759420f89dSMike Rapoport (IBM) 
6769420f89dSMike Rapoport (IBM) 	if (early_page_ext_enabled())
6779420f89dSMike Rapoport (IBM) 		return false;
6789420f89dSMike Rapoport (IBM) 	/*
6799420f89dSMike Rapoport (IBM) 	 * The prev_end_pfn static holds the end of the previous zone; no locking
6809420f89dSMike Rapoport (IBM) 	 * is needed because this is called very early in boot, before smp_init.
6819420f89dSMike Rapoport (IBM) 	 */
6829420f89dSMike Rapoport (IBM) 	if (prev_end_pfn != end_pfn) {
6839420f89dSMike Rapoport (IBM) 		prev_end_pfn = end_pfn;
6849420f89dSMike Rapoport (IBM) 		nr_initialised = 0;
6859420f89dSMike Rapoport (IBM) 	}
6869420f89dSMike Rapoport (IBM) 
6879420f89dSMike Rapoport (IBM) 	/* Always populate low zones for address-constrained allocations */
6889420f89dSMike Rapoport (IBM) 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
6899420f89dSMike Rapoport (IBM) 		return false;
6909420f89dSMike Rapoport (IBM) 
6919420f89dSMike Rapoport (IBM) 	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
6929420f89dSMike Rapoport (IBM) 		return true;
6939420f89dSMike Rapoport (IBM) 	/*
6949420f89dSMike Rapoport (IBM) 	 * We start only with one section of pages, more pages are added as
6959420f89dSMike Rapoport (IBM) 	 * needed until the rest of deferred pages are initialized.
6969420f89dSMike Rapoport (IBM) 	 */
6979420f89dSMike Rapoport (IBM) 	nr_initialised++;
6989420f89dSMike Rapoport (IBM) 	if ((nr_initialised > PAGES_PER_SECTION) &&
6999420f89dSMike Rapoport (IBM) 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
7009420f89dSMike Rapoport (IBM) 		NODE_DATA(nid)->first_deferred_pfn = pfn;
7019420f89dSMike Rapoport (IBM) 		return true;
7029420f89dSMike Rapoport (IBM) 	}
7039420f89dSMike Rapoport (IBM) 	return false;
7049420f89dSMike Rapoport (IBM) }
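
/*
 * Deferral sketch, assuming PAGES_PER_SECTION == 32768 (a common
 * x86-64 value): the first section's worth of pages in the node's
 * highest zone is always initialised here; after that, initialisation
 * stops at the next section boundary, that PFN is recorded in
 * first_deferred_pfn, and the rest is initialised later in boot when
 * it can be parallelised.
 */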
7059420f89dSMike Rapoport (IBM) 
70661167ad5SYajun Deng static void __meminit init_reserved_page(unsigned long pfn, int nid)
7079420f89dSMike Rapoport (IBM) {
7089420f89dSMike Rapoport (IBM) 	pg_data_t *pgdat;
70961167ad5SYajun Deng 	int zid;
7109420f89dSMike Rapoport (IBM) 
71161167ad5SYajun Deng 	if (early_page_initialised(pfn, nid))
7129420f89dSMike Rapoport (IBM) 		return;
7139420f89dSMike Rapoport (IBM) 
7149420f89dSMike Rapoport (IBM) 	pgdat = NODE_DATA(nid);
7159420f89dSMike Rapoport (IBM) 
7169420f89dSMike Rapoport (IBM) 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
7179420f89dSMike Rapoport (IBM) 		struct zone *zone = &pgdat->node_zones[zid];
7189420f89dSMike Rapoport (IBM) 
7199420f89dSMike Rapoport (IBM) 		if (zone_spans_pfn(zone, pfn))
7209420f89dSMike Rapoport (IBM) 			break;
7219420f89dSMike Rapoport (IBM) 	}
7229420f89dSMike Rapoport (IBM) 	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
7239420f89dSMike Rapoport (IBM) }
7249420f89dSMike Rapoport (IBM) #else
7259420f89dSMike Rapoport (IBM) static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
7269420f89dSMike Rapoport (IBM) 
72761167ad5SYajun Deng static inline bool early_page_initialised(unsigned long pfn, int nid)
7289420f89dSMike Rapoport (IBM) {
7299420f89dSMike Rapoport (IBM) 	return true;
7309420f89dSMike Rapoport (IBM) }
7319420f89dSMike Rapoport (IBM) 
7329420f89dSMike Rapoport (IBM) static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
7339420f89dSMike Rapoport (IBM) {
7349420f89dSMike Rapoport (IBM) 	return false;
7359420f89dSMike Rapoport (IBM) }
7369420f89dSMike Rapoport (IBM) 
73761167ad5SYajun Deng static inline void init_reserved_page(unsigned long pfn, int nid)
7389420f89dSMike Rapoport (IBM) {
7399420f89dSMike Rapoport (IBM) }
7409420f89dSMike Rapoport (IBM) #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
7419420f89dSMike Rapoport (IBM) 
7429420f89dSMike Rapoport (IBM) /*
7439420f89dSMike Rapoport (IBM)  * Initialised pages do not have PageReserved set. This function is
7449420f89dSMike Rapoport (IBM)  * called for each range allocated by the bootmem allocator and
7459420f89dSMike Rapoport (IBM)  * marks the pages PageReserved. The remaining valid pages are later
7469420f89dSMike Rapoport (IBM)  * sent to the buddy page allocator.
7479420f89dSMike Rapoport (IBM)  */
74861167ad5SYajun Deng void __meminit reserve_bootmem_region(phys_addr_t start,
74961167ad5SYajun Deng 				      phys_addr_t end, int nid)
7509420f89dSMike Rapoport (IBM) {
7519420f89dSMike Rapoport (IBM) 	unsigned long start_pfn = PFN_DOWN(start);
7529420f89dSMike Rapoport (IBM) 	unsigned long end_pfn = PFN_UP(end);
7539420f89dSMike Rapoport (IBM) 
7549420f89dSMike Rapoport (IBM) 	for (; start_pfn < end_pfn; start_pfn++) {
7559420f89dSMike Rapoport (IBM) 		if (pfn_valid(start_pfn)) {
7569420f89dSMike Rapoport (IBM) 			struct page *page = pfn_to_page(start_pfn);
7579420f89dSMike Rapoport (IBM) 
75861167ad5SYajun Deng 			init_reserved_page(start_pfn, nid);
7599420f89dSMike Rapoport (IBM) 
7609420f89dSMike Rapoport (IBM) 			/* Avoid false-positive PageTail() */
7619420f89dSMike Rapoport (IBM) 			INIT_LIST_HEAD(&page->lru);
7629420f89dSMike Rapoport (IBM) 
7639420f89dSMike Rapoport (IBM) 			/*
7649420f89dSMike Rapoport (IBM) 			 * no need for atomic set_bit because the struct
7659420f89dSMike Rapoport (IBM) 			 * page is not visible yet so nobody should
7669420f89dSMike Rapoport (IBM) 			 * access it yet.
7679420f89dSMike Rapoport (IBM) 			 */
7689420f89dSMike Rapoport (IBM) 			__SetPageReserved(page);
7699420f89dSMike Rapoport (IBM) 		}
7709420f89dSMike Rapoport (IBM) 	}
7719420f89dSMike Rapoport (IBM) }
7729420f89dSMike Rapoport (IBM) 
7739420f89dSMike Rapoport (IBM) /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
7749420f89dSMike Rapoport (IBM) static bool __meminit
7759420f89dSMike Rapoport (IBM) overlap_memmap_init(unsigned long zone, unsigned long *pfn)
7769420f89dSMike Rapoport (IBM) {
7779420f89dSMike Rapoport (IBM) 	static struct memblock_region *r;
7789420f89dSMike Rapoport (IBM) 
7799420f89dSMike Rapoport (IBM) 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
7809420f89dSMike Rapoport (IBM) 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
7819420f89dSMike Rapoport (IBM) 			for_each_mem_region(r) {
7829420f89dSMike Rapoport (IBM) 				if (*pfn < memblock_region_memory_end_pfn(r))
7839420f89dSMike Rapoport (IBM) 					break;
7849420f89dSMike Rapoport (IBM) 			}
7859420f89dSMike Rapoport (IBM) 		}
7869420f89dSMike Rapoport (IBM) 		if (*pfn >= memblock_region_memory_base_pfn(r) &&
7879420f89dSMike Rapoport (IBM) 		    memblock_is_mirror(r)) {
7889420f89dSMike Rapoport (IBM) 			*pfn = memblock_region_memory_end_pfn(r);
7899420f89dSMike Rapoport (IBM) 			return true;
7909420f89dSMike Rapoport (IBM) 		}
7919420f89dSMike Rapoport (IBM) 	}
7929420f89dSMike Rapoport (IBM) 	return false;
7939420f89dSMike Rapoport (IBM) }
7949420f89dSMike Rapoport (IBM) 
7959420f89dSMike Rapoport (IBM) /*
7969420f89dSMike Rapoport (IBM)  * Only struct pages that correspond to ranges defined by memblock.memory
7979420f89dSMike Rapoport (IBM)  * are zeroed and initialized by going through __init_single_page() during
7989420f89dSMike Rapoport (IBM)  * memmap_init_zone_range().
7999420f89dSMike Rapoport (IBM)  *
8009420f89dSMike Rapoport (IBM)  * But, there could be struct pages that correspond to holes in
8019420f89dSMike Rapoport (IBM)  * memblock.memory. This can happen because of the following reasons:
8029420f89dSMike Rapoport (IBM)  * - physical memory bank size is not necessarily the exact multiple of the
8039420f89dSMike Rapoport (IBM)  *   arbitrary section size
8049420f89dSMike Rapoport (IBM)  * - early reserved memory may not be listed in memblock.memory
805ecf5dd1fSSerge Semin  * - non-memory regions covered by the contiguous flatmem mapping
8069420f89dSMike Rapoport (IBM)  * - memory layouts defined with memmap= kernel parameter may not align
8079420f89dSMike Rapoport (IBM)  *   nicely with memmap sections
8089420f89dSMike Rapoport (IBM)  *
8099420f89dSMike Rapoport (IBM)  * Explicitly initialize those struct pages so that:
8109420f89dSMike Rapoport (IBM)  * - PG_Reserved is set
8119420f89dSMike Rapoport (IBM)  * - zone and node links point to zone and node that span the page if the
8129420f89dSMike Rapoport (IBM)  *   hole is in the middle of a zone
8139420f89dSMike Rapoport (IBM)  * - zone and node links point to adjacent zone/node if the hole falls on
8149420f89dSMike Rapoport (IBM)  *   the zone boundary; the pages in such holes will be prepended to the
8159420f89dSMike Rapoport (IBM)  *   zone/node above the hole except for the trailing pages in the last
8169420f89dSMike Rapoport (IBM)  *   section that will be appended to the zone/node below.
8179420f89dSMike Rapoport (IBM)  */
8189420f89dSMike Rapoport (IBM) static void __init init_unavailable_range(unsigned long spfn,
8199420f89dSMike Rapoport (IBM) 					  unsigned long epfn,
8209420f89dSMike Rapoport (IBM) 					  int zone, int node)
8219420f89dSMike Rapoport (IBM) {
8229420f89dSMike Rapoport (IBM) 	unsigned long pfn;
8239420f89dSMike Rapoport (IBM) 	u64 pgcnt = 0;
8249420f89dSMike Rapoport (IBM) 
8259420f89dSMike Rapoport (IBM) 	for (pfn = spfn; pfn < epfn; pfn++) {
8269420f89dSMike Rapoport (IBM) 		if (!pfn_valid(pageblock_start_pfn(pfn))) {
8279420f89dSMike Rapoport (IBM) 			pfn = pageblock_end_pfn(pfn) - 1;
8289420f89dSMike Rapoport (IBM) 			continue;
8299420f89dSMike Rapoport (IBM) 		}
8309420f89dSMike Rapoport (IBM) 		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
8319420f89dSMike Rapoport (IBM) 		__SetPageReserved(pfn_to_page(pfn));
8329420f89dSMike Rapoport (IBM) 		pgcnt++;
8339420f89dSMike Rapoport (IBM) 	}
8349420f89dSMike Rapoport (IBM) 
8359420f89dSMike Rapoport (IBM) 	if (pgcnt)
83601846c6cSSerge Semin 		pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
8379420f89dSMike Rapoport (IBM) 			node, zone_names[zone], pgcnt);
8389420f89dSMike Rapoport (IBM) }
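
/*
 * Example, assuming SPARSEMEM with 32768 pages per section
 * (hypothetical figures): if memblock.memory ends at pfn 0x7fe00
 * inside the section spanning 0x78000-0x7ffff, pfns 0x7fe00-0x7ffff
 * are a hole; their struct pages are initialised here, marked
 * PG_reserved and linked to the zone and node passed in.
 */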
8399420f89dSMike Rapoport (IBM) 
8409420f89dSMike Rapoport (IBM) /*
8419420f89dSMike Rapoport (IBM)  * Initially all pages are reserved - free ones are freed
8429420f89dSMike Rapoport (IBM)  * up by memblock_free_all() once the early boot process is
8439420f89dSMike Rapoport (IBM)  * done. Non-atomic initialization, single-pass.
8449420f89dSMike Rapoport (IBM)  *
8459420f89dSMike Rapoport (IBM)  * All aligned pageblocks are initialized to the specified migratetype
8469420f89dSMike Rapoport (IBM)  * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
8479420f89dSMike Rapoport (IBM)  * zone stats (e.g., nr_isolate_pageblock) are touched.
8489420f89dSMike Rapoport (IBM)  */
8499420f89dSMike Rapoport (IBM) void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
8509420f89dSMike Rapoport (IBM) 		unsigned long start_pfn, unsigned long zone_end_pfn,
8519420f89dSMike Rapoport (IBM) 		enum meminit_context context,
8529420f89dSMike Rapoport (IBM) 		struct vmem_altmap *altmap, int migratetype)
8539420f89dSMike Rapoport (IBM) {
8549420f89dSMike Rapoport (IBM) 	unsigned long pfn, end_pfn = start_pfn + size;
8559420f89dSMike Rapoport (IBM) 	struct page *page;
8569420f89dSMike Rapoport (IBM) 
8579420f89dSMike Rapoport (IBM) 	if (highest_memmap_pfn < end_pfn - 1)
8589420f89dSMike Rapoport (IBM) 		highest_memmap_pfn = end_pfn - 1;
8599420f89dSMike Rapoport (IBM) 
8609420f89dSMike Rapoport (IBM) #ifdef CONFIG_ZONE_DEVICE
8619420f89dSMike Rapoport (IBM) 	/*
8629420f89dSMike Rapoport (IBM) 	 * Honor reservation requested by the driver for this ZONE_DEVICE
8639420f89dSMike Rapoport (IBM) 	 * memory. We limit the total number of pages to initialize to just
8649420f89dSMike Rapoport (IBM) 	 * those that might contain the memory mapping. We will defer the
8659420f89dSMike Rapoport (IBM) 	 * ZONE_DEVICE page initialization until after we have released
8669420f89dSMike Rapoport (IBM) 	 * the hotplug lock.
8679420f89dSMike Rapoport (IBM) 	 */
8689420f89dSMike Rapoport (IBM) 	if (zone == ZONE_DEVICE) {
8699420f89dSMike Rapoport (IBM) 		if (!altmap)
8709420f89dSMike Rapoport (IBM) 			return;
8719420f89dSMike Rapoport (IBM) 
8729420f89dSMike Rapoport (IBM) 		if (start_pfn == altmap->base_pfn)
8739420f89dSMike Rapoport (IBM) 			start_pfn += altmap->reserve;
8749420f89dSMike Rapoport (IBM) 		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
8759420f89dSMike Rapoport (IBM) 	}
8769420f89dSMike Rapoport (IBM) #endif
8779420f89dSMike Rapoport (IBM) 
8789420f89dSMike Rapoport (IBM) 	for (pfn = start_pfn; pfn < end_pfn; ) {
8799420f89dSMike Rapoport (IBM) 		/*
8809420f89dSMike Rapoport (IBM) 		 * There can be holes in boot-time mem_map[]s handed to this
8819420f89dSMike Rapoport (IBM) 		 * function.  They do not exist on hotplugged memory.
8829420f89dSMike Rapoport (IBM) 		 */
8839420f89dSMike Rapoport (IBM) 		if (context == MEMINIT_EARLY) {
8849420f89dSMike Rapoport (IBM) 			if (overlap_memmap_init(zone, &pfn))
8859420f89dSMike Rapoport (IBM) 				continue;
8869420f89dSMike Rapoport (IBM) 			if (defer_init(nid, pfn, zone_end_pfn)) {
8879420f89dSMike Rapoport (IBM) 				deferred_struct_pages = true;
8889420f89dSMike Rapoport (IBM) 				break;
8899420f89dSMike Rapoport (IBM) 			}
8909420f89dSMike Rapoport (IBM) 		}
8919420f89dSMike Rapoport (IBM) 
8929420f89dSMike Rapoport (IBM) 		page = pfn_to_page(pfn);
8939420f89dSMike Rapoport (IBM) 		__init_single_page(page, pfn, zone, nid);
8949420f89dSMike Rapoport (IBM) 		if (context == MEMINIT_HOTPLUG)
8959420f89dSMike Rapoport (IBM) 			__SetPageReserved(page);
8969420f89dSMike Rapoport (IBM) 
8979420f89dSMike Rapoport (IBM) 		/*
8989420f89dSMike Rapoport (IBM) 		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
8999420f89dSMike Rapoport (IBM) 		 * such that unmovable allocations won't be scattered all
9009420f89dSMike Rapoport (IBM) 		 * over the place during system boot.
9019420f89dSMike Rapoport (IBM) 		 */
9029420f89dSMike Rapoport (IBM) 		if (pageblock_aligned(pfn)) {
9039420f89dSMike Rapoport (IBM) 			set_pageblock_migratetype(page, migratetype);
9049420f89dSMike Rapoport (IBM) 			cond_resched();
9059420f89dSMike Rapoport (IBM) 		}
9069420f89dSMike Rapoport (IBM) 		pfn++;
9079420f89dSMike Rapoport (IBM) 	}
9089420f89dSMike Rapoport (IBM) }
9099420f89dSMike Rapoport (IBM) 
9109420f89dSMike Rapoport (IBM) static void __init memmap_init_zone_range(struct zone *zone,
9119420f89dSMike Rapoport (IBM) 					  unsigned long start_pfn,
9129420f89dSMike Rapoport (IBM) 					  unsigned long end_pfn,
9139420f89dSMike Rapoport (IBM) 					  unsigned long *hole_pfn)
9149420f89dSMike Rapoport (IBM) {
9159420f89dSMike Rapoport (IBM) 	unsigned long zone_start_pfn = zone->zone_start_pfn;
9169420f89dSMike Rapoport (IBM) 	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
9179420f89dSMike Rapoport (IBM) 	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
9189420f89dSMike Rapoport (IBM) 
9199420f89dSMike Rapoport (IBM) 	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
9209420f89dSMike Rapoport (IBM) 	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
9219420f89dSMike Rapoport (IBM) 
9229420f89dSMike Rapoport (IBM) 	if (start_pfn >= end_pfn)
9239420f89dSMike Rapoport (IBM) 		return;
9249420f89dSMike Rapoport (IBM) 
9259420f89dSMike Rapoport (IBM) 	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
9269420f89dSMike Rapoport (IBM) 			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
9279420f89dSMike Rapoport (IBM) 
9289420f89dSMike Rapoport (IBM) 	if (*hole_pfn < start_pfn)
9299420f89dSMike Rapoport (IBM) 		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
9309420f89dSMike Rapoport (IBM) 
9319420f89dSMike Rapoport (IBM) 	*hole_pfn = end_pfn;
9329420f89dSMike Rapoport (IBM) }
9339420f89dSMike Rapoport (IBM) 
9349420f89dSMike Rapoport (IBM) static void __init memmap_init(void)
9359420f89dSMike Rapoport (IBM) {
9369420f89dSMike Rapoport (IBM) 	unsigned long start_pfn, end_pfn;
9379420f89dSMike Rapoport (IBM) 	unsigned long hole_pfn = 0;
9389420f89dSMike Rapoport (IBM) 	int i, j, zone_id = 0, nid;
9399420f89dSMike Rapoport (IBM) 
9409420f89dSMike Rapoport (IBM) 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
9419420f89dSMike Rapoport (IBM) 		struct pglist_data *node = NODE_DATA(nid);
9429420f89dSMike Rapoport (IBM) 
9439420f89dSMike Rapoport (IBM) 		for (j = 0; j < MAX_NR_ZONES; j++) {
9449420f89dSMike Rapoport (IBM) 			struct zone *zone = node->node_zones + j;
9459420f89dSMike Rapoport (IBM) 
9469420f89dSMike Rapoport (IBM) 			if (!populated_zone(zone))
9479420f89dSMike Rapoport (IBM) 				continue;
9489420f89dSMike Rapoport (IBM) 
9499420f89dSMike Rapoport (IBM) 			memmap_init_zone_range(zone, start_pfn, end_pfn,
9509420f89dSMike Rapoport (IBM) 					       &hole_pfn);
9519420f89dSMike Rapoport (IBM) 			zone_id = j;
9529420f89dSMike Rapoport (IBM) 		}
9539420f89dSMike Rapoport (IBM) 	}
9549420f89dSMike Rapoport (IBM) 
9559420f89dSMike Rapoport (IBM) #ifdef CONFIG_SPARSEMEM
9569420f89dSMike Rapoport (IBM) 	/*
9579420f89dSMike Rapoport (IBM) 	 * Initialize the memory map for the hole in the range [memory_end,
9589420f89dSMike Rapoport (IBM) 	 * section_end].
9599420f89dSMike Rapoport (IBM) 	 * Append the pages in this hole to the highest zone in the last
9609420f89dSMike Rapoport (IBM) 	 * node.
9619420f89dSMike Rapoport (IBM) 	 * The call to init_unavailable_range() is outside the ifdef to
9629420f89dSMike Rapoport (IBM) 	 * silence the compiler warning about zone_id set but not used;
9639420f89dSMike Rapoport (IBM) 	 * for FLATMEM it is a nop anyway
9649420f89dSMike Rapoport (IBM) 	 */
9659420f89dSMike Rapoport (IBM) 	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
9669420f89dSMike Rapoport (IBM) 	if (hole_pfn < end_pfn)
9679420f89dSMike Rapoport (IBM) #endif
9689420f89dSMike Rapoport (IBM) 		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
9699420f89dSMike Rapoport (IBM) }
9709420f89dSMike Rapoport (IBM) 
9719420f89dSMike Rapoport (IBM) #ifdef CONFIG_ZONE_DEVICE
9729420f89dSMike Rapoport (IBM) static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
9739420f89dSMike Rapoport (IBM) 					  unsigned long zone_idx, int nid,
9749420f89dSMike Rapoport (IBM) 					  struct dev_pagemap *pgmap)
9759420f89dSMike Rapoport (IBM) {
9769420f89dSMike Rapoport (IBM) 
9779420f89dSMike Rapoport (IBM) 	__init_single_page(page, pfn, zone_idx, nid);
9789420f89dSMike Rapoport (IBM) 
9799420f89dSMike Rapoport (IBM) 	/*
9809420f89dSMike Rapoport (IBM) 	 * Mark page reserved as it will need to wait for onlining
9819420f89dSMike Rapoport (IBM) 	 * phase for it to be fully associated with a zone.
9829420f89dSMike Rapoport (IBM) 	 *
9839420f89dSMike Rapoport (IBM) 	 * We can use the non-atomic __set_bit operation for setting
9849420f89dSMike Rapoport (IBM) 	 * the flag as we are still initializing the pages.
9859420f89dSMike Rapoport (IBM) 	 */
9869420f89dSMike Rapoport (IBM) 	__SetPageReserved(page);
9879420f89dSMike Rapoport (IBM) 
9889420f89dSMike Rapoport (IBM) 	/*
9899420f89dSMike Rapoport (IBM) 	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
9909420f89dSMike Rapoport (IBM) 	 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
9919420f89dSMike Rapoport (IBM) 	 * ever freed or placed on a driver-private list.
9929420f89dSMike Rapoport (IBM) 	 */
9939420f89dSMike Rapoport (IBM) 	page->pgmap = pgmap;
9949420f89dSMike Rapoport (IBM) 	page->zone_device_data = NULL;
9959420f89dSMike Rapoport (IBM) 
9969420f89dSMike Rapoport (IBM) 	/*
9979420f89dSMike Rapoport (IBM) 	 * Mark the block movable so that blocks are reserved for
9989420f89dSMike Rapoport (IBM) 	 * movable allocations at startup. This will force kernel
9999420f89dSMike Rapoport (IBM) 	 * allocations to reserve their blocks rather than leaking
10009420f89dSMike Rapoport (IBM) 	 * throughout the address space during boot when many long-lived
10019420f89dSMike Rapoport (IBM) 	 * kernel allocations are made.
10029420f89dSMike Rapoport (IBM) 	 *
10039420f89dSMike Rapoport (IBM) 	 * Please note that the MEMINIT_HOTPLUG path doesn't clear the
10049420f89dSMike Rapoport (IBM) 	 * memmap because this is done early in section_activate().
10059420f89dSMike Rapoport (IBM) 	 */
10069420f89dSMike Rapoport (IBM) 	if (pageblock_aligned(pfn)) {
10079420f89dSMike Rapoport (IBM) 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
10089420f89dSMike Rapoport (IBM) 		cond_resched();
10099420f89dSMike Rapoport (IBM) 	}
10109420f89dSMike Rapoport (IBM) 
10119420f89dSMike Rapoport (IBM) 	/*
10129420f89dSMike Rapoport (IBM) 	 * ZONE_DEVICE pages are released directly to the driver page allocator
10139420f89dSMike Rapoport (IBM) 	 * which will set the page count to 1 when allocating the page.
10149420f89dSMike Rapoport (IBM) 	 */
10159420f89dSMike Rapoport (IBM) 	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
10169420f89dSMike Rapoport (IBM) 	    pgmap->type == MEMORY_DEVICE_COHERENT)
10179420f89dSMike Rapoport (IBM) 		set_page_count(page, 0);
10189420f89dSMike Rapoport (IBM) }
10199420f89dSMike Rapoport (IBM) 
10209420f89dSMike Rapoport (IBM) /*
10219420f89dSMike Rapoport (IBM)  * With compound page geometry and when struct pages are stored in RAM,
10229420f89dSMike Rapoport (IBM)  * most tail pages are reused. Consequently, the number of unique struct
10239420f89dSMike Rapoport (IBM)  * pages to initialize is a lot smaller than the total number of struct
10249420f89dSMike Rapoport (IBM)  * pages being mapped. This is a paired / mild layering violation with
10259420f89dSMike Rapoport (IBM)  * explicit knowledge of how the sparse_vmemmap internals handle compound
10269420f89dSMike Rapoport (IBM)  * pages in the absence of an altmap. See vmemmap_populate_compound_pages().
10279420f89dSMike Rapoport (IBM)  */
10289420f89dSMike Rapoport (IBM) static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
102987a7ae75SAneesh Kumar K.V 					      struct dev_pagemap *pgmap)
10309420f89dSMike Rapoport (IBM) {
103187a7ae75SAneesh Kumar K.V 	if (!vmemmap_can_optimize(altmap, pgmap))
103287a7ae75SAneesh Kumar K.V 		return pgmap_vmemmap_nr(pgmap);
103387a7ae75SAneesh Kumar K.V 
1034c1a6c536SAneesh Kumar K.V 	return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
10359420f89dSMike Rapoport (IBM) }
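
/*
 * Worked numbers (assumed values, not mandated by this file): with
 * 4 KiB pages, a 64-byte struct page and VMEMMAP_RESERVE_NR == 2, the
 * optimized case needs only 2 * (4096 / 64) == 128 struct pages
 * initialized per compound page, regardless of the compound size; the
 * remaining tails share the reused vmemmap pages.
 */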
10369420f89dSMike Rapoport (IBM) 
10379420f89dSMike Rapoport (IBM) static void __ref memmap_init_compound(struct page *head,
10389420f89dSMike Rapoport (IBM) 				       unsigned long head_pfn,
10399420f89dSMike Rapoport (IBM) 				       unsigned long zone_idx, int nid,
10409420f89dSMike Rapoport (IBM) 				       struct dev_pagemap *pgmap,
10419420f89dSMike Rapoport (IBM) 				       unsigned long nr_pages)
10429420f89dSMike Rapoport (IBM) {
10439420f89dSMike Rapoport (IBM) 	unsigned long pfn, end_pfn = head_pfn + nr_pages;
10449420f89dSMike Rapoport (IBM) 	unsigned int order = pgmap->vmemmap_shift;
10459420f89dSMike Rapoport (IBM) 
10469420f89dSMike Rapoport (IBM) 	__SetPageHead(head);
10479420f89dSMike Rapoport (IBM) 	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
10489420f89dSMike Rapoport (IBM) 		struct page *page = pfn_to_page(pfn);
10499420f89dSMike Rapoport (IBM) 
10509420f89dSMike Rapoport (IBM) 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
10519420f89dSMike Rapoport (IBM) 		prep_compound_tail(head, pfn - head_pfn);
10529420f89dSMike Rapoport (IBM) 		set_page_count(page, 0);
10539420f89dSMike Rapoport (IBM) 
10549420f89dSMike Rapoport (IBM) 		/*
10559420f89dSMike Rapoport (IBM) 		 * The first tail page stores important compound page info.
10569420f89dSMike Rapoport (IBM) 		 * Call prep_compound_head() after the first tail page has
10579420f89dSMike Rapoport (IBM) 		 * been initialized, so that its data is not overwritten.
10589420f89dSMike Rapoport (IBM) 		 */
10599420f89dSMike Rapoport (IBM) 		if (pfn == head_pfn + 1)
10609420f89dSMike Rapoport (IBM) 			prep_compound_head(head, order);
10619420f89dSMike Rapoport (IBM) 	}
10629420f89dSMike Rapoport (IBM) }
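
/*
 * Sketch of the loop above for an assumed pgmap->vmemmap_shift == 9
 * (2 MiB compound pages with a 4 KiB base page, i.e. 512 pfns per
 * compound page): the head at head_pfn gets __SetPageHead(), the
 * following tails (all 511 when the vmemmap is not optimized) are set
 * up via prep_compound_tail(), and prep_compound_head() runs right
 * after the first tail is initialized so its metadata is not clobbered.
 */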
10639420f89dSMike Rapoport (IBM) 
10649420f89dSMike Rapoport (IBM) void __ref memmap_init_zone_device(struct zone *zone,
10659420f89dSMike Rapoport (IBM) 				   unsigned long start_pfn,
10669420f89dSMike Rapoport (IBM) 				   unsigned long nr_pages,
10679420f89dSMike Rapoport (IBM) 				   struct dev_pagemap *pgmap)
10689420f89dSMike Rapoport (IBM) {
10699420f89dSMike Rapoport (IBM) 	unsigned long pfn, end_pfn = start_pfn + nr_pages;
10709420f89dSMike Rapoport (IBM) 	struct pglist_data *pgdat = zone->zone_pgdat;
10719420f89dSMike Rapoport (IBM) 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
10729420f89dSMike Rapoport (IBM) 	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
10739420f89dSMike Rapoport (IBM) 	unsigned long zone_idx = zone_idx(zone);
10749420f89dSMike Rapoport (IBM) 	unsigned long start = jiffies;
10759420f89dSMike Rapoport (IBM) 	int nid = pgdat->node_id;
10769420f89dSMike Rapoport (IBM) 
10779420f89dSMike Rapoport (IBM) 	if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
10789420f89dSMike Rapoport (IBM) 		return;
10799420f89dSMike Rapoport (IBM) 
10809420f89dSMike Rapoport (IBM) 	/*
10819420f89dSMike Rapoport (IBM) 	 * The call to memmap_init should have already taken care
10829420f89dSMike Rapoport (IBM) 	 * of the pages reserved for the memmap, so we can just jump to
10839420f89dSMike Rapoport (IBM) 	 * the end of that region and start processing the device pages.
10849420f89dSMike Rapoport (IBM) 	 */
10859420f89dSMike Rapoport (IBM) 	if (altmap) {
10869420f89dSMike Rapoport (IBM) 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
10879420f89dSMike Rapoport (IBM) 		nr_pages = end_pfn - start_pfn;
10889420f89dSMike Rapoport (IBM) 	}
10899420f89dSMike Rapoport (IBM) 
10909420f89dSMike Rapoport (IBM) 	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
10919420f89dSMike Rapoport (IBM) 		struct page *page = pfn_to_page(pfn);
10929420f89dSMike Rapoport (IBM) 
10939420f89dSMike Rapoport (IBM) 		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
10949420f89dSMike Rapoport (IBM) 
10959420f89dSMike Rapoport (IBM) 		if (pfns_per_compound == 1)
10969420f89dSMike Rapoport (IBM) 			continue;
10979420f89dSMike Rapoport (IBM) 
10989420f89dSMike Rapoport (IBM) 		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
109987a7ae75SAneesh Kumar K.V 				     compound_nr_pages(altmap, pgmap));
11009420f89dSMike Rapoport (IBM) 	}
11019420f89dSMike Rapoport (IBM) 
1102dd31bad2STomas Krcka 	pr_debug("%s initialised %lu pages in %ums\n", __func__,
11039420f89dSMike Rapoport (IBM) 		nr_pages, jiffies_to_msecs(jiffies - start));
11049420f89dSMike Rapoport (IBM) }
11059420f89dSMike Rapoport (IBM) #endif
11069420f89dSMike Rapoport (IBM) 
11079420f89dSMike Rapoport (IBM) /*
11089420f89dSMike Rapoport (IBM)  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
11099420f89dSMike Rapoport (IBM)  * because it is sized independently of the architecture. Unlike the other zones,
11109420f89dSMike Rapoport (IBM)  * the starting point for ZONE_MOVABLE is not fixed. It may be different
11119420f89dSMike Rapoport (IBM)  * in each node depending on the size of each node and how evenly kernelcore
11129420f89dSMike Rapoport (IBM)  * is distributed. This helper function adjusts the zone ranges
11139420f89dSMike Rapoport (IBM)  * provided by the architecture for a given node by using the end of the
11149420f89dSMike Rapoport (IBM)  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
11159420f89dSMike Rapoport (IBM)  * zones within a node are in order of monotonically increasing memory addresses.
11169420f89dSMike Rapoport (IBM)  */
11179420f89dSMike Rapoport (IBM) static void __init adjust_zone_range_for_zone_movable(int nid,
11189420f89dSMike Rapoport (IBM) 					unsigned long zone_type,
11199420f89dSMike Rapoport (IBM) 					unsigned long node_end_pfn,
11209420f89dSMike Rapoport (IBM) 					unsigned long *zone_start_pfn,
11219420f89dSMike Rapoport (IBM) 					unsigned long *zone_end_pfn)
11229420f89dSMike Rapoport (IBM) {
11239420f89dSMike Rapoport (IBM) 	/* Only adjust if ZONE_MOVABLE is on this node */
11249420f89dSMike Rapoport (IBM) 	if (zone_movable_pfn[nid]) {
11259420f89dSMike Rapoport (IBM) 		/* Size ZONE_MOVABLE */
11269420f89dSMike Rapoport (IBM) 		if (zone_type == ZONE_MOVABLE) {
11279420f89dSMike Rapoport (IBM) 			*zone_start_pfn = zone_movable_pfn[nid];
11289420f89dSMike Rapoport (IBM) 			*zone_end_pfn = min(node_end_pfn,
11299420f89dSMike Rapoport (IBM) 				arch_zone_highest_possible_pfn[movable_zone]);
11309420f89dSMike Rapoport (IBM) 
11319420f89dSMike Rapoport (IBM) 		/* Adjust for ZONE_MOVABLE starting within this range */
11329420f89dSMike Rapoport (IBM) 		} else if (!mirrored_kernelcore &&
11339420f89dSMike Rapoport (IBM) 			*zone_start_pfn < zone_movable_pfn[nid] &&
11349420f89dSMike Rapoport (IBM) 			*zone_end_pfn > zone_movable_pfn[nid]) {
11359420f89dSMike Rapoport (IBM) 			*zone_end_pfn = zone_movable_pfn[nid];
11369420f89dSMike Rapoport (IBM) 
11379420f89dSMike Rapoport (IBM) 		/* Check if this whole range is within ZONE_MOVABLE */
11389420f89dSMike Rapoport (IBM) 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
11399420f89dSMike Rapoport (IBM) 			*zone_start_pfn = *zone_end_pfn;
11409420f89dSMike Rapoport (IBM) 	}
11419420f89dSMike Rapoport (IBM) }
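
/*
 * A hypothetical walk through the adjustment above: a node spans
 * [0x100000, 0x200000) and zone_movable_pfn[nid] == 0x180000 with
 * !mirrored_kernelcore. For ZONE_NORMAL the range straddles the
 * movable boundary, so *zone_end_pfn is clipped to 0x180000; for
 * ZONE_MOVABLE, *zone_start_pfn becomes 0x180000 and *zone_end_pfn
 * min(node_end_pfn, arch_zone_highest_possible_pfn[movable_zone]),
 * so the two zones tile the node without overlapping.
 */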
11429420f89dSMike Rapoport (IBM) 
11439420f89dSMike Rapoport (IBM) /*
11449420f89dSMike Rapoport (IBM)  * Return the number of page frames in holes in a range on a node. If nid is
11459420f89dSMike Rapoport (IBM)  * MAX_NUMNODES, holes in the requested range on every node are accounted for.
11469420f89dSMike Rapoport (IBM)  */
11479420f89dSMike Rapoport (IBM) unsigned long __init __absent_pages_in_range(int nid,
11489420f89dSMike Rapoport (IBM) 				unsigned long range_start_pfn,
11499420f89dSMike Rapoport (IBM) 				unsigned long range_end_pfn)
11509420f89dSMike Rapoport (IBM) {
11519420f89dSMike Rapoport (IBM) 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
11529420f89dSMike Rapoport (IBM) 	unsigned long start_pfn, end_pfn;
11539420f89dSMike Rapoport (IBM) 	int i;
11549420f89dSMike Rapoport (IBM) 
11559420f89dSMike Rapoport (IBM) 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
11569420f89dSMike Rapoport (IBM) 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
11579420f89dSMike Rapoport (IBM) 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
11589420f89dSMike Rapoport (IBM) 		nr_absent -= end_pfn - start_pfn;
11599420f89dSMike Rapoport (IBM) 	}
11609420f89dSMike Rapoport (IBM) 	return nr_absent;
11619420f89dSMike Rapoport (IBM) }
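
/*
 * Worked example with assumed numbers: for the range [0x0, 0x1000)
 * and node memory regions [0x0, 0x600) and [0x800, 0x1000), nr_absent
 * starts at 0x1000 and the loop subtracts 0x600 and then 0x800,
 * leaving 0x200 pages of hole (the gap [0x600, 0x800)).
 */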
11629420f89dSMike Rapoport (IBM) 
11639420f89dSMike Rapoport (IBM) /**
11649420f89dSMike Rapoport (IBM)  * absent_pages_in_range - Return number of page frames in holes within a range
11659420f89dSMike Rapoport (IBM)  * @start_pfn: The start PFN to start searching for holes
11669420f89dSMike Rapoport (IBM)  * @end_pfn: The end PFN to stop searching for holes
11679420f89dSMike Rapoport (IBM)  *
11689420f89dSMike Rapoport (IBM)  * Return: the number of page frames in memory holes within the range.
11699420f89dSMike Rapoport (IBM)  */
11709420f89dSMike Rapoport (IBM) unsigned long __init absent_pages_in_range(unsigned long start_pfn,
11719420f89dSMike Rapoport (IBM) 							unsigned long end_pfn)
11729420f89dSMike Rapoport (IBM) {
11739420f89dSMike Rapoport (IBM) 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
11749420f89dSMike Rapoport (IBM) }
11759420f89dSMike Rapoport (IBM) 
11769420f89dSMike Rapoport (IBM) /* Return the number of page frames in holes in a zone on a node */
11779420f89dSMike Rapoport (IBM) static unsigned long __init zone_absent_pages_in_node(int nid,
11789420f89dSMike Rapoport (IBM) 					unsigned long zone_type,
11791c2d252fSHaifeng Xu 					unsigned long zone_start_pfn,
11801c2d252fSHaifeng Xu 					unsigned long zone_end_pfn)
11819420f89dSMike Rapoport (IBM) {
11829420f89dSMike Rapoport (IBM) 	unsigned long nr_absent;
11839420f89dSMike Rapoport (IBM) 
11841c2d252fSHaifeng Xu 	/* zone is empty, so there are no absent pages */
11851c2d252fSHaifeng Xu 	if (zone_start_pfn == zone_end_pfn)
11869420f89dSMike Rapoport (IBM) 		return 0;
11879420f89dSMike Rapoport (IBM) 
11889420f89dSMike Rapoport (IBM) 	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
11899420f89dSMike Rapoport (IBM) 
11909420f89dSMike Rapoport (IBM) 	/*
11919420f89dSMike Rapoport (IBM) 	 * ZONE_MOVABLE handling.
11929420f89dSMike Rapoport (IBM) 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
11939420f89dSMike Rapoport (IBM) 	 * and vice versa.
11949420f89dSMike Rapoport (IBM) 	 */
11959420f89dSMike Rapoport (IBM) 	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
11969420f89dSMike Rapoport (IBM) 		unsigned long start_pfn, end_pfn;
11979420f89dSMike Rapoport (IBM) 		struct memblock_region *r;
11989420f89dSMike Rapoport (IBM) 
11999420f89dSMike Rapoport (IBM) 		for_each_mem_region(r) {
12009420f89dSMike Rapoport (IBM) 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
12019420f89dSMike Rapoport (IBM) 					  zone_start_pfn, zone_end_pfn);
12029420f89dSMike Rapoport (IBM) 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
12039420f89dSMike Rapoport (IBM) 					zone_start_pfn, zone_end_pfn);
12049420f89dSMike Rapoport (IBM) 
12059420f89dSMike Rapoport (IBM) 			if (zone_type == ZONE_MOVABLE &&
12069420f89dSMike Rapoport (IBM) 			    memblock_is_mirror(r))
12079420f89dSMike Rapoport (IBM) 				nr_absent += end_pfn - start_pfn;
12089420f89dSMike Rapoport (IBM) 
12099420f89dSMike Rapoport (IBM) 			if (zone_type == ZONE_NORMAL &&
12109420f89dSMike Rapoport (IBM) 			    !memblock_is_mirror(r))
12119420f89dSMike Rapoport (IBM) 				nr_absent += end_pfn - start_pfn;
12129420f89dSMike Rapoport (IBM) 		}
12139420f89dSMike Rapoport (IBM) 	}
12149420f89dSMike Rapoport (IBM) 
12159420f89dSMike Rapoport (IBM) 	return nr_absent;
12169420f89dSMike Rapoport (IBM) }
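
/*
 * Example of the mirror accounting above, with assumed ranges: if a
 * zone spanning [0x0, 0x10000) contains one mirrored memblock region
 * [0x4000, 0x8000), and mirrored_kernelcore plus a movable zone on the
 * node are in effect, those 0x4000 pages count as absent for
 * ZONE_MOVABLE (they stay in ZONE_NORMAL), while every non-mirrored
 * page counts as absent for ZONE_NORMAL.
 */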
12179420f89dSMike Rapoport (IBM) 
12189420f89dSMike Rapoport (IBM) /*
12199420f89dSMike Rapoport (IBM)  * Return the number of pages a zone spans in a node, including holes:
12209420f89dSMike Rapoport (IBM)  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
12219420f89dSMike Rapoport (IBM)  */
12229420f89dSMike Rapoport (IBM) static unsigned long __init zone_spanned_pages_in_node(int nid,
12239420f89dSMike Rapoport (IBM) 					unsigned long zone_type,
12249420f89dSMike Rapoport (IBM) 					unsigned long node_start_pfn,
12259420f89dSMike Rapoport (IBM) 					unsigned long node_end_pfn,
12269420f89dSMike Rapoport (IBM) 					unsigned long *zone_start_pfn,
12279420f89dSMike Rapoport (IBM) 					unsigned long *zone_end_pfn)
12289420f89dSMike Rapoport (IBM) {
12299420f89dSMike Rapoport (IBM) 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
12309420f89dSMike Rapoport (IBM) 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
12319420f89dSMike Rapoport (IBM) 
12329420f89dSMike Rapoport (IBM) 	/* Get the start and end of the zone */
12339420f89dSMike Rapoport (IBM) 	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
12349420f89dSMike Rapoport (IBM) 	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
12350792e47dSHaifeng Xu 	adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn,
12369420f89dSMike Rapoport (IBM) 					   zone_start_pfn, zone_end_pfn);
12379420f89dSMike Rapoport (IBM) 
12389420f89dSMike Rapoport (IBM) 	/* Check that this node has pages within the zone's required range */
12399420f89dSMike Rapoport (IBM) 	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
12409420f89dSMike Rapoport (IBM) 		return 0;
12419420f89dSMike Rapoport (IBM) 
12429420f89dSMike Rapoport (IBM) 	/* Move the zone boundaries inside the node if necessary */
12439420f89dSMike Rapoport (IBM) 	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
12449420f89dSMike Rapoport (IBM) 	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
12459420f89dSMike Rapoport (IBM) 
12469420f89dSMike Rapoport (IBM) 	/* Return the spanned pages */
12479420f89dSMike Rapoport (IBM) 	return *zone_end_pfn - *zone_start_pfn;
12489420f89dSMike Rapoport (IBM) }
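
/*
 * Example with assumed PFNs: a node spanning [0x100000, 0x140000)
 * probed for a zone whose architectural limits are [0x0, 0x100000)
 * (e.g. a DMA32-like zone below 4 GiB): both clamp() calls yield
 * 0x100000, the early-return check does not trigger, min()/max() leave
 * an empty range, and the function returns 0 spanned pages.
 */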
12499420f89dSMike Rapoport (IBM) 
1250ba1b67c7SHaifeng Xu static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat)
1251ba1b67c7SHaifeng Xu {
1252ba1b67c7SHaifeng Xu 	struct zone *z;
1253ba1b67c7SHaifeng Xu 
1254ba1b67c7SHaifeng Xu 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
1255ba1b67c7SHaifeng Xu 		z->zone_start_pfn = 0;
1256ba1b67c7SHaifeng Xu 		z->spanned_pages = 0;
1257ba1b67c7SHaifeng Xu 		z->present_pages = 0;
1258ba1b67c7SHaifeng Xu #if defined(CONFIG_MEMORY_HOTPLUG)
1259ba1b67c7SHaifeng Xu 		z->present_early_pages = 0;
1260ba1b67c7SHaifeng Xu #endif
1261ba1b67c7SHaifeng Xu 	}
1262ba1b67c7SHaifeng Xu 
1263ba1b67c7SHaifeng Xu 	pgdat->node_spanned_pages = 0;
1264ba1b67c7SHaifeng Xu 	pgdat->node_present_pages = 0;
1265ba1b67c7SHaifeng Xu 	pr_debug("On node %d totalpages: 0\n", pgdat->node_id);
1266ba1b67c7SHaifeng Xu }
1267ba1b67c7SHaifeng Xu 
12689420f89dSMike Rapoport (IBM) static void __init calculate_node_totalpages(struct pglist_data *pgdat,
12699420f89dSMike Rapoport (IBM) 						unsigned long node_start_pfn,
12709420f89dSMike Rapoport (IBM) 						unsigned long node_end_pfn)
12719420f89dSMike Rapoport (IBM) {
12729420f89dSMike Rapoport (IBM) 	unsigned long realtotalpages = 0, totalpages = 0;
12739420f89dSMike Rapoport (IBM) 	enum zone_type i;
12749420f89dSMike Rapoport (IBM) 
12759420f89dSMike Rapoport (IBM) 	for (i = 0; i < MAX_NR_ZONES; i++) {
12769420f89dSMike Rapoport (IBM) 		struct zone *zone = pgdat->node_zones + i;
12779420f89dSMike Rapoport (IBM) 		unsigned long zone_start_pfn, zone_end_pfn;
12789420f89dSMike Rapoport (IBM) 		unsigned long spanned, absent;
12791c2d252fSHaifeng Xu 		unsigned long real_size;
12809420f89dSMike Rapoport (IBM) 
12819420f89dSMike Rapoport (IBM) 		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
12829420f89dSMike Rapoport (IBM) 						     node_start_pfn,
12839420f89dSMike Rapoport (IBM) 						     node_end_pfn,
12849420f89dSMike Rapoport (IBM) 						     &zone_start_pfn,
12859420f89dSMike Rapoport (IBM) 						     &zone_end_pfn);
12869420f89dSMike Rapoport (IBM) 		absent = zone_absent_pages_in_node(pgdat->node_id, i,
12871c2d252fSHaifeng Xu 						   zone_start_pfn,
12881c2d252fSHaifeng Xu 						   zone_end_pfn);
12899420f89dSMike Rapoport (IBM) 
12901c2d252fSHaifeng Xu 		real_size = spanned - absent;
12919420f89dSMike Rapoport (IBM) 
12921c2d252fSHaifeng Xu 		if (spanned)
12939420f89dSMike Rapoport (IBM) 			zone->zone_start_pfn = zone_start_pfn;
12949420f89dSMike Rapoport (IBM) 		else
12959420f89dSMike Rapoport (IBM) 			zone->zone_start_pfn = 0;
12961c2d252fSHaifeng Xu 		zone->spanned_pages = spanned;
12979420f89dSMike Rapoport (IBM) 		zone->present_pages = real_size;
12989420f89dSMike Rapoport (IBM) #if defined(CONFIG_MEMORY_HOTPLUG)
12999420f89dSMike Rapoport (IBM) 		zone->present_early_pages = real_size;
13009420f89dSMike Rapoport (IBM) #endif
13019420f89dSMike Rapoport (IBM) 
13021c2d252fSHaifeng Xu 		totalpages += spanned;
13039420f89dSMike Rapoport (IBM) 		realtotalpages += real_size;
13049420f89dSMike Rapoport (IBM) 	}
13059420f89dSMike Rapoport (IBM) 
13069420f89dSMike Rapoport (IBM) 	pgdat->node_spanned_pages = totalpages;
13079420f89dSMike Rapoport (IBM) 	pgdat->node_present_pages = realtotalpages;
13089420f89dSMike Rapoport (IBM) 	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
13099420f89dSMike Rapoport (IBM) }
13109420f89dSMike Rapoport (IBM) 
13119420f89dSMike Rapoport (IBM) static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
13129420f89dSMike Rapoport (IBM) 						unsigned long present_pages)
13139420f89dSMike Rapoport (IBM) {
13149420f89dSMike Rapoport (IBM) 	unsigned long pages = spanned_pages;
13159420f89dSMike Rapoport (IBM) 
13169420f89dSMike Rapoport (IBM) 	/*
13179420f89dSMike Rapoport (IBM) 	 * Provide a more accurate estimation if there are holes within
13189420f89dSMike Rapoport (IBM) 	 * the zone and SPARSEMEM is in use. If there are holes within the
13199420f89dSMike Rapoport (IBM) 	 * zone, each populated memory region may cost us one or two extra
13209420f89dSMike Rapoport (IBM) 	 * memmap pages due to alignment because memmap pages for each
13219420f89dSMike Rapoport (IBM) 	 * populated region may not be naturally aligned on a page boundary.
13229420f89dSMike Rapoport (IBM) 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
13239420f89dSMike Rapoport (IBM) 	 */
13249420f89dSMike Rapoport (IBM) 	if (spanned_pages > present_pages + (present_pages >> 4) &&
13259420f89dSMike Rapoport (IBM) 	    IS_ENABLED(CONFIG_SPARSEMEM))
13269420f89dSMike Rapoport (IBM) 		pages = present_pages;
13279420f89dSMike Rapoport (IBM) 
13289420f89dSMike Rapoport (IBM) 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
13299420f89dSMike Rapoport (IBM) }
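
/*
 * Worked example, assuming 4 KiB pages and a 64-byte struct page: a
 * fully populated 1 GiB zone has spanned_pages == present_pages ==
 * 0x40000, so the memmap needs 16 MiB and the function returns 0x1000
 * pages. Only when spanned_pages exceeds present_pages by more than
 * 1/16th under SPARSEMEM does the estimate fall back to present_pages.
 */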
13309420f89dSMike Rapoport (IBM) 
13319420f89dSMike Rapoport (IBM) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
13329420f89dSMike Rapoport (IBM) static void pgdat_init_split_queue(struct pglist_data *pgdat)
13339420f89dSMike Rapoport (IBM) {
13349420f89dSMike Rapoport (IBM) 	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
13359420f89dSMike Rapoport (IBM) 
13369420f89dSMike Rapoport (IBM) 	spin_lock_init(&ds_queue->split_queue_lock);
13379420f89dSMike Rapoport (IBM) 	INIT_LIST_HEAD(&ds_queue->split_queue);
13389420f89dSMike Rapoport (IBM) 	ds_queue->split_queue_len = 0;
13399420f89dSMike Rapoport (IBM) }
13409420f89dSMike Rapoport (IBM) #else
13419420f89dSMike Rapoport (IBM) static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
13429420f89dSMike Rapoport (IBM) #endif
13439420f89dSMike Rapoport (IBM) 
13449420f89dSMike Rapoport (IBM) #ifdef CONFIG_COMPACTION
13459420f89dSMike Rapoport (IBM) static void pgdat_init_kcompactd(struct pglist_data *pgdat)
13469420f89dSMike Rapoport (IBM) {
13479420f89dSMike Rapoport (IBM) 	init_waitqueue_head(&pgdat->kcompactd_wait);
13489420f89dSMike Rapoport (IBM) }
13499420f89dSMike Rapoport (IBM) #else
13509420f89dSMike Rapoport (IBM) static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
13519420f89dSMike Rapoport (IBM) #endif
13529420f89dSMike Rapoport (IBM) 
13539420f89dSMike Rapoport (IBM) static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
13549420f89dSMike Rapoport (IBM) {
13559420f89dSMike Rapoport (IBM) 	int i;
13569420f89dSMike Rapoport (IBM) 
13579420f89dSMike Rapoport (IBM) 	pgdat_resize_init(pgdat);
13589420f89dSMike Rapoport (IBM) 	pgdat_kswapd_lock_init(pgdat);
13599420f89dSMike Rapoport (IBM) 
13609420f89dSMike Rapoport (IBM) 	pgdat_init_split_queue(pgdat);
13619420f89dSMike Rapoport (IBM) 	pgdat_init_kcompactd(pgdat);
13629420f89dSMike Rapoport (IBM) 
13639420f89dSMike Rapoport (IBM) 	init_waitqueue_head(&pgdat->kswapd_wait);
13649420f89dSMike Rapoport (IBM) 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
13659420f89dSMike Rapoport (IBM) 
13669420f89dSMike Rapoport (IBM) 	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
13679420f89dSMike Rapoport (IBM) 		init_waitqueue_head(&pgdat->reclaim_wait[i]);
13689420f89dSMike Rapoport (IBM) 
13699420f89dSMike Rapoport (IBM) 	pgdat_page_ext_init(pgdat);
13709420f89dSMike Rapoport (IBM) 	lruvec_init(&pgdat->__lruvec);
13719420f89dSMike Rapoport (IBM) }
13729420f89dSMike Rapoport (IBM) 
13739420f89dSMike Rapoport (IBM) static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
13749420f89dSMike Rapoport (IBM) 							unsigned long remaining_pages)
13759420f89dSMike Rapoport (IBM) {
13769420f89dSMike Rapoport (IBM) 	atomic_long_set(&zone->managed_pages, remaining_pages);
13779420f89dSMike Rapoport (IBM) 	zone_set_nid(zone, nid);
13789420f89dSMike Rapoport (IBM) 	zone->name = zone_names[idx];
13799420f89dSMike Rapoport (IBM) 	zone->zone_pgdat = NODE_DATA(nid);
13809420f89dSMike Rapoport (IBM) 	spin_lock_init(&zone->lock);
13819420f89dSMike Rapoport (IBM) 	zone_seqlock_init(zone);
13829420f89dSMike Rapoport (IBM) 	zone_pcp_init(zone);
13839420f89dSMike Rapoport (IBM) }
13849420f89dSMike Rapoport (IBM) 
13859420f89dSMike Rapoport (IBM) static void __meminit zone_init_free_lists(struct zone *zone)
13869420f89dSMike Rapoport (IBM) {
13879420f89dSMike Rapoport (IBM) 	unsigned int order, t;
13889420f89dSMike Rapoport (IBM) 	for_each_migratetype_order(order, t) {
13899420f89dSMike Rapoport (IBM) 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
13909420f89dSMike Rapoport (IBM) 		zone->free_area[order].nr_free = 0;
13919420f89dSMike Rapoport (IBM) 	}
1392dcdfdd40SKirill A. Shutemov 
1393dcdfdd40SKirill A. Shutemov #ifdef CONFIG_UNACCEPTED_MEMORY
1394dcdfdd40SKirill A. Shutemov 	INIT_LIST_HEAD(&zone->unaccepted_pages);
1395dcdfdd40SKirill A. Shutemov #endif
13969420f89dSMike Rapoport (IBM) }
13979420f89dSMike Rapoport (IBM) 
13989420f89dSMike Rapoport (IBM) void __meminit init_currently_empty_zone(struct zone *zone,
13999420f89dSMike Rapoport (IBM) 					unsigned long zone_start_pfn,
14009420f89dSMike Rapoport (IBM) 					unsigned long size)
14019420f89dSMike Rapoport (IBM) {
14029420f89dSMike Rapoport (IBM) 	struct pglist_data *pgdat = zone->zone_pgdat;
14039420f89dSMike Rapoport (IBM) 	int zone_idx = zone_idx(zone) + 1;
14049420f89dSMike Rapoport (IBM) 
14059420f89dSMike Rapoport (IBM) 	if (zone_idx > pgdat->nr_zones)
14069420f89dSMike Rapoport (IBM) 		pgdat->nr_zones = zone_idx;
14079420f89dSMike Rapoport (IBM) 
14089420f89dSMike Rapoport (IBM) 	zone->zone_start_pfn = zone_start_pfn;
14099420f89dSMike Rapoport (IBM) 
14109420f89dSMike Rapoport (IBM) 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
14119420f89dSMike Rapoport (IBM) 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
14129420f89dSMike Rapoport (IBM) 			pgdat->node_id,
14139420f89dSMike Rapoport (IBM) 			(unsigned long)zone_idx(zone),
14149420f89dSMike Rapoport (IBM) 			zone_start_pfn, (zone_start_pfn + size));
14159420f89dSMike Rapoport (IBM) 
14169420f89dSMike Rapoport (IBM) 	zone_init_free_lists(zone);
14179420f89dSMike Rapoport (IBM) 	zone->initialized = 1;
14189420f89dSMike Rapoport (IBM) }
14199420f89dSMike Rapoport (IBM) 
14209420f89dSMike Rapoport (IBM) #ifndef CONFIG_SPARSEMEM
14219420f89dSMike Rapoport (IBM) /*
14229420f89dSMike Rapoport (IBM)  * Calculate the size of the zone->pageblock_flags bitmap, rounded up to an
14239420f89dSMike Rapoport (IBM)  * unsigned long. Start by making sure zonesize is a multiple of
14249420f89dSMike Rapoport (IBM)  * pageblock_nr_pages by rounding up, then use NR_PAGEBLOCK_BITS worth of
14259420f89dSMike Rapoport (IBM)  * bits per pageblock, round the resulting bit count up to the nearest
14269420f89dSMike Rapoport (IBM)  * long, and finally return the size in bytes.
14279420f89dSMike Rapoport (IBM)  */
14289420f89dSMike Rapoport (IBM) static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
14299420f89dSMike Rapoport (IBM) {
14309420f89dSMike Rapoport (IBM) 	unsigned long usemapsize;
14319420f89dSMike Rapoport (IBM) 
14329420f89dSMike Rapoport (IBM) 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
14339420f89dSMike Rapoport (IBM) 	usemapsize = roundup(zonesize, pageblock_nr_pages);
14349420f89dSMike Rapoport (IBM) 	usemapsize = usemapsize >> pageblock_order;
14359420f89dSMike Rapoport (IBM) 	usemapsize *= NR_PAGEBLOCK_BITS;
1436daee07bfSMiaohe Lin 	usemapsize = roundup(usemapsize, BITS_PER_LONG);
14379420f89dSMike Rapoport (IBM) 
1438daee07bfSMiaohe Lin 	return usemapsize / BITS_PER_BYTE;
14399420f89dSMike Rapoport (IBM) }
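
/*
 * Example sizing under assumed values (pageblock_order == 9,
 * NR_PAGEBLOCK_BITS == 4): a pageblock-aligned zone of 0x40000 pages
 * has 0x200 pageblocks and needs 0x800 bits, already a multiple of a
 * 64-bit long, so usemap_size() returns 0x100 bytes.
 */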
14409420f89dSMike Rapoport (IBM) 
14419420f89dSMike Rapoport (IBM) static void __ref setup_usemap(struct zone *zone)
14429420f89dSMike Rapoport (IBM) {
14439420f89dSMike Rapoport (IBM) 	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
14449420f89dSMike Rapoport (IBM) 					       zone->spanned_pages);
14459420f89dSMike Rapoport (IBM) 	zone->pageblock_flags = NULL;
14469420f89dSMike Rapoport (IBM) 	if (usemapsize) {
14479420f89dSMike Rapoport (IBM) 		zone->pageblock_flags =
14489420f89dSMike Rapoport (IBM) 			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
14499420f89dSMike Rapoport (IBM) 					    zone_to_nid(zone));
14509420f89dSMike Rapoport (IBM) 		if (!zone->pageblock_flags)
14519420f89dSMike Rapoport (IBM) 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
14529420f89dSMike Rapoport (IBM) 			      usemapsize, zone->name, zone_to_nid(zone));
14539420f89dSMike Rapoport (IBM) 	}
14549420f89dSMike Rapoport (IBM) }
14559420f89dSMike Rapoport (IBM) #else
14569420f89dSMike Rapoport (IBM) static inline void setup_usemap(struct zone *zone) {}
14579420f89dSMike Rapoport (IBM) #endif /* CONFIG_SPARSEMEM */
14589420f89dSMike Rapoport (IBM) 
14599420f89dSMike Rapoport (IBM) #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
14609420f89dSMike Rapoport (IBM) 
14619420f89dSMike Rapoport (IBM) /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
14629420f89dSMike Rapoport (IBM) void __init set_pageblock_order(void)
14639420f89dSMike Rapoport (IBM) {
14645e0a760bSKirill A. Shutemov 	unsigned int order = MAX_PAGE_ORDER;
14659420f89dSMike Rapoport (IBM) 
14669420f89dSMike Rapoport (IBM) 	/* Check that pageblock_nr_pages has not already been setup */
14679420f89dSMike Rapoport (IBM) 	if (pageblock_order)
14689420f89dSMike Rapoport (IBM) 		return;
14699420f89dSMike Rapoport (IBM) 
14709420f89dSMike Rapoport (IBM) 	/* Don't let pageblocks exceed the maximum allocation granularity. */
14719420f89dSMike Rapoport (IBM) 	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
14729420f89dSMike Rapoport (IBM) 		order = HUGETLB_PAGE_ORDER;
14739420f89dSMike Rapoport (IBM) 
14749420f89dSMike Rapoport (IBM) 	/*
14759420f89dSMike Rapoport (IBM) 	 * Assume the largest contiguous order of interest is a huge page.
1476e99fb98dSKefeng Wang 	 * This value may be variable depending on boot parameters on powerpc.
14779420f89dSMike Rapoport (IBM) 	 */
14789420f89dSMike Rapoport (IBM) 	pageblock_order = order;
14799420f89dSMike Rapoport (IBM) }
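
/*
 * Illustration under assumed values: with MAX_PAGE_ORDER == 10 and a
 * boot-selected HUGETLB_PAGE_ORDER == 8, the clamp above picks
 * pageblock_order == 8, so a pageblock matches one huge page instead
 * of the maximum buddy allocation size.
 */
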
14809420f89dSMike Rapoport (IBM) #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
14819420f89dSMike Rapoport (IBM) 
14829420f89dSMike Rapoport (IBM) /*
14839420f89dSMike Rapoport (IBM)  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
14849420f89dSMike Rapoport (IBM)  * is unused as pageblock_order is set at compile-time. See
14859420f89dSMike Rapoport (IBM)  * include/linux/pageblock-flags.h for the values of pageblock_order based on
14869420f89dSMike Rapoport (IBM)  * the kernel config
14879420f89dSMike Rapoport (IBM)  */
14889420f89dSMike Rapoport (IBM) void __init set_pageblock_order(void)
14899420f89dSMike Rapoport (IBM) {
14909420f89dSMike Rapoport (IBM) }
14919420f89dSMike Rapoport (IBM) 
14929420f89dSMike Rapoport (IBM) #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
14939420f89dSMike Rapoport (IBM) 
14949420f89dSMike Rapoport (IBM) /*
14959420f89dSMike Rapoport (IBM)  * Set up the zone data structures
14969420f89dSMike Rapoport (IBM)  * - init pgdat internals
14979420f89dSMike Rapoport (IBM)  * - init all zones belonging to this node
14989420f89dSMike Rapoport (IBM)  *
14999420f89dSMike Rapoport (IBM)  * NOTE: this function is only called during memory hotplug
15009420f89dSMike Rapoport (IBM)  */
15019420f89dSMike Rapoport (IBM) #ifdef CONFIG_MEMORY_HOTPLUG
15029420f89dSMike Rapoport (IBM) void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
15039420f89dSMike Rapoport (IBM) {
15049420f89dSMike Rapoport (IBM) 	int nid = pgdat->node_id;
15059420f89dSMike Rapoport (IBM) 	enum zone_type z;
15069420f89dSMike Rapoport (IBM) 	int cpu;
15079420f89dSMike Rapoport (IBM) 
15089420f89dSMike Rapoport (IBM) 	pgdat_init_internals(pgdat);
15099420f89dSMike Rapoport (IBM) 
15109420f89dSMike Rapoport (IBM) 	if (pgdat->per_cpu_nodestats == &boot_nodestats)
15119420f89dSMike Rapoport (IBM) 		pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
15129420f89dSMike Rapoport (IBM) 
15139420f89dSMike Rapoport (IBM) 	/*
15149420f89dSMike Rapoport (IBM) 	 * Reset the nr_zones, order and highest_zoneidx before reuse.
15159420f89dSMike Rapoport (IBM) 	 * Note that kswapd will init kswapd_highest_zoneidx properly
15169420f89dSMike Rapoport (IBM) 	 * when it starts in the near future.
15179420f89dSMike Rapoport (IBM) 	 */
15189420f89dSMike Rapoport (IBM) 	pgdat->nr_zones = 0;
15199420f89dSMike Rapoport (IBM) 	pgdat->kswapd_order = 0;
15209420f89dSMike Rapoport (IBM) 	pgdat->kswapd_highest_zoneidx = 0;
15219420f89dSMike Rapoport (IBM) 	pgdat->node_start_pfn = 0;
152232b6a4a1SHaifeng Xu 	pgdat->node_present_pages = 0;
152332b6a4a1SHaifeng Xu 
15249420f89dSMike Rapoport (IBM) 	for_each_online_cpu(cpu) {
15259420f89dSMike Rapoport (IBM) 		struct per_cpu_nodestat *p;
15269420f89dSMike Rapoport (IBM) 
15279420f89dSMike Rapoport (IBM) 		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
15289420f89dSMike Rapoport (IBM) 		memset(p, 0, sizeof(*p));
15299420f89dSMike Rapoport (IBM) 	}
15309420f89dSMike Rapoport (IBM) 
153132b6a4a1SHaifeng Xu 	/*
153232b6a4a1SHaifeng Xu 	 * When memory is hot-added, all the memory is in offline state. So
153332b6a4a1SHaifeng Xu 	 * clear all zones' present_pages and managed_pages because they will
153432b6a4a1SHaifeng Xu 	 * be updated in online_pages() and offline_pages().
153532b6a4a1SHaifeng Xu 	 */
153632b6a4a1SHaifeng Xu 	for (z = 0; z < MAX_NR_ZONES; z++) {
153732b6a4a1SHaifeng Xu 		struct zone *zone = pgdat->node_zones + z;
153832b6a4a1SHaifeng Xu 
153932b6a4a1SHaifeng Xu 		zone->present_pages = 0;
154032b6a4a1SHaifeng Xu 		zone_init_internals(zone, z, nid, 0);
154132b6a4a1SHaifeng Xu 	}
15429420f89dSMike Rapoport (IBM) }
15439420f89dSMike Rapoport (IBM) #endif
15449420f89dSMike Rapoport (IBM) 
15459420f89dSMike Rapoport (IBM) /*
15469420f89dSMike Rapoport (IBM)  * Set up the zone data structures:
15479420f89dSMike Rapoport (IBM)  *   - mark all pages reserved
15489420f89dSMike Rapoport (IBM)  *   - mark all memory queues empty
15499420f89dSMike Rapoport (IBM)  *   - clear the memory bitmaps
15509420f89dSMike Rapoport (IBM)  *
15519420f89dSMike Rapoport (IBM)  * NOTE: pgdat should get zeroed by caller.
15529420f89dSMike Rapoport (IBM)  * NOTE: this function is only called during early init.
15539420f89dSMike Rapoport (IBM)  */
15549420f89dSMike Rapoport (IBM) static void __init free_area_init_core(struct pglist_data *pgdat)
15559420f89dSMike Rapoport (IBM) {
15569420f89dSMike Rapoport (IBM) 	enum zone_type j;
15579420f89dSMike Rapoport (IBM) 	int nid = pgdat->node_id;
15589420f89dSMike Rapoport (IBM) 
15599420f89dSMike Rapoport (IBM) 	pgdat_init_internals(pgdat);
15609420f89dSMike Rapoport (IBM) 	pgdat->per_cpu_nodestats = &boot_nodestats;
15619420f89dSMike Rapoport (IBM) 
15629420f89dSMike Rapoport (IBM) 	for (j = 0; j < MAX_NR_ZONES; j++) {
15639420f89dSMike Rapoport (IBM) 		struct zone *zone = pgdat->node_zones + j;
15649420f89dSMike Rapoport (IBM) 		unsigned long size, freesize, memmap_pages;
15659420f89dSMike Rapoport (IBM) 
15669420f89dSMike Rapoport (IBM) 		size = zone->spanned_pages;
15679420f89dSMike Rapoport (IBM) 		freesize = zone->present_pages;
15689420f89dSMike Rapoport (IBM) 
15699420f89dSMike Rapoport (IBM) 		/*
15709420f89dSMike Rapoport (IBM) 		 * Adjust freesize so that it accounts for how much memory
15719420f89dSMike Rapoport (IBM) 		 * is used by this zone for memmap. This affects the watermark
15729420f89dSMike Rapoport (IBM) 		 * and per-cpu initialisations
15739420f89dSMike Rapoport (IBM) 		 */
15749420f89dSMike Rapoport (IBM) 		memmap_pages = calc_memmap_size(size, freesize);
15759420f89dSMike Rapoport (IBM) 		if (!is_highmem_idx(j)) {
15769420f89dSMike Rapoport (IBM) 			if (freesize >= memmap_pages) {
15779420f89dSMike Rapoport (IBM) 				freesize -= memmap_pages;
15789420f89dSMike Rapoport (IBM) 				if (memmap_pages)
15799420f89dSMike Rapoport (IBM) 					pr_debug("  %s zone: %lu pages used for memmap\n",
15809420f89dSMike Rapoport (IBM) 						 zone_names[j], memmap_pages);
15819420f89dSMike Rapoport (IBM) 			} else
15829420f89dSMike Rapoport (IBM) 				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
15839420f89dSMike Rapoport (IBM) 					zone_names[j], memmap_pages, freesize);
15849420f89dSMike Rapoport (IBM) 		}
15859420f89dSMike Rapoport (IBM) 
15869420f89dSMike Rapoport (IBM) 		/* Account for reserved pages */
15879420f89dSMike Rapoport (IBM) 		if (j == 0 && freesize > dma_reserve) {
15889420f89dSMike Rapoport (IBM) 			freesize -= dma_reserve;
15899420f89dSMike Rapoport (IBM) 			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
15909420f89dSMike Rapoport (IBM) 		}
15919420f89dSMike Rapoport (IBM) 
15929420f89dSMike Rapoport (IBM) 		if (!is_highmem_idx(j))
15939420f89dSMike Rapoport (IBM) 			nr_kernel_pages += freesize;
15949420f89dSMike Rapoport (IBM) 		/* Charge for highmem memmap if there are enough kernel pages */
15959420f89dSMike Rapoport (IBM) 		else if (nr_kernel_pages > memmap_pages * 2)
15969420f89dSMike Rapoport (IBM) 			nr_kernel_pages -= memmap_pages;
15979420f89dSMike Rapoport (IBM) 		nr_all_pages += freesize;
15989420f89dSMike Rapoport (IBM) 
15999420f89dSMike Rapoport (IBM) 		/*
16009420f89dSMike Rapoport (IBM) 		 * Set an approximate value for lowmem here; it will be adjusted
16019420f89dSMike Rapoport (IBM) 		 * when the bootmem allocator frees pages into the buddy system.
16029420f89dSMike Rapoport (IBM) 		 * And all highmem pages will be managed by the buddy system.
16039420f89dSMike Rapoport (IBM) 		 */
16049420f89dSMike Rapoport (IBM) 		zone_init_internals(zone, j, nid, freesize);
16059420f89dSMike Rapoport (IBM) 
16069420f89dSMike Rapoport (IBM) 		if (!size)
16079420f89dSMike Rapoport (IBM) 			continue;
16089420f89dSMike Rapoport (IBM) 
16099420f89dSMike Rapoport (IBM) 		setup_usemap(zone);
16109420f89dSMike Rapoport (IBM) 		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
16119420f89dSMike Rapoport (IBM) 	}
16129420f89dSMike Rapoport (IBM) }
16139420f89dSMike Rapoport (IBM) 
16149420f89dSMike Rapoport (IBM) void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
16159420f89dSMike Rapoport (IBM) 			  phys_addr_t min_addr, int nid, bool exact_nid)
16169420f89dSMike Rapoport (IBM) {
16179420f89dSMike Rapoport (IBM) 	void *ptr;
16189420f89dSMike Rapoport (IBM) 
16199420f89dSMike Rapoport (IBM) 	if (exact_nid)
16209420f89dSMike Rapoport (IBM) 		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
16219420f89dSMike Rapoport (IBM) 						   MEMBLOCK_ALLOC_ACCESSIBLE,
16229420f89dSMike Rapoport (IBM) 						   nid);
16239420f89dSMike Rapoport (IBM) 	else
16249420f89dSMike Rapoport (IBM) 		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
16259420f89dSMike Rapoport (IBM) 						 MEMBLOCK_ALLOC_ACCESSIBLE,
16269420f89dSMike Rapoport (IBM) 						 nid);
16279420f89dSMike Rapoport (IBM) 
16289420f89dSMike Rapoport (IBM) 	if (ptr && size > 0)
16299420f89dSMike Rapoport (IBM) 		page_init_poison(ptr, size);
16309420f89dSMike Rapoport (IBM) 
16319420f89dSMike Rapoport (IBM) 	return ptr;
16329420f89dSMike Rapoport (IBM) }
16339420f89dSMike Rapoport (IBM) 
16349420f89dSMike Rapoport (IBM) #ifdef CONFIG_FLATMEM
16359420f89dSMike Rapoport (IBM) static void __init alloc_node_mem_map(struct pglist_data *pgdat)
16369420f89dSMike Rapoport (IBM) {
1637e99fb98dSKefeng Wang 	unsigned long start, offset, size, end;
1638e99fb98dSKefeng Wang 	struct page *map;
16399420f89dSMike Rapoport (IBM) 
16409420f89dSMike Rapoport (IBM) 	/* Skip empty nodes */
16419420f89dSMike Rapoport (IBM) 	if (!pgdat->node_spanned_pages)
16429420f89dSMike Rapoport (IBM) 		return;
16439420f89dSMike Rapoport (IBM) 
16449420f89dSMike Rapoport (IBM) 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
16459420f89dSMike Rapoport (IBM) 	offset = pgdat->node_start_pfn - start;
16469420f89dSMike Rapoport (IBM) 	/*
16475e0a760bSKirill A. Shutemov 	 * The zone's endpoints aren't required to be MAX_PAGE_ORDER
16489420f89dSMike Rapoport (IBM) 	 * aligned, but the node_mem_map endpoints must be, in order
16499420f89dSMike Rapoport (IBM) 	 * for the buddy allocator to function correctly.
16509420f89dSMike Rapoport (IBM) 	 */
1651e99fb98dSKefeng Wang 	end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES);
16529420f89dSMike Rapoport (IBM) 	size =  (end - start) * sizeof(struct page);
16539420f89dSMike Rapoport (IBM) 	map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
16549420f89dSMike Rapoport (IBM) 			   pgdat->node_id, false);
16559420f89dSMike Rapoport (IBM) 	if (!map)
16569420f89dSMike Rapoport (IBM) 		panic("Failed to allocate %ld bytes for node %d memory map\n",
16579420f89dSMike Rapoport (IBM) 		      size, pgdat->node_id);
16589420f89dSMike Rapoport (IBM) 	pgdat->node_mem_map = map + offset;
16599420f89dSMike Rapoport (IBM) 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
16609420f89dSMike Rapoport (IBM) 		 __func__, pgdat->node_id, (unsigned long)pgdat,
16619420f89dSMike Rapoport (IBM) 		 (unsigned long)pgdat->node_mem_map);
16629420f89dSMike Rapoport (IBM) #ifndef CONFIG_NUMA
1663e99fb98dSKefeng Wang 	/* the global mem_map is just set as node 0's */
16649420f89dSMike Rapoport (IBM) 	if (pgdat == NODE_DATA(0)) {
16659420f89dSMike Rapoport (IBM) 		mem_map = NODE_DATA(0)->node_mem_map;
16669420f89dSMike Rapoport (IBM) 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
16679420f89dSMike Rapoport (IBM) 			mem_map -= offset;
16689420f89dSMike Rapoport (IBM) 	}
16699420f89dSMike Rapoport (IBM) #endif
16709420f89dSMike Rapoport (IBM) }
16719420f89dSMike Rapoport (IBM) #else
16729420f89dSMike Rapoport (IBM) static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
16739420f89dSMike Rapoport (IBM) #endif /* CONFIG_FLATMEM */
16749420f89dSMike Rapoport (IBM) 
16759420f89dSMike Rapoport (IBM) /**
16769420f89dSMike Rapoport (IBM)  * get_pfn_range_for_nid - Return the start and end page frames for a node
16779420f89dSMike Rapoport (IBM)  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
16789420f89dSMike Rapoport (IBM)  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
16799420f89dSMike Rapoport (IBM)  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
16809420f89dSMike Rapoport (IBM)  *
16819420f89dSMike Rapoport (IBM)  * It returns the start and end page frame of a node based on information
16829420f89dSMike Rapoport (IBM)  * provided by memblock_set_node(). If called for a node
16833a29280aSMiaohe Lin  * with no available memory, the start and end PFNs will be 0.
16849420f89dSMike Rapoport (IBM)  */
16859420f89dSMike Rapoport (IBM) void __init get_pfn_range_for_nid(unsigned int nid,
16869420f89dSMike Rapoport (IBM) 			unsigned long *start_pfn, unsigned long *end_pfn)
16879420f89dSMike Rapoport (IBM) {
16889420f89dSMike Rapoport (IBM) 	unsigned long this_start_pfn, this_end_pfn;
16899420f89dSMike Rapoport (IBM) 	int i;
16909420f89dSMike Rapoport (IBM) 
16919420f89dSMike Rapoport (IBM) 	*start_pfn = -1UL;
16929420f89dSMike Rapoport (IBM) 	*end_pfn = 0;
16939420f89dSMike Rapoport (IBM) 
16949420f89dSMike Rapoport (IBM) 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
16959420f89dSMike Rapoport (IBM) 		*start_pfn = min(*start_pfn, this_start_pfn);
16969420f89dSMike Rapoport (IBM) 		*end_pfn = max(*end_pfn, this_end_pfn);
16979420f89dSMike Rapoport (IBM) 	}
16989420f89dSMike Rapoport (IBM) 
16999420f89dSMike Rapoport (IBM) 	if (*start_pfn == -1UL)
17009420f89dSMike Rapoport (IBM) 		*start_pfn = 0;
17019420f89dSMike Rapoport (IBM) }
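
/*
 * A minimal usage sketch (mirroring free_area_init_node() below), with
 * nid, start_pfn and end_pfn as illustrative locals:
 *
 *	unsigned long start_pfn = 0, end_pfn = 0;
 *
 *	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 *	if (start_pfn == end_pfn)
 *		pr_info("node %d is memoryless\n", nid);
 *
 * For a node with no available memory both PFNs come back as 0.
 */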
17029420f89dSMike Rapoport (IBM) 
17039420f89dSMike Rapoport (IBM) static void __init free_area_init_node(int nid)
17049420f89dSMike Rapoport (IBM) {
17059420f89dSMike Rapoport (IBM) 	pg_data_t *pgdat = NODE_DATA(nid);
17069420f89dSMike Rapoport (IBM) 	unsigned long start_pfn = 0;
17079420f89dSMike Rapoport (IBM) 	unsigned long end_pfn = 0;
17089420f89dSMike Rapoport (IBM) 
17099420f89dSMike Rapoport (IBM) 	/* pg_data_t should be reset to zero when it's allocated */
17109420f89dSMike Rapoport (IBM) 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
17119420f89dSMike Rapoport (IBM) 
17129420f89dSMike Rapoport (IBM) 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
17139420f89dSMike Rapoport (IBM) 
17149420f89dSMike Rapoport (IBM) 	pgdat->node_id = nid;
17159420f89dSMike Rapoport (IBM) 	pgdat->node_start_pfn = start_pfn;
17169420f89dSMike Rapoport (IBM) 	pgdat->per_cpu_nodestats = NULL;
17179420f89dSMike Rapoport (IBM) 
17189420f89dSMike Rapoport (IBM) 	if (start_pfn != end_pfn) {
17199420f89dSMike Rapoport (IBM) 		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
17209420f89dSMike Rapoport (IBM) 			(u64)start_pfn << PAGE_SHIFT,
17219420f89dSMike Rapoport (IBM) 			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
17229420f89dSMike Rapoport (IBM) 
17239420f89dSMike Rapoport (IBM) 		calculate_node_totalpages(pgdat, start_pfn, end_pfn);
1724ba1b67c7SHaifeng Xu 	} else {
1725ba1b67c7SHaifeng Xu 		pr_info("Initmem setup node %d as memoryless\n", nid);
1726ba1b67c7SHaifeng Xu 
1727ba1b67c7SHaifeng Xu 		reset_memoryless_node_totalpages(pgdat);
1728ba1b67c7SHaifeng Xu 	}
17299420f89dSMike Rapoport (IBM) 
17309420f89dSMike Rapoport (IBM) 	alloc_node_mem_map(pgdat);
17319420f89dSMike Rapoport (IBM) 	pgdat_set_deferred_range(pgdat);
17329420f89dSMike Rapoport (IBM) 
17339420f89dSMike Rapoport (IBM) 	free_area_init_core(pgdat);
17349420f89dSMike Rapoport (IBM) 	lru_gen_init_pgdat(pgdat);
17359420f89dSMike Rapoport (IBM) }
17369420f89dSMike Rapoport (IBM) 
17379420f89dSMike Rapoport (IBM) /* Any regular or high memory on that node? */
1738b894da04SHaifeng Xu static void __init check_for_memory(pg_data_t *pgdat)
17399420f89dSMike Rapoport (IBM) {
17409420f89dSMike Rapoport (IBM) 	enum zone_type zone_type;
17419420f89dSMike Rapoport (IBM) 
17429420f89dSMike Rapoport (IBM) 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
17439420f89dSMike Rapoport (IBM) 		struct zone *zone = &pgdat->node_zones[zone_type];
17449420f89dSMike Rapoport (IBM) 		if (populated_zone(zone)) {
17459420f89dSMike Rapoport (IBM) 			if (IS_ENABLED(CONFIG_HIGHMEM))
174691ff4d75SHaifeng Xu 				node_set_state(pgdat->node_id, N_HIGH_MEMORY);
17479420f89dSMike Rapoport (IBM) 			if (zone_type <= ZONE_NORMAL)
174891ff4d75SHaifeng Xu 				node_set_state(pgdat->node_id, N_NORMAL_MEMORY);
17499420f89dSMike Rapoport (IBM) 			break;
17509420f89dSMike Rapoport (IBM) 		}
17519420f89dSMike Rapoport (IBM) 	}
17529420f89dSMike Rapoport (IBM) }
17539420f89dSMike Rapoport (IBM) 
17549420f89dSMike Rapoport (IBM) #if MAX_NUMNODES > 1
17559420f89dSMike Rapoport (IBM) /*
17569420f89dSMike Rapoport (IBM)  * Figure out the number of possible node ids.
17579420f89dSMike Rapoport (IBM)  */
17589420f89dSMike Rapoport (IBM) void __init setup_nr_node_ids(void)
17599420f89dSMike Rapoport (IBM) {
17609420f89dSMike Rapoport (IBM) 	unsigned int highest;
17619420f89dSMike Rapoport (IBM) 
17629420f89dSMike Rapoport (IBM) 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
17639420f89dSMike Rapoport (IBM) 	nr_node_ids = highest + 1;
17649420f89dSMike Rapoport (IBM) }
17659420f89dSMike Rapoport (IBM) #endif
17669420f89dSMike Rapoport (IBM) 
17679420f89dSMike Rapoport (IBM) /*
17689420f89dSMike Rapoport (IBM)  * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
17699420f89dSMike Rapoport (IBM)  * such cases we allow max_zone_pfn to be sorted in descending order.
17709420f89dSMike Rapoport (IBM)  */
17715f300fd5SArnd Bergmann static bool arch_has_descending_max_zone_pfns(void)
17729420f89dSMike Rapoport (IBM) {
17735f300fd5SArnd Bergmann 	return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
17749420f89dSMike Rapoport (IBM) }
17759420f89dSMike Rapoport (IBM) 
17769420f89dSMike Rapoport (IBM) /**
17779420f89dSMike Rapoport (IBM)  * free_area_init - Initialise all pg_data_t and zone data
17789420f89dSMike Rapoport (IBM)  * @max_zone_pfn: an array of max PFNs for each zone
17799420f89dSMike Rapoport (IBM)  *
17809420f89dSMike Rapoport (IBM)  * This will call free_area_init_node() for each active node in the system.
17819420f89dSMike Rapoport (IBM)  * Using the page ranges provided by memblock_set_node(), the size of each
17829420f89dSMike Rapoport (IBM)  * zone in each node and their holes is calculated. If the maximum PFN
17839420f89dSMike Rapoport (IBM)  * zone in each node and their holes is calculated. If the maximum PFNs
17849420f89dSMike Rapoport (IBM)  * of two adjacent zones match, it is assumed that the higher zone is empty.
17859420f89dSMike Rapoport (IBM)  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
17869420f89dSMike Rapoport (IBM)  * starts where the previous one ended. For example, ZONE_DMA32 starts
17879420f89dSMike Rapoport (IBM)  * at arch_max_dma_pfn.
17889420f89dSMike Rapoport (IBM)  */
17899420f89dSMike Rapoport (IBM) void __init free_area_init(unsigned long *max_zone_pfn)
17909420f89dSMike Rapoport (IBM) {
17919420f89dSMike Rapoport (IBM) 	unsigned long start_pfn, end_pfn;
17929420f89dSMike Rapoport (IBM) 	int i, nid, zone;
17939420f89dSMike Rapoport (IBM) 	bool descending;
17949420f89dSMike Rapoport (IBM) 
17959420f89dSMike Rapoport (IBM) 	/* Record where the zone boundaries are */
17969420f89dSMike Rapoport (IBM) 	memset(arch_zone_lowest_possible_pfn, 0,
17979420f89dSMike Rapoport (IBM) 				sizeof(arch_zone_lowest_possible_pfn));
17989420f89dSMike Rapoport (IBM) 	memset(arch_zone_highest_possible_pfn, 0,
17999420f89dSMike Rapoport (IBM) 				sizeof(arch_zone_highest_possible_pfn));
18009420f89dSMike Rapoport (IBM) 
18019420f89dSMike Rapoport (IBM) 	start_pfn = PHYS_PFN(memblock_start_of_DRAM());
18029420f89dSMike Rapoport (IBM) 	descending = arch_has_descending_max_zone_pfns();
18039420f89dSMike Rapoport (IBM) 
18049420f89dSMike Rapoport (IBM) 	for (i = 0; i < MAX_NR_ZONES; i++) {
18059420f89dSMike Rapoport (IBM) 		if (descending)
18069420f89dSMike Rapoport (IBM) 			zone = MAX_NR_ZONES - i - 1;
18079420f89dSMike Rapoport (IBM) 		else
18089420f89dSMike Rapoport (IBM) 			zone = i;
18099420f89dSMike Rapoport (IBM) 
18109420f89dSMike Rapoport (IBM) 		if (zone == ZONE_MOVABLE)
18119420f89dSMike Rapoport (IBM) 			continue;
18129420f89dSMike Rapoport (IBM) 
18139420f89dSMike Rapoport (IBM) 		end_pfn = max(max_zone_pfn[zone], start_pfn);
18149420f89dSMike Rapoport (IBM) 		arch_zone_lowest_possible_pfn[zone] = start_pfn;
18159420f89dSMike Rapoport (IBM) 		arch_zone_highest_possible_pfn[zone] = end_pfn;
18169420f89dSMike Rapoport (IBM) 
18179420f89dSMike Rapoport (IBM) 		start_pfn = end_pfn;
18189420f89dSMike Rapoport (IBM) 	}
18199420f89dSMike Rapoport (IBM) 
18209420f89dSMike Rapoport (IBM) 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
18219420f89dSMike Rapoport (IBM) 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
18229420f89dSMike Rapoport (IBM) 	find_zone_movable_pfns_for_nodes();
18239420f89dSMike Rapoport (IBM) 
18249420f89dSMike Rapoport (IBM) 	/* Print out the zone ranges */
18259420f89dSMike Rapoport (IBM) 	pr_info("Zone ranges:\n");
18269420f89dSMike Rapoport (IBM) 	for (i = 0; i < MAX_NR_ZONES; i++) {
18279420f89dSMike Rapoport (IBM) 		if (i == ZONE_MOVABLE)
18289420f89dSMike Rapoport (IBM) 			continue;
18299420f89dSMike Rapoport (IBM) 		pr_info("  %-8s ", zone_names[i]);
18309420f89dSMike Rapoport (IBM) 		if (arch_zone_lowest_possible_pfn[i] ==
18319420f89dSMike Rapoport (IBM) 				arch_zone_highest_possible_pfn[i])
18329420f89dSMike Rapoport (IBM) 			pr_cont("empty\n");
18339420f89dSMike Rapoport (IBM) 		else
18349420f89dSMike Rapoport (IBM) 			pr_cont("[mem %#018Lx-%#018Lx]\n",
18359420f89dSMike Rapoport (IBM) 				(u64)arch_zone_lowest_possible_pfn[i]
18369420f89dSMike Rapoport (IBM) 					<< PAGE_SHIFT,
18379420f89dSMike Rapoport (IBM) 				((u64)arch_zone_highest_possible_pfn[i]
18389420f89dSMike Rapoport (IBM) 					<< PAGE_SHIFT) - 1);
18399420f89dSMike Rapoport (IBM) 	}
18409420f89dSMike Rapoport (IBM) 
18419420f89dSMike Rapoport (IBM) 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
18429420f89dSMike Rapoport (IBM) 	pr_info("Movable zone start for each node\n");
18439420f89dSMike Rapoport (IBM) 	for (i = 0; i < MAX_NUMNODES; i++) {
18449420f89dSMike Rapoport (IBM) 		if (zone_movable_pfn[i])
18459420f89dSMike Rapoport (IBM) 			pr_info("  Node %d: %#018Lx\n", i,
18469420f89dSMike Rapoport (IBM) 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
18479420f89dSMike Rapoport (IBM) 	}
18489420f89dSMike Rapoport (IBM) 
18499420f89dSMike Rapoport (IBM) 	/*
18509420f89dSMike Rapoport (IBM) 	 * Print out the early node map, and initialize the
18519420f89dSMike Rapoport (IBM) 	 * subsection-map relative to active online memory ranges to
18529420f89dSMike Rapoport (IBM) 	 * enable future "sub-section" extensions of the memory map.
18539420f89dSMike Rapoport (IBM) 	 */
18549420f89dSMike Rapoport (IBM) 	pr_info("Early memory node ranges\n");
18559420f89dSMike Rapoport (IBM) 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
18569420f89dSMike Rapoport (IBM) 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
18579420f89dSMike Rapoport (IBM) 			(u64)start_pfn << PAGE_SHIFT,
18589420f89dSMike Rapoport (IBM) 			((u64)end_pfn << PAGE_SHIFT) - 1);
18599420f89dSMike Rapoport (IBM) 		subsection_map_init(start_pfn, end_pfn - start_pfn);
18609420f89dSMike Rapoport (IBM) 	}
18619420f89dSMike Rapoport (IBM) 
18629420f89dSMike Rapoport (IBM) 	/* Initialise every node */
18639420f89dSMike Rapoport (IBM) 	mminit_verify_pageflags_layout();
18649420f89dSMike Rapoport (IBM) 	setup_nr_node_ids();
1865e3d9b45fSHaifeng Xu 	set_pageblock_order();
1866e3d9b45fSHaifeng Xu 
18679420f89dSMike Rapoport (IBM) 	for_each_node(nid) {
18689420f89dSMike Rapoport (IBM) 		pg_data_t *pgdat;
18699420f89dSMike Rapoport (IBM) 
18709420f89dSMike Rapoport (IBM) 		if (!node_online(nid)) {
18719420f89dSMike Rapoport (IBM) 			/* Allocator not initialized yet */
18729420f89dSMike Rapoport (IBM) 			pgdat = arch_alloc_nodedata(nid);
18739420f89dSMike Rapoport (IBM) 			if (!pgdat)
18749420f89dSMike Rapoport (IBM) 				panic("Cannot allocate %zuB for node %d.\n",
18759420f89dSMike Rapoport (IBM) 				       sizeof(*pgdat), nid);
18769420f89dSMike Rapoport (IBM) 			arch_refresh_nodedata(nid, pgdat);
1877837c2ba5SHaifeng Xu 			free_area_init_node(nid);
18789420f89dSMike Rapoport (IBM) 
18799420f89dSMike Rapoport (IBM) 			/*
18809420f89dSMike Rapoport (IBM) 			 * We do not want to confuse userspace by sysfs
18819420f89dSMike Rapoport (IBM) 			 * files/directories for node without any memory
18829420f89dSMike Rapoport (IBM) 			 * attached to it, so this node is not marked as
18839420f89dSMike Rapoport (IBM) 			 * N_MEMORY and not marked online so that no sysfs
18849420f89dSMike Rapoport (IBM) 			 * hierarchy will be created via register_one_node for
18859420f89dSMike Rapoport (IBM) 			 * it. The pgdat will get fully initialized by
18869420f89dSMike Rapoport (IBM) 			 * hotadd_init_pgdat() when memory is hotplugged into
18879420f89dSMike Rapoport (IBM) 			 * this node.
18889420f89dSMike Rapoport (IBM) 			 */
18899420f89dSMike Rapoport (IBM) 			continue;
18909420f89dSMike Rapoport (IBM) 		}
18919420f89dSMike Rapoport (IBM) 
18929420f89dSMike Rapoport (IBM) 		pgdat = NODE_DATA(nid);
18939420f89dSMike Rapoport (IBM) 		free_area_init_node(nid);
18949420f89dSMike Rapoport (IBM) 
18959420f89dSMike Rapoport (IBM) 		/* Any memory on that node */
18969420f89dSMike Rapoport (IBM) 		if (pgdat->node_present_pages)
18979420f89dSMike Rapoport (IBM) 			node_set_state(nid, N_MEMORY);
189891ff4d75SHaifeng Xu 		check_for_memory(pgdat);
18999420f89dSMike Rapoport (IBM) 	}
19009420f89dSMike Rapoport (IBM) 
19019420f89dSMike Rapoport (IBM) 	memmap_init();
1902534ef4e1SMike Rapoport (IBM) 
1903534ef4e1SMike Rapoport (IBM) 	/* disable hash distribution for systems with a single node */
1904534ef4e1SMike Rapoport (IBM) 	fixup_hashdist();
19059420f89dSMike Rapoport (IBM) }
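
/*
 * A sketch of a typical caller, loosely modeled on x86-64 (the exact
 * constants are an assumption, not defined in this file):
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
 *
 *	max_zone_pfns[ZONE_DMA]    = min(MAX_DMA_PFN, max_pfn);
 *	max_zone_pfns[ZONE_DMA32]  = min(MAX_DMA32_PFN, max_pfn);
 *	max_zone_pfns[ZONE_NORMAL] = max_pfn;
 *	free_area_init(max_zone_pfns);
 *
 * A zone whose max pfn equals the previous zone's ends up empty.
 */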
19069420f89dSMike Rapoport (IBM) 
19079420f89dSMike Rapoport (IBM) /**
19089420f89dSMike Rapoport (IBM)  * node_map_pfn_alignment - determine the maximum internode alignment
19099420f89dSMike Rapoport (IBM)  *
19109420f89dSMike Rapoport (IBM)  * This function should be called after node map is populated and sorted.
19119420f89dSMike Rapoport (IBM)  * It calculates the maximum power of two alignment which can distinguish
19129420f89dSMike Rapoport (IBM)  * all the nodes.
19139420f89dSMike Rapoport (IBM)  *
19149420f89dSMike Rapoport (IBM)  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
19159420f89dSMike Rapoport (IBM)  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
19169420f89dSMike Rapoport (IBM)  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
19179420f89dSMike Rapoport (IBM)  * shifted, 1GiB is enough and this function will indicate so.
19189420f89dSMike Rapoport (IBM)  *
19199420f89dSMike Rapoport (IBM)  * This is used to test whether pfn -> nid mapping of the chosen memory
19209420f89dSMike Rapoport (IBM)  * model has fine enough granularity to avoid incorrect mapping for the
19219420f89dSMike Rapoport (IBM)  * populated node map.
19229420f89dSMike Rapoport (IBM)  *
19239420f89dSMike Rapoport (IBM)  * Return: the determined alignment in pfn's.  0 if there is no alignment
19249420f89dSMike Rapoport (IBM)  * requirement (single node).
19259420f89dSMike Rapoport (IBM)  */
19269420f89dSMike Rapoport (IBM) unsigned long __init node_map_pfn_alignment(void)
19279420f89dSMike Rapoport (IBM) {
19289420f89dSMike Rapoport (IBM) 	unsigned long accl_mask = 0, last_end = 0;
19299420f89dSMike Rapoport (IBM) 	unsigned long start, end, mask;
19309420f89dSMike Rapoport (IBM) 	int last_nid = NUMA_NO_NODE;
19319420f89dSMike Rapoport (IBM) 	int i, nid;
19329420f89dSMike Rapoport (IBM) 
19339420f89dSMike Rapoport (IBM) 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
19349420f89dSMike Rapoport (IBM) 		if (!start || last_nid < 0 || last_nid == nid) {
19359420f89dSMike Rapoport (IBM) 			last_nid = nid;
19369420f89dSMike Rapoport (IBM) 			last_end = end;
19379420f89dSMike Rapoport (IBM) 			continue;
19389420f89dSMike Rapoport (IBM) 		}
19399420f89dSMike Rapoport (IBM) 
19409420f89dSMike Rapoport (IBM) 		/*
19419420f89dSMike Rapoport (IBM) 		 * Start with a mask granular enough to pin-point to the
19429420f89dSMike Rapoport (IBM) 		 * start pfn and tick off bits one-by-one until it becomes
19439420f89dSMike Rapoport (IBM) 		 * too coarse to separate the current node from the last.
19449420f89dSMike Rapoport (IBM) 		 */
19459420f89dSMike Rapoport (IBM) 		mask = ~((1 << __ffs(start)) - 1);
19469420f89dSMike Rapoport (IBM) 		while (mask && last_end <= (start & (mask << 1)))
19479420f89dSMike Rapoport (IBM) 			mask <<= 1;
19489420f89dSMike Rapoport (IBM) 
19499420f89dSMike Rapoport (IBM) 		/* accumulate all internode masks */
19509420f89dSMike Rapoport (IBM) 		accl_mask |= mask;
19519420f89dSMike Rapoport (IBM) 	}
19529420f89dSMike Rapoport (IBM) 
19539420f89dSMike Rapoport (IBM) 	/* convert mask to number of pages */
19549420f89dSMike Rapoport (IBM) 	return ~accl_mask + 1;
19559420f89dSMike Rapoport (IBM) }
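
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12, so 1GiB is
 * 0x40000 pfns): with node 0 covering pfns [0x10000, 0x50000) and
 * node 1 covering [0x50000, 0x90000), the node-1 range enters the
 * inner loop with a mask clearing the low __ffs(0x50000) == 16 bits.
 * Coarsening the mask by one more bit would no longer separate
 * 0x50000 from node 0's last pfns, so the loop stops at once and the
 * function returns 1 << 16 == 0x10000 pfns: the 256MiB alignment the
 * kernel-doc above promises for nodes shifted by 256MiB.
 */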
19569420f89dSMike Rapoport (IBM) 
19579420f89dSMike Rapoport (IBM) #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
19589420f89dSMike Rapoport (IBM) static void __init deferred_free_range(unsigned long pfn,
19599420f89dSMike Rapoport (IBM) 				       unsigned long nr_pages)
19609420f89dSMike Rapoport (IBM) {
19619420f89dSMike Rapoport (IBM) 	struct page *page;
19629420f89dSMike Rapoport (IBM) 	unsigned long i;
19639420f89dSMike Rapoport (IBM) 
19649420f89dSMike Rapoport (IBM) 	if (!nr_pages)
19659420f89dSMike Rapoport (IBM) 		return;
19669420f89dSMike Rapoport (IBM) 
19679420f89dSMike Rapoport (IBM) 	page = pfn_to_page(pfn);
19689420f89dSMike Rapoport (IBM) 
19699420f89dSMike Rapoport (IBM) 	/* Free a large naturally-aligned chunk if possible */
19703f6dac0fSKirill A. Shutemov 	if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
19713f6dac0fSKirill A. Shutemov 		for (i = 0; i < nr_pages; i += pageblock_nr_pages)
19723f6dac0fSKirill A. Shutemov 			set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
19735e0a760bSKirill A. Shutemov 		__free_pages_core(page, MAX_PAGE_ORDER);
19749420f89dSMike Rapoport (IBM) 		return;
19759420f89dSMike Rapoport (IBM) 	}
19769420f89dSMike Rapoport (IBM) 
19775e0a760bSKirill A. Shutemov 	/* Accept chunks smaller than MAX_PAGE_ORDER upfront */
1978dcdfdd40SKirill A. Shutemov 	accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages));
1979dcdfdd40SKirill A. Shutemov 
19809420f89dSMike Rapoport (IBM) 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
19819420f89dSMike Rapoport (IBM) 		if (pageblock_aligned(pfn))
19829420f89dSMike Rapoport (IBM) 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
19839420f89dSMike Rapoport (IBM) 		__free_pages_core(page, 0);
19849420f89dSMike Rapoport (IBM) 	}
19859420f89dSMike Rapoport (IBM) }
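
/*
 * For scale (illustrative, assuming the default MAX_PAGE_ORDER of 10
 * and 4KiB pages): a fully aligned MAX_ORDER_NR_PAGES chunk is freed
 * as a single order-10 block of 1024 pages (4MiB); anything smaller
 * or misaligned falls back to page-by-page order-0 frees.
 */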
19869420f89dSMike Rapoport (IBM) 
19879420f89dSMike Rapoport (IBM) /* Completion tracking for deferred_init_memmap() threads */
19889420f89dSMike Rapoport (IBM) static atomic_t pgdat_init_n_undone __initdata;
19899420f89dSMike Rapoport (IBM) static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
19909420f89dSMike Rapoport (IBM) 
19919420f89dSMike Rapoport (IBM) static inline void __init pgdat_init_report_one_done(void)
19929420f89dSMike Rapoport (IBM) {
19939420f89dSMike Rapoport (IBM) 	if (atomic_dec_and_test(&pgdat_init_n_undone))
19949420f89dSMike Rapoport (IBM) 		complete(&pgdat_init_all_done_comp);
19959420f89dSMike Rapoport (IBM) }
19969420f89dSMike Rapoport (IBM) 
19979420f89dSMike Rapoport (IBM) /*
19989420f89dSMike Rapoport (IBM)  * Returns true if the page needs to be initialized or freed to the buddy allocator.
19999420f89dSMike Rapoport (IBM)  *
20005e0a760bSKirill A. Shutemov  * We check if a current MAX_PAGE_ORDER block is valid by only checking the
20015e0a760bSKirill A. Shutemov  * validity of the head pfn.
20029420f89dSMike Rapoport (IBM)  */
20039420f89dSMike Rapoport (IBM) static inline bool __init deferred_pfn_valid(unsigned long pfn)
20049420f89dSMike Rapoport (IBM) {
20053f6dac0fSKirill A. Shutemov 	if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
20069420f89dSMike Rapoport (IBM) 		return false;
20079420f89dSMike Rapoport (IBM) 	return true;
20089420f89dSMike Rapoport (IBM) }
20099420f89dSMike Rapoport (IBM) 
20109420f89dSMike Rapoport (IBM) /*
20119420f89dSMike Rapoport (IBM)  * Free pages to the buddy allocator. Try to free aligned chunks of
20123f6dac0fSKirill A. Shutemov  * MAX_ORDER_NR_PAGES pages.
20139420f89dSMike Rapoport (IBM)  */
20149420f89dSMike Rapoport (IBM) static void __init deferred_free_pages(unsigned long pfn,
20159420f89dSMike Rapoport (IBM) 				       unsigned long end_pfn)
20169420f89dSMike Rapoport (IBM) {
20179420f89dSMike Rapoport (IBM) 	unsigned long nr_free = 0;
20189420f89dSMike Rapoport (IBM) 
20199420f89dSMike Rapoport (IBM) 	for (; pfn < end_pfn; pfn++) {
20209420f89dSMike Rapoport (IBM) 		if (!deferred_pfn_valid(pfn)) {
20219420f89dSMike Rapoport (IBM) 			deferred_free_range(pfn - nr_free, nr_free);
20229420f89dSMike Rapoport (IBM) 			nr_free = 0;
20233f6dac0fSKirill A. Shutemov 		} else if (IS_MAX_ORDER_ALIGNED(pfn)) {
20249420f89dSMike Rapoport (IBM) 			deferred_free_range(pfn - nr_free, nr_free);
20259420f89dSMike Rapoport (IBM) 			nr_free = 1;
20269420f89dSMike Rapoport (IBM) 		} else {
20279420f89dSMike Rapoport (IBM) 			nr_free++;
20289420f89dSMike Rapoport (IBM) 		}
20299420f89dSMike Rapoport (IBM) 	}
20309420f89dSMike Rapoport (IBM) 	/* Free the last block of pages to the allocator */
20319420f89dSMike Rapoport (IBM) 	deferred_free_range(pfn - nr_free, nr_free);
20329420f89dSMike Rapoport (IBM) }
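
/*
 * Illustrative trace of deferred_free_pages(0, 4096) where pfn_valid()
 * fails for the block head at pfn 1024: the run [0, 1024) is flushed
 * when pfn 1024 fails deferred_pfn_valid(), counting resumes at pfn
 * 1025 (only MAX_PAGE_ORDER-aligned heads are validated), the run
 * [1025, 2048) is flushed at the aligned pfn 2048, and [2048, 4096)
 * is flushed by the final call after the loop.
 */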
20339420f89dSMike Rapoport (IBM) 
20349420f89dSMike Rapoport (IBM) /*
20359420f89dSMike Rapoport (IBM)  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
20363f6dac0fSKirill A. Shutemov  * by performing them only once every MAX_ORDER_NR_PAGES pages.
20379420f89dSMike Rapoport (IBM)  * Return number of pages initialized.
20389420f89dSMike Rapoport (IBM)  */
20399420f89dSMike Rapoport (IBM) static unsigned long  __init deferred_init_pages(struct zone *zone,
20409420f89dSMike Rapoport (IBM) 						 unsigned long pfn,
20419420f89dSMike Rapoport (IBM) 						 unsigned long end_pfn)
20429420f89dSMike Rapoport (IBM) {
20439420f89dSMike Rapoport (IBM) 	int nid = zone_to_nid(zone);
20449420f89dSMike Rapoport (IBM) 	unsigned long nr_pages = 0;
20459420f89dSMike Rapoport (IBM) 	int zid = zone_idx(zone);
20469420f89dSMike Rapoport (IBM) 	struct page *page = NULL;
20479420f89dSMike Rapoport (IBM) 
20489420f89dSMike Rapoport (IBM) 	for (; pfn < end_pfn; pfn++) {
20499420f89dSMike Rapoport (IBM) 		if (!deferred_pfn_valid(pfn)) {
20509420f89dSMike Rapoport (IBM) 			page = NULL;
20519420f89dSMike Rapoport (IBM) 			continue;
20523f6dac0fSKirill A. Shutemov 		} else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
20539420f89dSMike Rapoport (IBM) 			page = pfn_to_page(pfn);
20549420f89dSMike Rapoport (IBM) 		} else {
20559420f89dSMike Rapoport (IBM) 			page++;
20569420f89dSMike Rapoport (IBM) 		}
20579420f89dSMike Rapoport (IBM) 		__init_single_page(page, pfn, zid, nid);
20589420f89dSMike Rapoport (IBM) 		nr_pages++;
20599420f89dSMike Rapoport (IBM) 	}
20609420f89dSMike Rapoport (IBM) 	return nr_pages;
20619420f89dSMike Rapoport (IBM) }
20629420f89dSMike Rapoport (IBM) 
20639420f89dSMike Rapoport (IBM) /*
20649420f89dSMike Rapoport (IBM)  * This function is meant to pre-load the iterator for the zone init.
20659420f89dSMike Rapoport (IBM)  * Specifically, it walks through the ranges until we are caught up to the
20669420f89dSMike Rapoport (IBM)  * first_init_pfn value and exits there. If we never encounter the value, we
20679420f89dSMike Rapoport (IBM)  * return false, indicating there are no valid ranges left.
20689420f89dSMike Rapoport (IBM)  */
20699420f89dSMike Rapoport (IBM) static bool __init
20709420f89dSMike Rapoport (IBM) deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
20719420f89dSMike Rapoport (IBM) 				    unsigned long *spfn, unsigned long *epfn,
20729420f89dSMike Rapoport (IBM) 				    unsigned long first_init_pfn)
20739420f89dSMike Rapoport (IBM) {
20749420f89dSMike Rapoport (IBM) 	u64 j;
20759420f89dSMike Rapoport (IBM) 
20769420f89dSMike Rapoport (IBM) 	/*
20779420f89dSMike Rapoport (IBM) 	 * Start out by walking through the ranges in this zone that have
20789420f89dSMike Rapoport (IBM) 	 * already been initialized. We don't need to do anything with them,
20799420f89dSMike Rapoport (IBM) 	 * so we just skip past them here.
20809420f89dSMike Rapoport (IBM) 	 */
20819420f89dSMike Rapoport (IBM) 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
20829420f89dSMike Rapoport (IBM) 		if (*epfn <= first_init_pfn)
20839420f89dSMike Rapoport (IBM) 			continue;
20849420f89dSMike Rapoport (IBM) 		if (*spfn < first_init_pfn)
20859420f89dSMike Rapoport (IBM) 			*spfn = first_init_pfn;
20869420f89dSMike Rapoport (IBM) 		*i = j;
20879420f89dSMike Rapoport (IBM) 		return true;
20889420f89dSMike Rapoport (IBM) 	}
20899420f89dSMike Rapoport (IBM) 
20909420f89dSMike Rapoport (IBM) 	return false;
20919420f89dSMike Rapoport (IBM) }
20929420f89dSMike Rapoport (IBM) 
20939420f89dSMike Rapoport (IBM) /*
20949420f89dSMike Rapoport (IBM)  * Initialize and free pages. We do it in two loops: first we initialize
20959420f89dSMike Rapoport (IBM)  * the struct pages, then we free them to the buddy allocator, because
20969420f89dSMike Rapoport (IBM)  * while freeing a page we may access struct pages ahead of it (when
20979420f89dSMike Rapoport (IBM)  * computing the buddy page in __free_one_page()).
20989420f89dSMike Rapoport (IBM)  *
20999420f89dSMike Rapoport (IBM)  * To try to keep some memory in the cache, the loop is broken along
21009420f89dSMike Rapoport (IBM)  * max page order boundaries. This way we will not cause any issues
21019420f89dSMike Rapoport (IBM)  * with the buddy page computation.
21029420f89dSMike Rapoport (IBM)  */
21039420f89dSMike Rapoport (IBM) static unsigned long __init
21049420f89dSMike Rapoport (IBM) deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
21059420f89dSMike Rapoport (IBM) 		       unsigned long *end_pfn)
21069420f89dSMike Rapoport (IBM) {
21079420f89dSMike Rapoport (IBM) 	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
21089420f89dSMike Rapoport (IBM) 	unsigned long spfn = *start_pfn, epfn = *end_pfn;
21099420f89dSMike Rapoport (IBM) 	unsigned long nr_pages = 0;
21109420f89dSMike Rapoport (IBM) 	u64 j = *i;
21119420f89dSMike Rapoport (IBM) 
21129420f89dSMike Rapoport (IBM) 	/* First we loop through and initialize the page values */
21139420f89dSMike Rapoport (IBM) 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
21149420f89dSMike Rapoport (IBM) 		unsigned long t;
21159420f89dSMike Rapoport (IBM) 
21169420f89dSMike Rapoport (IBM) 		if (mo_pfn <= *start_pfn)
21179420f89dSMike Rapoport (IBM) 			break;
21189420f89dSMike Rapoport (IBM) 
21199420f89dSMike Rapoport (IBM) 		t = min(mo_pfn, *end_pfn);
21209420f89dSMike Rapoport (IBM) 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
21219420f89dSMike Rapoport (IBM) 
21229420f89dSMike Rapoport (IBM) 		if (mo_pfn < *end_pfn) {
21239420f89dSMike Rapoport (IBM) 			*start_pfn = mo_pfn;
21249420f89dSMike Rapoport (IBM) 			break;
21259420f89dSMike Rapoport (IBM) 		}
21269420f89dSMike Rapoport (IBM) 	}
21279420f89dSMike Rapoport (IBM) 
21289420f89dSMike Rapoport (IBM) 	/* Reset values and now loop through freeing pages as needed */
21299420f89dSMike Rapoport (IBM) 	swap(j, *i);
21309420f89dSMike Rapoport (IBM) 
21319420f89dSMike Rapoport (IBM) 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
21329420f89dSMike Rapoport (IBM) 		unsigned long t;
21339420f89dSMike Rapoport (IBM) 
21349420f89dSMike Rapoport (IBM) 		if (mo_pfn <= spfn)
21359420f89dSMike Rapoport (IBM) 			break;
21369420f89dSMike Rapoport (IBM) 
21379420f89dSMike Rapoport (IBM) 		t = min(mo_pfn, epfn);
21389420f89dSMike Rapoport (IBM) 		deferred_free_pages(spfn, t);
21399420f89dSMike Rapoport (IBM) 
21409420f89dSMike Rapoport (IBM) 		if (mo_pfn <= epfn)
21419420f89dSMike Rapoport (IBM) 			break;
21429420f89dSMike Rapoport (IBM) 	}
21439420f89dSMike Rapoport (IBM) 
21449420f89dSMike Rapoport (IBM) 	return nr_pages;
21459420f89dSMike Rapoport (IBM) }
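
/*
 * Editorial note: mo_pfn above is ALIGN(*start_pfn + 1, ...) rather
 * than ALIGN(*start_pfn, ...) to guarantee forward progress; when
 * *start_pfn is already MAX_PAGE_ORDER aligned, mo_pfn still advances
 * to the next block boundary instead of staying put.
 */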
21469420f89dSMike Rapoport (IBM) 
21479420f89dSMike Rapoport (IBM) static void __init
21489420f89dSMike Rapoport (IBM) deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
21499420f89dSMike Rapoport (IBM) 			   void *arg)
21509420f89dSMike Rapoport (IBM) {
21519420f89dSMike Rapoport (IBM) 	unsigned long spfn, epfn;
21529420f89dSMike Rapoport (IBM) 	struct zone *zone = arg;
21539420f89dSMike Rapoport (IBM) 	u64 i;
21549420f89dSMike Rapoport (IBM) 
21559420f89dSMike Rapoport (IBM) 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
21569420f89dSMike Rapoport (IBM) 
21579420f89dSMike Rapoport (IBM) 	/*
21585e0a760bSKirill A. Shutemov 	 * Initialize and free pages in MAX_PAGE_ORDER sized increments so that
21595e0a760bSKirill A. Shutemov 	 * we can avoid introducing any issues with the buddy allocator.
21609420f89dSMike Rapoport (IBM) 	 */
21619420f89dSMike Rapoport (IBM) 	while (spfn < end_pfn) {
21629420f89dSMike Rapoport (IBM) 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
21639420f89dSMike Rapoport (IBM) 		cond_resched();
21649420f89dSMike Rapoport (IBM) 	}
21659420f89dSMike Rapoport (IBM) }
21669420f89dSMike Rapoport (IBM) 
21679420f89dSMike Rapoport (IBM) /* An arch may override for more concurrency. */
21689420f89dSMike Rapoport (IBM) __weak int __init
21699420f89dSMike Rapoport (IBM) deferred_page_init_max_threads(const struct cpumask *node_cpumask)
21709420f89dSMike Rapoport (IBM) {
21719420f89dSMike Rapoport (IBM) 	return 1;
21729420f89dSMike Rapoport (IBM) }
21739420f89dSMike Rapoport (IBM) 
21749420f89dSMike Rapoport (IBM) /* Initialise remaining memory on a node */
21759420f89dSMike Rapoport (IBM) static int __init deferred_init_memmap(void *data)
21769420f89dSMike Rapoport (IBM) {
21779420f89dSMike Rapoport (IBM) 	pg_data_t *pgdat = data;
21789420f89dSMike Rapoport (IBM) 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
21799420f89dSMike Rapoport (IBM) 	unsigned long spfn = 0, epfn = 0;
21809420f89dSMike Rapoport (IBM) 	unsigned long first_init_pfn, flags;
21819420f89dSMike Rapoport (IBM) 	unsigned long start = jiffies;
21829420f89dSMike Rapoport (IBM) 	struct zone *zone;
21839420f89dSMike Rapoport (IBM) 	int zid, max_threads;
21849420f89dSMike Rapoport (IBM) 	u64 i;
21859420f89dSMike Rapoport (IBM) 
21869420f89dSMike Rapoport (IBM) 	/* Bind memory initialisation thread to a local node if possible */
21879420f89dSMike Rapoport (IBM) 	if (!cpumask_empty(cpumask))
21889420f89dSMike Rapoport (IBM) 		set_cpus_allowed_ptr(current, cpumask);
21899420f89dSMike Rapoport (IBM) 
21909420f89dSMike Rapoport (IBM) 	pgdat_resize_lock(pgdat, &flags);
21919420f89dSMike Rapoport (IBM) 	first_init_pfn = pgdat->first_deferred_pfn;
21929420f89dSMike Rapoport (IBM) 	if (first_init_pfn == ULONG_MAX) {
21939420f89dSMike Rapoport (IBM) 		pgdat_resize_unlock(pgdat, &flags);
21949420f89dSMike Rapoport (IBM) 		pgdat_init_report_one_done();
21959420f89dSMike Rapoport (IBM) 		return 0;
21969420f89dSMike Rapoport (IBM) 	}
21979420f89dSMike Rapoport (IBM) 
21989420f89dSMike Rapoport (IBM) 	/* Sanity check boundaries */
21999420f89dSMike Rapoport (IBM) 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
22009420f89dSMike Rapoport (IBM) 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
22019420f89dSMike Rapoport (IBM) 	pgdat->first_deferred_pfn = ULONG_MAX;
22029420f89dSMike Rapoport (IBM) 
22039420f89dSMike Rapoport (IBM) 	/*
22049420f89dSMike Rapoport (IBM) 	 * Once we unlock here, the zone cannot be grown anymore. Thus, if an
22059420f89dSMike Rapoport (IBM) 	 * interrupt thread must allocate this early in boot, the zone must be
22069420f89dSMike Rapoport (IBM) 	 * pre-grown before deferred page initialization starts.
22079420f89dSMike Rapoport (IBM) 	 */
22089420f89dSMike Rapoport (IBM) 	pgdat_resize_unlock(pgdat, &flags);
22099420f89dSMike Rapoport (IBM) 
22109420f89dSMike Rapoport (IBM) 	/* Only the highest zone is deferred, so find it */
22119420f89dSMike Rapoport (IBM) 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
22129420f89dSMike Rapoport (IBM) 		zone = pgdat->node_zones + zid;
22139420f89dSMike Rapoport (IBM) 		if (first_init_pfn < zone_end_pfn(zone))
22149420f89dSMike Rapoport (IBM) 			break;
22159420f89dSMike Rapoport (IBM) 	}
22169420f89dSMike Rapoport (IBM) 
22179420f89dSMike Rapoport (IBM) 	/* If the zone is empty, somebody else may have already cleared it out */
22189420f89dSMike Rapoport (IBM) 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
22199420f89dSMike Rapoport (IBM) 						 first_init_pfn))
22209420f89dSMike Rapoport (IBM) 		goto zone_empty;
22219420f89dSMike Rapoport (IBM) 
22229420f89dSMike Rapoport (IBM) 	max_threads = deferred_page_init_max_threads(cpumask);
22239420f89dSMike Rapoport (IBM) 
22249420f89dSMike Rapoport (IBM) 	while (spfn < epfn) {
22259420f89dSMike Rapoport (IBM) 		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
22269420f89dSMike Rapoport (IBM) 		struct padata_mt_job job = {
22279420f89dSMike Rapoport (IBM) 			.thread_fn   = deferred_init_memmap_chunk,
22289420f89dSMike Rapoport (IBM) 			.fn_arg      = zone,
22299420f89dSMike Rapoport (IBM) 			.start       = spfn,
22309420f89dSMike Rapoport (IBM) 			.size        = epfn_align - spfn,
22319420f89dSMike Rapoport (IBM) 			.align       = PAGES_PER_SECTION,
22329420f89dSMike Rapoport (IBM) 			.min_chunk   = PAGES_PER_SECTION,
22339420f89dSMike Rapoport (IBM) 			.max_threads = max_threads,
2234*eb522866SGang Li 			.numa_aware  = false,
22359420f89dSMike Rapoport (IBM) 		};
22369420f89dSMike Rapoport (IBM) 
22379420f89dSMike Rapoport (IBM) 		padata_do_multithreaded(&job);
22389420f89dSMike Rapoport (IBM) 		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
22399420f89dSMike Rapoport (IBM) 						    epfn_align);
22409420f89dSMike Rapoport (IBM) 	}
22419420f89dSMike Rapoport (IBM) zone_empty:
22429420f89dSMike Rapoport (IBM) 	/* Sanity check that the next zone really is unpopulated */
22439420f89dSMike Rapoport (IBM) 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
22449420f89dSMike Rapoport (IBM) 
22459420f89dSMike Rapoport (IBM) 	pr_info("node %d deferred pages initialised in %ums\n",
22469420f89dSMike Rapoport (IBM) 		pgdat->node_id, jiffies_to_msecs(jiffies - start));
22479420f89dSMike Rapoport (IBM) 
22489420f89dSMike Rapoport (IBM) 	pgdat_init_report_one_done();
22499420f89dSMike Rapoport (IBM) 	return 0;
22509420f89dSMike Rapoport (IBM) }
22519420f89dSMike Rapoport (IBM) 
22529420f89dSMike Rapoport (IBM) /*
22539420f89dSMike Rapoport (IBM)  * If this zone has deferred pages, try to grow it by initializing enough
22549420f89dSMike Rapoport (IBM)  * deferred pages to satisfy the allocation specified by order, rounded up to
22559420f89dSMike Rapoport (IBM)  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
22569420f89dSMike Rapoport (IBM)  * of SECTION_SIZE bytes by initializing struct pages in increments of
22579420f89dSMike Rapoport (IBM)  * PAGES_PER_SECTION * sizeof(struct page) bytes.
22589420f89dSMike Rapoport (IBM)  *
22599420f89dSMike Rapoport (IBM)  * Return true when zone was grown, otherwise return false. We return true even
22609420f89dSMike Rapoport (IBM)  * when we grow less than requested, to let the caller decide if there are
22619420f89dSMike Rapoport (IBM)  * enough pages to satisfy the allocation.
22629420f89dSMike Rapoport (IBM)  *
22639420f89dSMike Rapoport (IBM)  * Note: We use noinline because this function is needed only during boot, and
22649420f89dSMike Rapoport (IBM)  * it is called from a __ref function, _deferred_grow_zone(). This way we
22659420f89dSMike Rapoport (IBM)  * make sure that it is not inlined into the permanent text section.
22669420f89dSMike Rapoport (IBM)  */
22679420f89dSMike Rapoport (IBM) bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
22689420f89dSMike Rapoport (IBM) {
22699420f89dSMike Rapoport (IBM) 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
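	/*
	 * Example (illustrative): with 4KiB pages and 128MiB memory
	 * sections, PAGES_PER_SECTION is 32768, so even an order-9
	 * request of 512 pages rounds nr_pages_needed up to one full
	 * 32768-page section.
	 */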
22709420f89dSMike Rapoport (IBM) 	pg_data_t *pgdat = zone->zone_pgdat;
22719420f89dSMike Rapoport (IBM) 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
22729420f89dSMike Rapoport (IBM) 	unsigned long spfn, epfn, flags;
22739420f89dSMike Rapoport (IBM) 	unsigned long nr_pages = 0;
22749420f89dSMike Rapoport (IBM) 	u64 i;
22759420f89dSMike Rapoport (IBM) 
22769420f89dSMike Rapoport (IBM) 	/* Only the last zone may have deferred pages */
22779420f89dSMike Rapoport (IBM) 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
22789420f89dSMike Rapoport (IBM) 		return false;
22799420f89dSMike Rapoport (IBM) 
22809420f89dSMike Rapoport (IBM) 	pgdat_resize_lock(pgdat, &flags);
22819420f89dSMike Rapoport (IBM) 
22829420f89dSMike Rapoport (IBM) 	/*
22839420f89dSMike Rapoport (IBM) 	 * If someone grew this zone while we were waiting for the spinlock, return
22849420f89dSMike Rapoport (IBM) 	 * true, as there might be enough pages already.
22859420f89dSMike Rapoport (IBM) 	 */
22869420f89dSMike Rapoport (IBM) 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
22879420f89dSMike Rapoport (IBM) 		pgdat_resize_unlock(pgdat, &flags);
22889420f89dSMike Rapoport (IBM) 		return true;
22899420f89dSMike Rapoport (IBM) 	}
22909420f89dSMike Rapoport (IBM) 
22919420f89dSMike Rapoport (IBM) 	/* If the zone is empty, somebody else may have already cleared it out */
22929420f89dSMike Rapoport (IBM) 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
22939420f89dSMike Rapoport (IBM) 						 first_deferred_pfn)) {
22949420f89dSMike Rapoport (IBM) 		pgdat->first_deferred_pfn = ULONG_MAX;
22959420f89dSMike Rapoport (IBM) 		pgdat_resize_unlock(pgdat, &flags);
22969420f89dSMike Rapoport (IBM) 		/* Retry only once. */
22979420f89dSMike Rapoport (IBM) 		return first_deferred_pfn != ULONG_MAX;
22989420f89dSMike Rapoport (IBM) 	}
22999420f89dSMike Rapoport (IBM) 
23009420f89dSMike Rapoport (IBM) 	/*
23015e0a760bSKirill A. Shutemov 	 * Initialize and free pages in MAX_PAGE_ORDER sized increments so
23029420f89dSMike Rapoport (IBM) 	 * that we can avoid introducing any issues with the buddy
23039420f89dSMike Rapoport (IBM) 	 * allocator.
23049420f89dSMike Rapoport (IBM) 	 */
23059420f89dSMike Rapoport (IBM) 	while (spfn < epfn) {
23069420f89dSMike Rapoport (IBM) 		/* update our first deferred PFN for this section */
23079420f89dSMike Rapoport (IBM) 		first_deferred_pfn = spfn;
23089420f89dSMike Rapoport (IBM) 
23099420f89dSMike Rapoport (IBM) 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
23109420f89dSMike Rapoport (IBM) 		touch_nmi_watchdog();
23119420f89dSMike Rapoport (IBM) 
23129420f89dSMike Rapoport (IBM) 		/* We should only stop along section boundaries */
23139420f89dSMike Rapoport (IBM) 		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
23149420f89dSMike Rapoport (IBM) 			continue;
23159420f89dSMike Rapoport (IBM) 
23169420f89dSMike Rapoport (IBM) 		/* If our quota has been met, we can stop here */
23179420f89dSMike Rapoport (IBM) 		if (nr_pages >= nr_pages_needed)
23189420f89dSMike Rapoport (IBM) 			break;
23199420f89dSMike Rapoport (IBM) 	}
23209420f89dSMike Rapoport (IBM) 
23219420f89dSMike Rapoport (IBM) 	pgdat->first_deferred_pfn = spfn;
23229420f89dSMike Rapoport (IBM) 	pgdat_resize_unlock(pgdat, &flags);
23239420f89dSMike Rapoport (IBM) 
23249420f89dSMike Rapoport (IBM) 	return nr_pages > 0;
23259420f89dSMike Rapoport (IBM) }
23269420f89dSMike Rapoport (IBM) 
23279420f89dSMike Rapoport (IBM) #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
23289420f89dSMike Rapoport (IBM) 
23299420f89dSMike Rapoport (IBM) #ifdef CONFIG_CMA
23309420f89dSMike Rapoport (IBM) void __init init_cma_reserved_pageblock(struct page *page)
23319420f89dSMike Rapoport (IBM) {
23329420f89dSMike Rapoport (IBM) 	unsigned i = pageblock_nr_pages;
23339420f89dSMike Rapoport (IBM) 	struct page *p = page;
23349420f89dSMike Rapoport (IBM) 
23359420f89dSMike Rapoport (IBM) 	do {
23369420f89dSMike Rapoport (IBM) 		__ClearPageReserved(p);
23379420f89dSMike Rapoport (IBM) 		set_page_count(p, 0);
23389420f89dSMike Rapoport (IBM) 	} while (++p, --i);
23399420f89dSMike Rapoport (IBM) 
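	/*
	 * The loop above zeroed every refcount; give the head page a
	 * refcount of one so that __free_pages() below hands the whole
	 * pageblock_order chunk to the buddy allocator as a CMA block.
	 */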
23409420f89dSMike Rapoport (IBM) 	set_pageblock_migratetype(page, MIGRATE_CMA);
23419420f89dSMike Rapoport (IBM) 	set_page_refcounted(page);
23429420f89dSMike Rapoport (IBM) 	__free_pages(page, pageblock_order);
23439420f89dSMike Rapoport (IBM) 
23449420f89dSMike Rapoport (IBM) 	adjust_managed_page_count(page, pageblock_nr_pages);
23459420f89dSMike Rapoport (IBM) 	page_zone(page)->cma_pages += pageblock_nr_pages;
23469420f89dSMike Rapoport (IBM) }
23479420f89dSMike Rapoport (IBM) #endif
23489420f89dSMike Rapoport (IBM) 
2349904d5857SKefeng Wang void set_zone_contiguous(struct zone *zone)
2350904d5857SKefeng Wang {
2351904d5857SKefeng Wang 	unsigned long block_start_pfn = zone->zone_start_pfn;
2352904d5857SKefeng Wang 	unsigned long block_end_pfn;
2353904d5857SKefeng Wang 
2354904d5857SKefeng Wang 	block_end_pfn = pageblock_end_pfn(block_start_pfn);
2355904d5857SKefeng Wang 	for (; block_start_pfn < zone_end_pfn(zone);
2356904d5857SKefeng Wang 			block_start_pfn = block_end_pfn,
2357904d5857SKefeng Wang 			 block_end_pfn += pageblock_nr_pages) {
2358904d5857SKefeng Wang 
2359904d5857SKefeng Wang 		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
2360904d5857SKefeng Wang 
2361904d5857SKefeng Wang 		if (!__pageblock_pfn_to_page(block_start_pfn,
2362904d5857SKefeng Wang 					     block_end_pfn, zone))
2363904d5857SKefeng Wang 			return;
2364904d5857SKefeng Wang 		cond_resched();
2365904d5857SKefeng Wang 	}
2366904d5857SKefeng Wang 
2367904d5857SKefeng Wang 	/* No holes were found, so mark the zone contiguous */
2368904d5857SKefeng Wang 	zone->contiguous = true;
2369904d5857SKefeng Wang }
2370904d5857SKefeng Wang 
23719420f89dSMike Rapoport (IBM) void __init page_alloc_init_late(void)
23729420f89dSMike Rapoport (IBM) {
23739420f89dSMike Rapoport (IBM) 	struct zone *zone;
23749420f89dSMike Rapoport (IBM) 	int nid;
23759420f89dSMike Rapoport (IBM) 
23769420f89dSMike Rapoport (IBM) #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
23779420f89dSMike Rapoport (IBM) 
23789420f89dSMike Rapoport (IBM) 	/* There will be num_node_state(N_MEMORY) threads */
23799420f89dSMike Rapoport (IBM) 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
23809420f89dSMike Rapoport (IBM) 	for_each_node_state(nid, N_MEMORY) {
23819420f89dSMike Rapoport (IBM) 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
23829420f89dSMike Rapoport (IBM) 	}
23839420f89dSMike Rapoport (IBM) 
23849420f89dSMike Rapoport (IBM) 	/* Block until all are initialised */
23859420f89dSMike Rapoport (IBM) 	wait_for_completion(&pgdat_init_all_done_comp);
23869420f89dSMike Rapoport (IBM) 
23879420f89dSMike Rapoport (IBM) 	/*
23889420f89dSMike Rapoport (IBM) 	 * We initialized the rest of the deferred pages.  Permanently disable
23899420f89dSMike Rapoport (IBM) 	 * on-demand struct page initialization.
23909420f89dSMike Rapoport (IBM) 	 */
23919420f89dSMike Rapoport (IBM) 	static_branch_disable(&deferred_pages);
23929420f89dSMike Rapoport (IBM) 
23939420f89dSMike Rapoport (IBM) 	/* Reinit limits that are based on free pages after the kernel is up */
23949420f89dSMike Rapoport (IBM) 	files_maxfiles_init();
23959420f89dSMike Rapoport (IBM) #endif
23969420f89dSMike Rapoport (IBM) 
23979420f89dSMike Rapoport (IBM) 	buffer_init();
23989420f89dSMike Rapoport (IBM) 
23999420f89dSMike Rapoport (IBM) 	/* Discard memblock private memory */
24009420f89dSMike Rapoport (IBM) 	memblock_discard();
24019420f89dSMike Rapoport (IBM) 
24029420f89dSMike Rapoport (IBM) 	for_each_node_state(nid, N_MEMORY)
24039420f89dSMike Rapoport (IBM) 		shuffle_free_memory(NODE_DATA(nid));
24049420f89dSMike Rapoport (IBM) 
24059420f89dSMike Rapoport (IBM) 	for_each_populated_zone(zone)
24069420f89dSMike Rapoport (IBM) 		set_zone_contiguous(zone);
2407de57807eSMike Rapoport (IBM) 
2408de57807eSMike Rapoport (IBM) 	/* Initialize page ext after all struct pages are initialized. */
2409de57807eSMike Rapoport (IBM) 	if (deferred_struct_pages)
2410de57807eSMike Rapoport (IBM) 		page_ext_init();
2411e95d372cSKefeng Wang 
2412e95d372cSKefeng Wang 	page_alloc_sysctl_init();
24139420f89dSMike Rapoport (IBM) }
24149420f89dSMike Rapoport (IBM) 
24159420f89dSMike Rapoport (IBM) #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
24169420f89dSMike Rapoport (IBM) /*
24179420f89dSMike Rapoport (IBM)  * Returns the number of pages that the arch has reserved but
24189420f89dSMike Rapoport (IBM)  * that are not known to alloc_large_system_hash().
24199420f89dSMike Rapoport (IBM)  */
24209420f89dSMike Rapoport (IBM) static unsigned long __init arch_reserved_kernel_pages(void)
24219420f89dSMike Rapoport (IBM) {
24229420f89dSMike Rapoport (IBM) 	return 0;
24239420f89dSMike Rapoport (IBM) }
24249420f89dSMike Rapoport (IBM) #endif
24259420f89dSMike Rapoport (IBM) 
24269420f89dSMike Rapoport (IBM) /*
24279420f89dSMike Rapoport (IBM)  * Adaptive scale is meant to reduce sizes of hash tables on large memory
24289420f89dSMike Rapoport (IBM)  * machines. As the memory size increases, the scale also increases, but at a
24299420f89dSMike Rapoport (IBM)  * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
24309420f89dSMike Rapoport (IBM)  * quadruples the scale is increased by one, which means the size of the hash
24319420f89dSMike Rapoport (IBM)  * table only doubles, instead of quadrupling as well.
24329420f89dSMike Rapoport (IBM)  * Because 32-bit systems cannot have the large amounts of physical memory
24339420f89dSMike Rapoport (IBM)  * where this scaling makes sense, it is disabled on such platforms.
24349420f89dSMike Rapoport (IBM)  */
24359420f89dSMike Rapoport (IBM) #if __BITS_PER_LONG > 32
24369420f89dSMike Rapoport (IBM) #define ADAPT_SCALE_BASE	(64ul << 30)
24379420f89dSMike Rapoport (IBM) #define ADAPT_SCALE_SHIFT	2
24389420f89dSMike Rapoport (IBM) #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
24399420f89dSMike Rapoport (IBM) #endif
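
/*
 * Worked example (illustrative): growing from 64GiB to 256GiB of RAM
 * quadruples nr_kernel_pages but raises the scale by one, so the
 * bucket count computed in alloc_large_system_hash() below only
 * doubles rather than quadrupling.
 */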
24409420f89dSMike Rapoport (IBM) 
24419420f89dSMike Rapoport (IBM) /*
24429420f89dSMike Rapoport (IBM)  * allocate a large system hash table from memblock, the page allocator or vmalloc
24439420f89dSMike Rapoport (IBM)  * - it is assumed that the hash table must contain an exact power-of-2
24449420f89dSMike Rapoport (IBM)  *   quantity of entries
24459420f89dSMike Rapoport (IBM)  * - limit is the number of hash buckets, not the total allocation size
24469420f89dSMike Rapoport (IBM)  */
24479420f89dSMike Rapoport (IBM) void *__init alloc_large_system_hash(const char *tablename,
24489420f89dSMike Rapoport (IBM) 				     unsigned long bucketsize,
24499420f89dSMike Rapoport (IBM) 				     unsigned long numentries,
24509420f89dSMike Rapoport (IBM) 				     int scale,
24519420f89dSMike Rapoport (IBM) 				     int flags,
24529420f89dSMike Rapoport (IBM) 				     unsigned int *_hash_shift,
24539420f89dSMike Rapoport (IBM) 				     unsigned int *_hash_mask,
24549420f89dSMike Rapoport (IBM) 				     unsigned long low_limit,
24559420f89dSMike Rapoport (IBM) 				     unsigned long high_limit)
24569420f89dSMike Rapoport (IBM) {
24579420f89dSMike Rapoport (IBM) 	unsigned long long max = high_limit;
24589420f89dSMike Rapoport (IBM) 	unsigned long log2qty, size;
24599420f89dSMike Rapoport (IBM) 	void *table;
24609420f89dSMike Rapoport (IBM) 	gfp_t gfp_flags;
24619420f89dSMike Rapoport (IBM) 	bool virt;
24629420f89dSMike Rapoport (IBM) 	bool huge;
24639420f89dSMike Rapoport (IBM) 
24649420f89dSMike Rapoport (IBM) 	/* allow the kernel cmdline to have a say */
24659420f89dSMike Rapoport (IBM) 	if (!numentries) {
24669420f89dSMike Rapoport (IBM) 		/* round applicable memory size up to nearest megabyte */
24679420f89dSMike Rapoport (IBM) 		numentries = nr_kernel_pages;
24689420f89dSMike Rapoport (IBM) 		numentries -= arch_reserved_kernel_pages();
24699420f89dSMike Rapoport (IBM) 
24709420f89dSMike Rapoport (IBM) 		/* Rounding up is unnecessary when PAGE_SIZE >= 1MB */
24719420f89dSMike Rapoport (IBM) 		if (PAGE_SIZE < SZ_1M)
24729420f89dSMike Rapoport (IBM) 			numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
24739420f89dSMike Rapoport (IBM) 
24749420f89dSMike Rapoport (IBM) #if __BITS_PER_LONG > 32
24759420f89dSMike Rapoport (IBM) 		if (!high_limit) {
24769420f89dSMike Rapoport (IBM) 			unsigned long adapt;
24779420f89dSMike Rapoport (IBM) 
24789420f89dSMike Rapoport (IBM) 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
24799420f89dSMike Rapoport (IBM) 			     adapt <<= ADAPT_SCALE_SHIFT)
24809420f89dSMike Rapoport (IBM) 				scale++;
24819420f89dSMike Rapoport (IBM) 		}
24829420f89dSMike Rapoport (IBM) #endif
24839420f89dSMike Rapoport (IBM) 
24849420f89dSMike Rapoport (IBM) 		/* limit to 1 bucket per 2^scale bytes of low memory */
24859420f89dSMike Rapoport (IBM) 		if (scale > PAGE_SHIFT)
24869420f89dSMike Rapoport (IBM) 			numentries >>= (scale - PAGE_SHIFT);
24879420f89dSMike Rapoport (IBM) 		else
24889420f89dSMike Rapoport (IBM) 			numentries <<= (PAGE_SHIFT - scale);
24899420f89dSMike Rapoport (IBM) 
24903fade62bSMiaohe Lin 		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
24919420f89dSMike Rapoport (IBM) 			numentries = PAGE_SIZE / bucketsize;
24929420f89dSMike Rapoport (IBM) 	}
24939420f89dSMike Rapoport (IBM) 	numentries = roundup_pow_of_two(numentries);
24949420f89dSMike Rapoport (IBM) 
24959420f89dSMike Rapoport (IBM) 	/* limit allocation size to 1/16 of total memory by default */
24969420f89dSMike Rapoport (IBM) 	if (max == 0) {
24979420f89dSMike Rapoport (IBM) 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
24989420f89dSMike Rapoport (IBM) 		do_div(max, bucketsize);
24999420f89dSMike Rapoport (IBM) 	}
25009420f89dSMike Rapoport (IBM) 	max = min(max, 0x80000000ULL);
25019420f89dSMike Rapoport (IBM) 
25029420f89dSMike Rapoport (IBM) 	if (numentries < low_limit)
25039420f89dSMike Rapoport (IBM) 		numentries = low_limit;
25049420f89dSMike Rapoport (IBM) 	if (numentries > max)
25059420f89dSMike Rapoport (IBM) 		numentries = max;
25069420f89dSMike Rapoport (IBM) 
25079420f89dSMike Rapoport (IBM) 	log2qty = ilog2(numentries);
25089420f89dSMike Rapoport (IBM) 
25099420f89dSMike Rapoport (IBM) 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
25109420f89dSMike Rapoport (IBM) 	do {
25119420f89dSMike Rapoport (IBM) 		virt = false;
25129420f89dSMike Rapoport (IBM) 		size = bucketsize << log2qty;
25139420f89dSMike Rapoport (IBM) 		if (flags & HASH_EARLY) {
25149420f89dSMike Rapoport (IBM) 			if (flags & HASH_ZERO)
25159420f89dSMike Rapoport (IBM) 				table = memblock_alloc(size, SMP_CACHE_BYTES);
25169420f89dSMike Rapoport (IBM) 			else
25179420f89dSMike Rapoport (IBM) 				table = memblock_alloc_raw(size,
25189420f89dSMike Rapoport (IBM) 							   SMP_CACHE_BYTES);
25195e0a760bSKirill A. Shutemov 		} else if (get_order(size) > MAX_PAGE_ORDER || hashdist) {
25209420f89dSMike Rapoport (IBM) 			table = vmalloc_huge(size, gfp_flags);
25219420f89dSMike Rapoport (IBM) 			virt = true;
25229420f89dSMike Rapoport (IBM) 			if (table)
25239420f89dSMike Rapoport (IBM) 				huge = is_vm_area_hugepages(table);
25249420f89dSMike Rapoport (IBM) 		} else {
25259420f89dSMike Rapoport (IBM) 			/*
25269420f89dSMike Rapoport (IBM) 			 * If bucketsize is not a power of two, we may free
25279420f89dSMike Rapoport (IBM) 			 * some pages at the end of the hash table, which
25289420f89dSMike Rapoport (IBM) 			 * alloc_pages_exact() does automatically.
25299420f89dSMike Rapoport (IBM) 			 */
25309420f89dSMike Rapoport (IBM) 			table = alloc_pages_exact(size, gfp_flags);
25319420f89dSMike Rapoport (IBM) 			kmemleak_alloc(table, size, 1, gfp_flags);
25329420f89dSMike Rapoport (IBM) 		}
25339420f89dSMike Rapoport (IBM) 	} while (!table && size > PAGE_SIZE && --log2qty);
25349420f89dSMike Rapoport (IBM) 
25359420f89dSMike Rapoport (IBM) 	if (!table)
25369420f89dSMike Rapoport (IBM) 		panic("Failed to allocate %s hash table\n", tablename);
25379420f89dSMike Rapoport (IBM) 
25389420f89dSMike Rapoport (IBM) 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
25399420f89dSMike Rapoport (IBM) 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
25409420f89dSMike Rapoport (IBM) 		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
25419420f89dSMike Rapoport (IBM) 
25429420f89dSMike Rapoport (IBM) 	if (_hash_shift)
25439420f89dSMike Rapoport (IBM) 		*_hash_shift = log2qty;
25449420f89dSMike Rapoport (IBM) 	if (_hash_mask)
25459420f89dSMike Rapoport (IBM) 		*_hash_mask = (1 << log2qty) - 1;
25469420f89dSMike Rapoport (IBM) 
25479420f89dSMike Rapoport (IBM) 	return table;
25489420f89dSMike Rapoport (IBM) }
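
/*
 * A sketch of a typical caller (illustrative, not a real call site):
 * numentries == 0 lets the table be sized from memory, scale == 14
 * asks for one bucket per 16KiB of low memory, and HASH_ZERO requests
 * pre-zeroed buckets:
 *
 *	table = alloc_large_system_hash("Example cache",
 *					sizeof(struct hlist_head),
 *					0, 14, HASH_ZERO,
 *					&hash_shift, NULL, 0, 0);
 */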
25499420f89dSMike Rapoport (IBM) 
25509420f89dSMike Rapoport (IBM) /**
25519420f89dSMike Rapoport (IBM)  * set_dma_reserve - set the specified number of pages reserved in the first zone
25529420f89dSMike Rapoport (IBM)  * @new_dma_reserve: The number of pages to mark reserved
25539420f89dSMike Rapoport (IBM)  *
25549420f89dSMike Rapoport (IBM)  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
25559420f89dSMike Rapoport (IBM)  * In the DMA zone, a significant percentage may be consumed by kernel image
25569420f89dSMike Rapoport (IBM)  * In the DMA zone, a significant percentage may be consumed by the kernel
25579420f89dSMike Rapoport (IBM)  * image and other unfreeable allocations, which can skew the watermarks badly.
25589420f89dSMike Rapoport (IBM)  * This function may optionally be used to account for unfreeable pages in the
25599420f89dSMike Rapoport (IBM)  * smaller per-cpu batchsize.
25609420f89dSMike Rapoport (IBM)  */
25619420f89dSMike Rapoport (IBM) void __init set_dma_reserve(unsigned long new_dma_reserve)
25629420f89dSMike Rapoport (IBM) {
25639420f89dSMike Rapoport (IBM) 	dma_reserve = new_dma_reserve;
25649420f89dSMike Rapoport (IBM) }
25659420f89dSMike Rapoport (IBM) 
25669420f89dSMike Rapoport (IBM) void __init memblock_free_pages(struct page *page, unsigned long pfn,
25679420f89dSMike Rapoport (IBM) 							unsigned int order)
25689420f89dSMike Rapoport (IBM) {
257061167ad5SYajun Deng 	if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
257161167ad5SYajun Deng 		int nid = early_pfn_to_nid(pfn);
257261167ad5SYajun Deng 
257361167ad5SYajun Deng 		if (!early_page_initialised(pfn, nid))
25749420f89dSMike Rapoport (IBM) 			return;
257561167ad5SYajun Deng 	}
257661167ad5SYajun Deng 
25779420f89dSMike Rapoport (IBM) 	if (!kmsan_memblock_free_pages(page, order)) {
25789420f89dSMike Rapoport (IBM) 		/* KMSAN will take care of these pages. */
25799420f89dSMike Rapoport (IBM) 		return;
25809420f89dSMike Rapoport (IBM) 	}
25819420f89dSMike Rapoport (IBM) 	__free_pages_core(page, order);
25829420f89dSMike Rapoport (IBM) }
2583b7ec1bf3SMike Rapoport (IBM) 
25845e7d5da2SKefeng Wang DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
25855e7d5da2SKefeng Wang EXPORT_SYMBOL(init_on_alloc);
25865e7d5da2SKefeng Wang 
25875e7d5da2SKefeng Wang DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
25885e7d5da2SKefeng Wang EXPORT_SYMBOL(init_on_free);
25895e7d5da2SKefeng Wang 
2590f2fc4b44SMike Rapoport (IBM) static bool _init_on_alloc_enabled_early __read_mostly
2591f2fc4b44SMike Rapoport (IBM) 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
2592f2fc4b44SMike Rapoport (IBM) static int __init early_init_on_alloc(char *buf)
2593f2fc4b44SMike Rapoport (IBM) {
2595f2fc4b44SMike Rapoport (IBM) 	return kstrtobool(buf, &_init_on_alloc_enabled_early);
2596f2fc4b44SMike Rapoport (IBM) }
2597f2fc4b44SMike Rapoport (IBM) early_param("init_on_alloc", early_init_on_alloc);
2598f2fc4b44SMike Rapoport (IBM) 
2599f2fc4b44SMike Rapoport (IBM) static bool _init_on_free_enabled_early __read_mostly
2600f2fc4b44SMike Rapoport (IBM) 				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
2601f2fc4b44SMike Rapoport (IBM) static int __init early_init_on_free(char *buf)
2602f2fc4b44SMike Rapoport (IBM) {
2603f2fc4b44SMike Rapoport (IBM) 	return kstrtobool(buf, &_init_on_free_enabled_early);
2604f2fc4b44SMike Rapoport (IBM) }
2605f2fc4b44SMike Rapoport (IBM) early_param("init_on_free", early_init_on_free);
2606f2fc4b44SMike Rapoport (IBM) 
2607f2fc4b44SMike Rapoport (IBM) DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
2608f2fc4b44SMike Rapoport (IBM) 
2609f2fc4b44SMike Rapoport (IBM) /*
2610f2fc4b44SMike Rapoport (IBM)  * Enable static keys related to various memory debugging and hardening options.
2611f2fc4b44SMike Rapoport (IBM)  * Some override others, and depend on early params that are evaluated in the
2612f2fc4b44SMike Rapoport (IBM)  * order of appearance. So we need to first gather the full picture of what was
2613f2fc4b44SMike Rapoport (IBM)  * enabled, and then make decisions.
2614f2fc4b44SMike Rapoport (IBM)  */
2615f2fc4b44SMike Rapoport (IBM) static void __init mem_debugging_and_hardening_init(void)
2616f2fc4b44SMike Rapoport (IBM) {
2617f2fc4b44SMike Rapoport (IBM) 	bool page_poisoning_requested = false;
2618f2fc4b44SMike Rapoport (IBM) 	bool want_check_pages = false;
2619f2fc4b44SMike Rapoport (IBM) 
2620f2fc4b44SMike Rapoport (IBM) #ifdef CONFIG_PAGE_POISONING
2621f2fc4b44SMike Rapoport (IBM) 	/*
2622f2fc4b44SMike Rapoport (IBM) 	 * Page poisoning doubles as debug page alloc on some arches. If
2623f2fc4b44SMike Rapoport (IBM) 	 * either of those options is enabled, enable poisoning.
2624f2fc4b44SMike Rapoport (IBM) 	 */
2625f2fc4b44SMike Rapoport (IBM) 	if (page_poisoning_enabled() ||
2626f2fc4b44SMike Rapoport (IBM) 	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
2627f2fc4b44SMike Rapoport (IBM) 	      debug_pagealloc_enabled())) {
2628f2fc4b44SMike Rapoport (IBM) 		static_branch_enable(&_page_poisoning_enabled);
2629f2fc4b44SMike Rapoport (IBM) 		page_poisoning_requested = true;
2630f2fc4b44SMike Rapoport (IBM) 		want_check_pages = true;
2631f2fc4b44SMike Rapoport (IBM) 	}
2632f2fc4b44SMike Rapoport (IBM) #endif
2633f2fc4b44SMike Rapoport (IBM) 
2634f2fc4b44SMike Rapoport (IBM) 	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
2635f2fc4b44SMike Rapoport (IBM) 	    page_poisoning_requested) {
2636f2fc4b44SMike Rapoport (IBM) 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
2637f2fc4b44SMike Rapoport (IBM) 			"will take precedence over init_on_alloc and init_on_free\n");
2638f2fc4b44SMike Rapoport (IBM) 		_init_on_alloc_enabled_early = false;
2639f2fc4b44SMike Rapoport (IBM) 		_init_on_free_enabled_early = false;
2640f2fc4b44SMike Rapoport (IBM) 	}
2641f2fc4b44SMike Rapoport (IBM) 
2642f2fc4b44SMike Rapoport (IBM) 	if (_init_on_alloc_enabled_early) {
2643f2fc4b44SMike Rapoport (IBM) 		want_check_pages = true;
2644f2fc4b44SMike Rapoport (IBM) 		static_branch_enable(&init_on_alloc);
2645f2fc4b44SMike Rapoport (IBM) 	} else {
2646f2fc4b44SMike Rapoport (IBM) 		static_branch_disable(&init_on_alloc);
2647f2fc4b44SMike Rapoport (IBM) 	}
2648f2fc4b44SMike Rapoport (IBM) 
2649f2fc4b44SMike Rapoport (IBM) 	if (_init_on_free_enabled_early) {
2650f2fc4b44SMike Rapoport (IBM) 		want_check_pages = true;
2651f2fc4b44SMike Rapoport (IBM) 		static_branch_enable(&init_on_free);
2652f2fc4b44SMike Rapoport (IBM) 	} else {
2653f2fc4b44SMike Rapoport (IBM) 		static_branch_disable(&init_on_free);
2654f2fc4b44SMike Rapoport (IBM) 	}
2655f2fc4b44SMike Rapoport (IBM) 
2656f2fc4b44SMike Rapoport (IBM) 	if (IS_ENABLED(CONFIG_KMSAN) &&
2657f2fc4b44SMike Rapoport (IBM) 	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
2658f2fc4b44SMike Rapoport (IBM) 		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
2659f2fc4b44SMike Rapoport (IBM) 
2660f2fc4b44SMike Rapoport (IBM) #ifdef CONFIG_DEBUG_PAGEALLOC
2661f2fc4b44SMike Rapoport (IBM) 	if (debug_pagealloc_enabled()) {
2662f2fc4b44SMike Rapoport (IBM) 		want_check_pages = true;
2663f2fc4b44SMike Rapoport (IBM) 		static_branch_enable(&_debug_pagealloc_enabled);
2664f2fc4b44SMike Rapoport (IBM) 
2665f2fc4b44SMike Rapoport (IBM) 		if (debug_guardpage_minorder())
2666f2fc4b44SMike Rapoport (IBM) 			static_branch_enable(&_debug_guardpage_enabled);
2667f2fc4b44SMike Rapoport (IBM) 	}
2668f2fc4b44SMike Rapoport (IBM) #endif
2669f2fc4b44SMike Rapoport (IBM) 
2670f2fc4b44SMike Rapoport (IBM) 	/*
2671f2fc4b44SMike Rapoport (IBM) 	 * Any page debugging or hardening option also enables sanity checking
2672f2fc4b44SMike Rapoport (IBM) 	 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
2673f2fc4b44SMike Rapoport (IBM) 	 * enabled already.
2674f2fc4b44SMike Rapoport (IBM) 	 */
2675f2fc4b44SMike Rapoport (IBM) 	if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
2676f2fc4b44SMike Rapoport (IBM) 		static_branch_enable(&check_pages_enabled);
2677f2fc4b44SMike Rapoport (IBM) }
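
/*
 * For example (illustrative, CONFIG_PAGE_POISONING kernel): booting
 * with "page_poisoning=on init_on_alloc=1" leaves poisoning enabled
 * while the precedence logic above forces init_on_alloc back off.
 */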
2678f2fc4b44SMike Rapoport (IBM) 
2679b7ec1bf3SMike Rapoport (IBM) /* Report memory auto-initialization states for this boot. */
2680b7ec1bf3SMike Rapoport (IBM) static void __init report_meminit(void)
2681b7ec1bf3SMike Rapoport (IBM) {
2682b7ec1bf3SMike Rapoport (IBM) 	const char *stack;
2683b7ec1bf3SMike Rapoport (IBM) 
2684b7ec1bf3SMike Rapoport (IBM) 	if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
2685b7ec1bf3SMike Rapoport (IBM) 		stack = "all(pattern)";
2686b7ec1bf3SMike Rapoport (IBM) 	else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
2687b7ec1bf3SMike Rapoport (IBM) 		stack = "all(zero)";
2688b7ec1bf3SMike Rapoport (IBM) 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
2689b7ec1bf3SMike Rapoport (IBM) 		stack = "byref_all(zero)";
2690b7ec1bf3SMike Rapoport (IBM) 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
2691b7ec1bf3SMike Rapoport (IBM) 		stack = "byref(zero)";
2692b7ec1bf3SMike Rapoport (IBM) 	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
2693b7ec1bf3SMike Rapoport (IBM) 		stack = "__user(zero)";
2694b7ec1bf3SMike Rapoport (IBM) 	else
2695b7ec1bf3SMike Rapoport (IBM) 		stack = "off";
2696b7ec1bf3SMike Rapoport (IBM) 
2697b7ec1bf3SMike Rapoport (IBM) 	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
2698b7ec1bf3SMike Rapoport (IBM) 		stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
2699b7ec1bf3SMike Rapoport (IBM) 		want_init_on_free() ? "on" : "off");
2700b7ec1bf3SMike Rapoport (IBM) 	if (want_init_on_free())
2701b7ec1bf3SMike Rapoport (IBM) 		pr_info("mem auto-init: clearing system memory may take some time...\n");
2702b7ec1bf3SMike Rapoport (IBM) }
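
/*
 * Illustrative boot log line from the function above (not from any
 * particular machine):
 *
 *   mem auto-init: stack:all(zero), heap alloc:on, heap free:off
 */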
2703b7ec1bf3SMike Rapoport (IBM) 
2704eb8589b4SMike Rapoport (IBM) static void __init mem_init_print_info(void)
2705eb8589b4SMike Rapoport (IBM) {
2706eb8589b4SMike Rapoport (IBM) 	unsigned long physpages, codesize, datasize, rosize, bss_size;
2707eb8589b4SMike Rapoport (IBM) 	unsigned long init_code_size, init_data_size;
2708eb8589b4SMike Rapoport (IBM) 
2709eb8589b4SMike Rapoport (IBM) 	physpages = get_num_physpages();
2710eb8589b4SMike Rapoport (IBM) 	codesize = _etext - _stext;
2711eb8589b4SMike Rapoport (IBM) 	datasize = _edata - _sdata;
2712eb8589b4SMike Rapoport (IBM) 	rosize = __end_rodata - __start_rodata;
2713eb8589b4SMike Rapoport (IBM) 	bss_size = __bss_stop - __bss_start;
2714eb8589b4SMike Rapoport (IBM) 	init_data_size = __init_end - __init_begin;
2715eb8589b4SMike Rapoport (IBM) 	init_code_size = _einittext - _sinittext;
2716eb8589b4SMike Rapoport (IBM) 
2717eb8589b4SMike Rapoport (IBM) 	/*
2718eb8589b4SMike Rapoport (IBM) 	 * Detect special cases and adjust section sizes accordingly:
2719eb8589b4SMike Rapoport (IBM) 	 * 1) .init.* may be embedded into .data sections
2720eb8589b4SMike Rapoport (IBM) 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
2721eb8589b4SMike Rapoport (IBM) 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
2722eb8589b4SMike Rapoport (IBM) 	 * 3) .rodata.* may be embedded into .text or .data sections.
2723eb8589b4SMike Rapoport (IBM) 	 */
2724eb8589b4SMike Rapoport (IBM) #define adj_init_size(start, end, size, pos, adj) \
2725eb8589b4SMike Rapoport (IBM) 	do { \
2726eb8589b4SMike Rapoport (IBM) 		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
2727eb8589b4SMike Rapoport (IBM) 			size -= adj; \
2728eb8589b4SMike Rapoport (IBM) 	} while (0)
2729eb8589b4SMike Rapoport (IBM) 
2730eb8589b4SMike Rapoport (IBM) 	adj_init_size(__init_begin, __init_end, init_data_size,
2731eb8589b4SMike Rapoport (IBM) 		     _sinittext, init_code_size);
2732eb8589b4SMike Rapoport (IBM) 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
2733eb8589b4SMike Rapoport (IBM) 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
2734eb8589b4SMike Rapoport (IBM) 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
2735eb8589b4SMike Rapoport (IBM) 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
2736eb8589b4SMike Rapoport (IBM) 
2737eb8589b4SMike Rapoport (IBM) #undef	adj_init_size
2738eb8589b4SMike Rapoport (IBM) 
2739eb8589b4SMike Rapoport (IBM) 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
2740eb8589b4SMike Rapoport (IBM) #ifdef	CONFIG_HIGHMEM
2741eb8589b4SMike Rapoport (IBM) 		", %luK highmem"
2742eb8589b4SMike Rapoport (IBM) #endif
2743eb8589b4SMike Rapoport (IBM) 		")\n",
2744eb8589b4SMike Rapoport (IBM) 		K(nr_free_pages()), K(physpages),
2745eb8589b4SMike Rapoport (IBM) 		codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
2746eb8589b4SMike Rapoport (IBM) 		(init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
2747eb8589b4SMike Rapoport (IBM) 		K(physpages - totalram_pages() - totalcma_pages),
2748eb8589b4SMike Rapoport (IBM) 		K(totalcma_pages)
2749eb8589b4SMike Rapoport (IBM) #ifdef	CONFIG_HIGHMEM
2750eb8589b4SMike Rapoport (IBM) 		, K(totalhigh_pages())
2751eb8589b4SMike Rapoport (IBM) #endif
2752eb8589b4SMike Rapoport (IBM) 		);
2753eb8589b4SMike Rapoport (IBM) }
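
/*
 * Illustrative banner from the function above (all numbers made up):
 *
 *   Memory: 16265284K/16702868K available (12345K kernel code, 2345K
 *   rwdata, 4567K rodata, 1234K init, 789K bss, 437584K reserved,
 *   0K cma-reserved)
 */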
2754eb8589b4SMike Rapoport (IBM) 
2755b7ec1bf3SMike Rapoport (IBM) /*
2756b7ec1bf3SMike Rapoport (IBM)  * Set up kernel memory allocators
2757b7ec1bf3SMike Rapoport (IBM)  */
2758b7ec1bf3SMike Rapoport (IBM) void __init mm_core_init(void)
2759b7ec1bf3SMike Rapoport (IBM) {
2760b7ec1bf3SMike Rapoport (IBM) 	/* Initializations relying on SMP setup */
2761b7ec1bf3SMike Rapoport (IBM) 	build_all_zonelists(NULL);
2762b7ec1bf3SMike Rapoport (IBM) 	page_alloc_init_cpuhp();
2763b7ec1bf3SMike Rapoport (IBM) 
2764b7ec1bf3SMike Rapoport (IBM) 	/*
2765b7ec1bf3SMike Rapoport (IBM) 	 * page_ext requires contiguous pages
27665e0a760bSKirill A. Shutemov 	 * bigger than MAX_PAGE_ORDER, unless SPARSEMEM is used.
2767b7ec1bf3SMike Rapoport (IBM) 	 */
2768b7ec1bf3SMike Rapoport (IBM) 	page_ext_init_flatmem();
2769f2fc4b44SMike Rapoport (IBM) 	mem_debugging_and_hardening_init();
2770cabdf74eSPeng Zhang 	kfence_alloc_pool_and_metadata();
2771b7ec1bf3SMike Rapoport (IBM) 	report_meminit();
2772b7ec1bf3SMike Rapoport (IBM) 	kmsan_init_shadow();
2773b7ec1bf3SMike Rapoport (IBM) 	stack_depot_early_init();
2774b7ec1bf3SMike Rapoport (IBM) 	mem_init();
2775b7ec1bf3SMike Rapoport (IBM) 	mem_init_print_info();
2776b7ec1bf3SMike Rapoport (IBM) 	kmem_cache_init();
2777b7ec1bf3SMike Rapoport (IBM) 	/*
2778b7ec1bf3SMike Rapoport (IBM) 	 * page_owner must be initialized after buddy is ready, and also after
2779b7ec1bf3SMike Rapoport (IBM) 	 * slab is ready so that stack_depot_init() works properly
2780b7ec1bf3SMike Rapoport (IBM) 	 */
2781b7ec1bf3SMike Rapoport (IBM) 	page_ext_init_flatmem_late();
2782b7ec1bf3SMike Rapoport (IBM) 	kmemleak_init();
27834cd1e9edSMike Rapoport (IBM) 	ptlock_cache_init();
27844cd1e9edSMike Rapoport (IBM) 	pgtable_cache_init();
2785b7ec1bf3SMike Rapoport (IBM) 	debug_objects_mem_init();
2786b7ec1bf3SMike Rapoport (IBM) 	vmalloc_init();
2787b7ec1bf3SMike Rapoport (IBM) 	/* If struct pages were not deferred, init page_ext now, as vmap is ready */
2788b7ec1bf3SMike Rapoport (IBM) 	if (!deferred_struct_pages)
2789b7ec1bf3SMike Rapoport (IBM) 		page_ext_init();
2790b7ec1bf3SMike Rapoport (IBM) 	/* Should be run before the first non-init thread is created */
2791b7ec1bf3SMike Rapoport (IBM) 	init_espfix_bsp();
2792b7ec1bf3SMike Rapoport (IBM) 	/* Should be run after espfix64 is set up. */
2793b7ec1bf3SMike Rapoport (IBM) 	pti_init();
2794b7ec1bf3SMike Rapoport (IBM) 	kmsan_init_runtime();
2795b7ec1bf3SMike Rapoport (IBM) 	mm_cache_init();
2796b7ec1bf3SMike Rapoport (IBM) }
2797