/* arch/x86/kernel/setup_percpu.c */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
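	/* the "goal" asks bootmem to prefer memory above the DMA zone */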
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size)
{
	return pcpu_alloc_bootmem(cpu, size, size);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

/*
 * Large page remapping allocator
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
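/*
 * Map the PMD-sized region at @addr to the physical pages backing
 * @ptr with a single large page entry; @size is expected to be
 * PMD_SIZE here.
 */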
static void __init pcpul_map(void *ptr, size_t size, void *addr)
{
	pmd_t *pmd, pmd_v;

	pmd = populate_extra_pmd((unsigned long)addr);
	pmd_v = pfn_pmd(page_to_pfn(virt_to_page(ptr)), PAGE_KERNEL_LARGE);
	set_pmd(pmd, pmd_v);
}

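/* CPUs on the same NUMA node are local to each other, all others remote */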
static int pcpu_lpage_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

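/*
 * Size the first chunk so that each unit can be remapped with large
 * (PMD) pages, grouping CPUs by NUMA distance through the unit map.
 */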
static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t dyn_size = reserve - PERCPU_FIRST_CHUNK_RESERVE;
	size_t unit_map_size, unit_size;
	int *unit_map;
	int nr_units;
	ssize_t ret;

	/* on non-NUMA, embedding is better */
	if (!chosen && !pcpu_need_numa())
		return -EINVAL;

	/* need PSE */
	if (!cpu_has_pse) {
		pr_warning("PERCPU: lpage allocator requires PSE\n");
		return -EINVAL;
	}

	/* allocate and build unit_map */
	unit_map_size = nr_cpu_ids * sizeof(int);
	unit_map = alloc_bootmem_nopanic(unit_map_size);
	if (!unit_map) {
		pr_warning("PERCPU: failed to allocate unit_map\n");
		return -ENOMEM;
	}

	ret = pcpu_lpage_build_unit_map(static_size,
					PERCPU_FIRST_CHUNK_RESERVE,
					&dyn_size, &unit_size, PMD_SIZE,
					unit_map, pcpu_lpage_cpu_distance);
	if (ret < 0) {
		pr_warning("PERCPU: failed to build unit_map\n");
		goto out_free;
	}
	nr_units = ret;

	/* do the parameters look okay? */
	if (!chosen) {
		size_t vm_size = VMALLOC_END - VMALLOC_START;
		size_t tot_size = nr_units * unit_size;

		/* don't consume more than 20% of vmalloc area */
		if (tot_size > vm_size / 5) {
			pr_info("PERCPU: too large chunk size %zuMB for "
				"large page remap\n", tot_size >> 20);
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = pcpu_lpage_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				     dyn_size, unit_size, PMD_SIZE,
				     unit_map, nr_units,
				     pcpu_fc_alloc, pcpu_fc_free, pcpul_map);
out_free:
	if (ret < 0)
		free_bootmem(__pa(unit_map), unit_map_size);
	return ret;
}
#else
static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
	return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * module and dynamic reserves and embedded into the linear physical
 * mapping so that it can use PMD mapping without additional TLB
 * pressure.
 */
static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
		return -EINVAL;

	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE);
}

/*
 * Page allocator
 *
 * Boring fallback 4k page allocator.  This allocator puts more
 * pressure on PTE TLBs but other than that behaves nicely on both UMA
 * and NUMA.
 */
static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_page(size_t static_size)
{
	return pcpu_page_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				     pcpu_fc_alloc, pcpu_fc_free,
				     pcpup_populate_pte);
}

static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
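	/*
	 * Install a 4GB writable data segment based at this CPU's
	 * percpu offset; the GDT_ENTRY_PERCPU selector is loaded into
	 * the percpu segment register during CPU bringup.
	 */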
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  If PSE is supported, try to make use
	 * of large page mappings.  Please read comments on top of
	 * each allocator for details.
	 */
	ret = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_AUTO) {
		if (pcpu_chosen_fc != PCPU_FC_PAGE) {
			if (pcpu_chosen_fc == PCPU_FC_LPAGE)
				ret = setup_pcpu_lpage(static_size, true);
			else
				ret = setup_pcpu_embed(static_size, true);

			if (ret < 0)
				pr_warning("PERCPU: %s allocator failed (%zd), "
					   "falling back to page size\n",
					   pcpu_fc_names[pcpu_chosen_fc], ret);
		}
	} else {
		ret = setup_pcpu_lpage(static_size, false);
		if (ret < 0)
			ret = setup_pcpu_embed(static_size, false);
	}
	if (ret < 0)
		ret = setup_pcpu_page(static_size);
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);

	pcpu_unit_size = ret;

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
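	/*
	 * delta converts an address in the linked percpu image to its
	 * address in the first chunk; adding a unit's offset within
	 * the chunk yields each CPU's percpu offset.
	 */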
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) =
			delta + pcpu_unit_map[cpu] * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
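		/*
		 * irq_stack_ptr points at the top of this CPU's IRQ
		 * stack, minus 64 bytes kept free at the very top.
		 */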
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using the
		 * .data.init area.  Reload any changed state for the
		 * boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	/*
	 * Make sure the boot cpu's node_number is right when the boot
	 * cpu is on a node that doesn't have memory installed.
	 */
	per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}