/* arch/x86/kernel/setup_percpu.c */
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

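/*
 * Each cpu's own number, kept in its percpu area; on x86 this is what
 * raw_smp_processor_id() reads.
 */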
DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

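/*
 * Offset of the boot-time percpu area.  On 64-bit, percpu symbols are
 * zero-based, so the initial area is reached by adding the load address
 * of the percpu section (__per_cpu_load); on 32-bit the symbols carry
 * their link-time addresses and the initial offset is simply 0.
 */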
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
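
/*
 * Illustrative only: with the generic accessors, per_cpu(var, cpu)
 * evaluates roughly to
 *
 *	*SHIFT_PERCPU_PTR(&var, __per_cpu_offset[cpu])
 *
 * i.e. &var displaced by that cpu's offset, which is why every slot
 * above must hold a usable value even before the real areas exist.
 */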

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine whether percpu allocation should consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif
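
/*
 * Only used on 32-bit: setup_per_cpu_areas() below falls back to
 * PCPU_FC_PAGE when the possible cpus span more than one node.
 */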

/**
 * pcpu_alloc_bootmem - NUMA-friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of the allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = memblock_alloc_from_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
						     BOOTMEM_ALLOC_ACCESSIBLE,
						     node);

		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return memblock_alloc_from_nopanic(size, align, goal);
#endif
}
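
/*
 * Illustrative only: on 64-bit the embed path below asks for
 * PMD-aligned blocks, so a request for cpu 3 would look roughly like
 *
 *	void *p = pcpu_alloc_bootmem(3, size, PMD_SIZE);
 */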

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}
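
/*
 * Both helpers above are handed to pcpu_embed_first_chunk() /
 * pcpu_page_first_chunk() in setup_per_cpu_areas() below as the
 * first-chunk memory callbacks.
 */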
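/*
 * Reported cpu-to-cpu distance; the embed allocator uses it to group
 * cpus on the same node into the same unit group.
 */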
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}

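/*
 * Page-table population callback for pcpu_page_first_chunk():
 * populate_extra_pte() allocates any missing intermediate levels so a
 * pte slot exists for @addr in the kernel mapping.
 */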
static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

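/*
 * On 32-bit, percpu variables are accessed through the %fs segment, so
 * each cpu gets a GDT data segment whose base is its percpu offset.
 * 64-bit instead loads the percpu base into MSR_GS_BASE and needs no
 * per-cpu descriptor here.
 */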
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
					      0xFFFFF);

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}

void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and vmalloc area isn't spacious enough
	 * on 32bit.  Use page in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
		size_t atom_size;

		/*
		 * On 64bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD.  This, in the future,
		 * can also allow using PMD mappings in vmalloc area.  Use
		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
		 * and large vmalloc area allocs can easily fail.
		 */
#ifdef CONFIG_X86_64
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif
		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_fc_alloc, pcpu_fc_free);
		if (rc < 0)
			pr_warning("%s allocator failed (%d), falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
					   pcpup_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	/* alrighty, percpu areas up and running */
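	/*
	 * pcpu_base_addr is where the first chunk actually landed, while
	 * __per_cpu_start is the link-time start of the percpu section;
	 * delta rebases the per-cpu offsets from one to the other.
	 */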
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
		per_cpu(x86_cpu_to_logical_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
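		/*
		 * On 64-bit, point irq_stack_ptr at the top of this cpu's
		 * interrupt stack; the stack grows down from
		 * irq_stack + IRQ_STACK_SIZE.
		 */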
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE;
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu numa_node is correct when the boot
		 * cpu is on a node that doesn't have memory installed.
		 * Also cpu_up() will call cpu_to_node() for APs when
		 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set
		 * up later by c_init (e.g. intel_init()/amd_init()).
		 * So set them all (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using .init.data
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (!cpu)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
	early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();

	/*
	 * Sync back kernel address range again.  We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm.  We can't reliably pick up percpu mappings
	 * using vmalloc_fault(), because exception dispatch needs
	 * percpu data.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();
}