/* arch/x86/kernel/setup_percpu.c @ revision 89c9215165ca609096e845926d9a18f1306176a4 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

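/* Each CPU's copy holds its own index; raw_smp_processor_id() reads it. */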
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

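/*
 * Initial percpu offset used by every CPU until setup_per_cpu_areas()
 * runs.  On 64-bit, percpu symbols are zero-based, so the boot offset
 * must point at the init copy of the percpu section (__per_cpu_load);
 * on 32-bit, symbols are linked at their final addresses and the
 * boot offset is simply 0.
 */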
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

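/*
 * Offset added to a percpu symbol's address by per_cpu() to reach a
 * given CPU's copy.  Every slot starts out at the boot offset and is
 * rewritten by setup_per_cpu_areas() once the real areas exist.
 */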
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/**
 * pcpu_need_numa - determine whether percpu allocation should consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should take NUMA into account.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

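		/* a second distinct online pgdat means NUMA placement matters */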
		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA-friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * PERCPU_DYNAMIC_RESERVE, and is allocated as a contiguous area using
 * the bootmem allocator and used as-is without being mapped into the
 * vmalloc area.  This lets the first chunk piggyback on the linear
 * physical PMD mapping and adds no extra TLB pressure.
 */
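/*
 * Resulting layout: cpu0's unit starts at pcpue_ptr, cpu1's at
 * pcpue_ptr + pcpue_unit_size, and so on; pcpue_get_page() below just
 * indexes page-wise into this flat area.
 */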
static void *pcpue_ptr __initdata;
static size_t pcpue_unit_size __initdata;

static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
{
	return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size
			    + ((size_t)pageno << PAGE_SHIFT));
}

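/*
 * Size one unit, grab a single contiguous bootmem block covering all
 * possible CPUs, and seed each unit with a copy of the static percpu
 * data.  Returns the unit size on success or -errno on failure, in
 * which case the caller falls back to the 4k allocator below.
 */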
static ssize_t __init setup_pcpu_embed(size_t static_size)
{
	unsigned int cpu;

	/*
	 * If large pages aren't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!cpu_has_pse || pcpu_need_numa())
		return -EINVAL;

	/* allocate and copy */
	pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
	/* max_t: PCPU_MIN_UNIT_SIZE is not necessarily a size_t */
	pcpue_unit_size = max_t(size_t, pcpue_unit_size, PCPU_MIN_UNIT_SIZE);
	pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
				       PAGE_SIZE);
	if (!pcpue_ptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load,
		       static_size);

	/* we're ready, commit */
	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
		pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size);

	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
				      pcpue_unit_size,
				      pcpue_unit_size - static_size, pcpue_ptr,
				      NULL);
}

/*
 * 4k page allocator
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page and most of initialization is done by the generic
 * setup function.
 */
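/*
 * Unlike the embedding allocator, pages allocated here may be
 * scattered in physical memory; the generic code maps them into the
 * vmalloc area, so percpu accesses go through ordinary 4k TLB entries.
 */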
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;

static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
	if (pageno < pcpu4k_nr_static_pages)
		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
	return NULL;
}

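/*
 * Callback for pcpu_setup_first_chunk(): make sure page tables cover
 * @addr before a percpu page is mapped there.
 */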
static void __init pcpu4k_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

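/*
 * Allocate the static area page-by-page (NUMA-aware via
 * pcpu_alloc_bootmem()), record the pages in pcpu4k_pages[], and hand
 * everything to the generic first-chunk setup.  The page array is
 * only needed during setup and is freed on both the success and the
 * error path.
 */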
static ssize_t __init setup_pcpu_4k(size_t static_size)
{
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_nr_static_pages = PFN_UP(static_size);

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
			       * sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate and copy */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
			void *ptr;

			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr)
				goto enomem;

			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* we're ready, commit */
	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
		pcpu4k_nr_static_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL,
				     pcpu4k_populate_pte);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}

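/*
 * On 32-bit, percpu variables are reached through a %fs-based GDT
 * data segment whose base is this CPU's percpu offset; 64-bit uses
 * the %gs base MSR instead, so there is nothing to do there.
 */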
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/* allocate percpu area */
	ret = setup_pcpu_embed(static_size);
	if (ret < 0)
		ret = setup_pcpu_4k(static_size);
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);

	pcpu_unit_size = ret;

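	/*
	 * percpu symbols are linked between __per_cpu_start and
	 * __per_cpu_end; delta rebases those link-time addresses onto
	 * the newly allocated first chunk, and cpu * pcpu_unit_size
	 * then selects each CPU's unit within it.
	 */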
	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using the
		 * .data.init area.  Reload any changed state for the
		 * boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);

		/* the old allocators' "ptr" is gone; print the offset */
		DBG("PERCPU: cpu %4d %#lx\n", cpu, per_cpu_offset(cpu));
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}