xref: /linux/arch/x86/kernel/setup_percpu.c (revision 722ecdbce68a87de2d9296f91308f44ea900a039)
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

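/*
 * Each possible CPU's number, initialized below in setup_per_cpu_areas().
 * On x86, raw_smp_processor_id() is simply this_cpu_read(cpu_number).
 */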
DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
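
/*
 * A minimal sketch of how these offsets are consumed (illustrative only;
 * "foo" is a made-up variable): per_cpu() adds a CPU's entry in
 * __per_cpu_offset[] to the variable's link-time address, roughly
 *
 *	DEFINE_PER_CPU(int, foo);
 *	int *p = (int *)((unsigned long)&foo + __per_cpu_offset[cpu]);
 *	// equivalent to p = &per_cpu(foo, cpu)
 *
 * while the this_cpu_* ops reach the local copy through the percpu
 * segment register and this_cpu_off, with no array lookup.
 */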

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif
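
/*
 * What the reserve buys, as a sketch (hypothetical module code, not part
 * of this file): a static percpu variable defined in a module is carved
 * out of the reserved region of the first chunk, keeping its address
 * within reach of the module's 32-bit relocations:
 *
 *	// in some_module.c (hypothetical)
 *	static DEFINE_PER_CPU(unsigned long, mod_counter);
 *
 *	static void bump(void)
 *	{
 *		this_cpu_inc(mod_counter);
 *	}
 *
 * Dynamically allocated percpu memory (alloc_percpu()) has no such
 * constraint since it is always reached through a runtime pointer.
 */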

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NUMA
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif

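/*
 * Node distance helper for the embed allocator below.  LOCAL_DISTANCE and
 * REMOTE_DISTANCE follow the ACPI SLIT convention (10 = same node, 20 =
 * remote node); the first-chunk code only uses this to decide which CPUs
 * may share an allocation group.
 */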
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NUMA
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return early_cpu_to_node(cpu);
}

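/*
 * Callback used by the page-based first chunk allocator (the fallback
 * below) to make sure page tables exist for a percpu address in the
 * vmalloc range before it is mapped.
 */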
void __init pcpu_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
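	/*
	 * 32-bit reaches percpu data through the %fs segment.  Decoding the
	 * descriptor below: flags 0x8092 = present, DPL 0, writable data
	 * segment with page granularity; limit 0xFFFFF pages covers the
	 * full 4 GiB, based at this CPU's percpu offset.
	 */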
	struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
					      0xFFFFF);

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}

void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and the vmalloc area isn't spacious enough
	 * on 32bit.  Use the page allocator in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
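	/*
	 * Note: the first chunk allocator can also be forced on the kernel
	 * command line with percpu_alloc=embed or percpu_alloc=page, which
	 * is what sets pcpu_chosen_fc before we get here.
	 */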
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
		size_t atom_size;

		/*
		 * On 64bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD.  This, in the future,
		 * can also allow using PMD mappings in vmalloc area.  Use
		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
		 * and large vmalloc area allocs can easily fail.
		 */
#ifdef CONFIG_X86_64
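		/* 2 MiB with 4 KiB base pages */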
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif
		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_cpu_to_node);
		if (rc < 0)
			pr_warn("%s allocator failed (%d), falling back to page size\n",
				pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_cpu_to_node);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	/* alrighty, percpu areas up and running */
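	/*
	 * delta converts the allocator's unit offsets, which are relative to
	 * pcpu_base_addr, into offsets relative to the kernel's linked
	 * .data..percpu section (__per_cpu_start), which is the base the
	 * percpu accessors assume.
	 */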
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
		per_cpu(x86_cpu_to_logical_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu numa_node is correct even when
		 * the boot cpu is on a node with no memory installed.
		 * Also, when MEMORY_HOTPLUG is defined, cpu_up() calls
		 * cpu_to_node() for the APs before per_cpu(numa_node) is
		 * set up later by c_init (i.e. intel_init/amd_init).
		 * So set it for all of them, the boot cpu and all APs.
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using .init.data
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (!cpu)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
	early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();

	/*
	 * Sync back kernel address range again.  We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm and arch_sync_kernel_mappings() doesn't sync to
	 * swapper_pg_dir on 32-bit. The per-cpu mappings need to be available
	 * there too.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();
}
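
/*
 * Illustrative recap (hypothetical code, not part of this file): once
 * setup_per_cpu_areas() has run, the usual percpu APIs are fully
 * functional, e.g.:
 *
 *	static DEFINE_PER_CPU(u64, irq_hits);
 *
 *	void note_irq(void)
 *	{
 *		this_cpu_inc(irq_hits);		// segment-relative, no lock
 *	}
 *
 *	u64 total_irq_hits(void)
 *	{
 *		u64 sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)	// walks __per_cpu_offset[]
 *			sum += per_cpu(irq_hits, cpu);
 *		return sum;
 *	}
 */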
248