xref: /linux/arch/x86/kernel/cpu/topology.c (revision 58aa34abe9954cd5dfbf322fc612146c5f45e52b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 #include <linux/cpu.h>
4 
5 #include <xen/xen.h>
6 
7 #include <asm/apic.h>
8 #include <asm/mpspec.h>
9 #include <asm/smp.h>
10 
/*
 * Map logical CPU index to physical local APIC ID (x86_cpu_to_apicid)
 * and to the matching ACPI processor ID (x86_cpu_to_acpiid).
 */
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, CPU_ACPIID_INVALID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);

/* Bitmap of physically present CPUs, indexed by APIC ID. */
DECLARE_BITMAP(phys_cpu_present_map, MAX_LOCAL_APIC) __read_mostly;

/* Used for CPU number allocation and parallel CPU bringup */
u32 cpuid_to_apicid[] __read_mostly = { [0 ... NR_CPUS - 1] = BAD_APICID, };

/*
 * Processor to be disabled specified by kernel parameter
 * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to
 * avoid undefined behaviour caused by sending INIT from AP to BSP.
 */
static u32 disabled_cpu_apicid __ro_after_init = BAD_APICID;

/* Count of registered usable CPUs and of registered-but-disabled CPUs. */
static unsigned int num_processors;
static unsigned int disabled_cpus;

/*
 * The number of allocated logical CPU IDs. Since logical CPU IDs are allocated
 * contiguously, it equals to current allocated max logical CPU ID plus 1.
 * All allocated CPU IDs should be in the [0, nr_logical_cpuids) range,
 * so the maximum of nr_logical_cpuids is nr_cpu_ids.
 *
 * NOTE: Reserve 0 for BSP.
 */
static int nr_logical_cpuids = 1;
44 
45 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
46 {
47 	return phys_id == (u64)cpuid_to_apicid[cpu];
48 }
49 
50 #ifdef CONFIG_SMP
51 static void cpu_mark_primary_thread(unsigned int cpu, unsigned int apicid)
52 {
53 	/* Isolate the SMT bit(s) in the APICID and check for 0 */
54 	u32 mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
55 
56 	if (smp_num_siblings == 1 || !(apicid & mask))
57 		cpumask_set_cpu(cpu, &__cpu_primary_thread_mask);
58 }
59 
60 /*
61  * Due to the utter mess of CPUID evaluation smp_num_siblings is not valid
62  * during early boot. Initialize the primary thread mask before SMP
63  * bringup.
64  */
65 static int __init smp_init_primary_thread_mask(void)
66 {
67 	unsigned int cpu;
68 
69 	/*
70 	 * XEN/PV provides either none or useless topology information.
71 	 * Pretend that all vCPUs are primary threads.
72 	 */
73 	if (xen_pv_domain()) {
74 		cpumask_copy(&__cpu_primary_thread_mask, cpu_possible_mask);
75 		return 0;
76 	}
77 
78 	for (cpu = 0; cpu < nr_logical_cpuids; cpu++)
79 		cpu_mark_primary_thread(cpu, cpuid_to_apicid[cpu]);
80 	return 0;
81 }
82 early_initcall(smp_init_primary_thread_mask);
83 #else
/* !CONFIG_SMP build: no sibling threads exist, nothing to mark. */
static inline void cpu_mark_primary_thread(unsigned int cpu, unsigned int apicid) { }
85 #endif
86 
87 static int topo_lookup_cpuid(u32 apic_id)
88 {
89 	int i;
90 
91 	/* CPU# to APICID mapping is persistent once it is established */
92 	for (i = 0; i < nr_logical_cpuids; i++) {
93 		if (cpuid_to_apicid[i] == apic_id)
94 			return i;
95 	}
96 	return -ENODEV;
97 }
98 
99 /*
100  * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
101  * and cpuid_to_apicid[] synchronized.
102  */
103 static int allocate_logical_cpuid(u32 apic_id)
104 {
105 	int cpu = topo_lookup_cpuid(apic_id);
106 
107 	if (cpu >= 0)
108 		return cpu;
109 
110 	/* Allocate a new cpuid. */
111 	if (nr_logical_cpuids >= nr_cpu_ids) {
112 		WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %u reached. "
113 			     "Processor %d/0x%x and the rest are ignored.\n",
114 			     nr_cpu_ids, nr_logical_cpuids, apic_id);
115 		return -EINVAL;
116 	}
117 
118 	cpuid_to_apicid[nr_logical_cpuids] = apic_id;
119 	return nr_logical_cpuids++;
120 }
121 
/*
 * Commit the @cpu -> @apicid association: record the APIC ID in the
 * per-CPU map, mark the CPU possible/present and account for it.
 */
static void cpu_update_apic(int cpu, u32 apicid)
{
#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
#endif
	set_cpu_possible(cpu, true);
	set_bit(apicid, phys_cpu_present_map);
	set_cpu_present(cpu, true);
	num_processors++;

	/*
	 * smp_num_siblings is only valid after boot-time CPUID evaluation,
	 * so the primary-thread classification is deferred during boot and
	 * done by smp_init_primary_thread_mask() instead.
	 */
	if (system_state != SYSTEM_BOOTING)
		cpu_mark_primary_thread(cpu, apicid);
}
135 
136 static int generic_processor_info(int apicid)
137 {
138 	int cpu, max = nr_cpu_ids;
139 
140 	/* The boot CPU must be set before MADT/MPTABLE parsing happens */
141 	if (cpuid_to_apicid[0] == BAD_APICID)
142 		panic("Boot CPU APIC not registered yet\n");
143 
144 	if (apicid == boot_cpu_physical_apicid)
145 		return 0;
146 
147 	if (disabled_cpu_apicid == apicid) {
148 		int thiscpu = num_processors + disabled_cpus;
149 
150 		pr_warn("APIC: Disabling requested cpu. Processor %d/0x%x ignored.\n",
151 			thiscpu, apicid);
152 
153 		disabled_cpus++;
154 		return -ENODEV;
155 	}
156 
157 	if (num_processors >= nr_cpu_ids) {
158 		int thiscpu = max + disabled_cpus;
159 
160 		pr_warn("APIC: NR_CPUS/possible_cpus limit of %i reached. "
161 			"Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
162 
163 		disabled_cpus++;
164 		return -EINVAL;
165 	}
166 
167 	cpu = allocate_logical_cpuid(apicid);
168 	if (cpu < 0) {
169 		disabled_cpus++;
170 		return -EINVAL;
171 	}
172 
173 	cpu_update_apic(cpu, apicid);
174 	return cpu;
175 }
176 
/* possible_cpus= command line override; -1 means "not specified". */
static int __initdata setup_possible_cpus = -1;
178 
/*
 * cpu_possible_mask should be static, it cannot change as cpu's
 * are onlined, or offlined. The reason is per-cpu data-structures
 * are allocated by some modules at init time, and don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_mask on the other hand can change dynamically.
 * In case when cpu_hotplug is not compiled, then we resort to current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with possible_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i, possible;

	/* maxcpus=0 / nosmp still needs one CPU: the boot processor */
	i = setup_max_cpus ?: 1;
	if (setup_possible_cpus == -1) {
		possible = num_processors;
#ifdef CONFIG_HOTPLUG_CPU
		/* Reserve room for the BIOS-disabled CPUs to be hotplugged */
		if (setup_max_cpus)
			possible += disabled_cpus;
#else
		/* No hotplug: cap at the maxcpus= limit right away */
		if (possible > i)
			possible = i;
#endif
	} else
		possible = setup_possible_cpus;

	total_cpus = max_t(int, possible, num_processors + disabled_cpus);

	/* nr_cpu_ids could be reduced via nr_cpus= */
	if (possible > nr_cpu_ids) {
		pr_warn("%d Processors exceeds NR_CPUS limit of %u\n",
			possible, nr_cpu_ids);
		possible = nr_cpu_ids;
	}

	/*
	 * With hotplug the maxcpus= cap only applies when bringup is
	 * disabled entirely (setup_max_cpus == 0); the #ifdef guards the
	 * condition of the following unbraced if.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	if (!setup_max_cpus)
#endif
	if (possible > i) {
		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
			possible, setup_max_cpus);
		possible = i;
	}

	set_nr_cpu_ids(possible);

	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	/* Rebuild cpu_possible_mask from scratch with the final count */
	reset_cpu_possible_mask();

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
}
241 
242 /**
243  * topology_register_apic - Register an APIC in early topology maps
244  * @apic_id:	The APIC ID to set up
245  * @acpi_id:	The ACPI ID associated to the APIC
246  * @present:	True if the corresponding CPU is present
247  */
248 void __init topology_register_apic(u32 apic_id, u32 acpi_id, bool present)
249 {
250 	int cpu;
251 
252 	if (apic_id >= MAX_LOCAL_APIC) {
253 		pr_err_once("APIC ID %x exceeds kernel limit of: %x\n", apic_id, MAX_LOCAL_APIC - 1);
254 		return;
255 	}
256 
257 	if (!present) {
258 		disabled_cpus++;
259 		return;
260 	}
261 
262 	cpu = generic_processor_info(apic_id);
263 	if (cpu >= 0)
264 		early_per_cpu(x86_cpu_to_acpiid, cpu) = acpi_id;
265 }
266 
/**
 * topology_register_boot_apic - Register the boot CPU APIC
 * @apic_id:	The APIC ID to set up
 *
 * Separate so CPU #0 can be assigned
 */
void __init topology_register_boot_apic(u32 apic_id)
{
	/* Logical CPU 0 is reserved for the BSP (see nr_logical_cpuids) */
	cpuid_to_apicid[0] = apic_id;
	cpu_update_apic(0, apic_id);
}
278 
279 #ifdef CONFIG_ACPI_HOTPLUG_CPU
280 /**
281  * topology_hotplug_apic - Handle a physical hotplugged APIC after boot
282  * @apic_id:	The APIC ID to set up
283  * @acpi_id:	The ACPI ID associated to the APIC
284  */
285 int topology_hotplug_apic(u32 apic_id, u32 acpi_id)
286 {
287 	int cpu;
288 
289 	if (apic_id >= MAX_LOCAL_APIC)
290 		return -EINVAL;
291 
292 	cpu = topo_lookup_cpuid(apic_id);
293 	if (cpu < 0) {
294 		cpu = generic_processor_info(apic_id);
295 		if (cpu >= 0)
296 			per_cpu(x86_cpu_to_acpiid, cpu) = acpi_id;
297 	}
298 	return cpu;
299 }
300 
/**
 * topology_hotunplug_apic - Remove a physical hotplugged APIC after boot
 * @cpu:	The CPU number for which the APIC ID is removed
 */
void topology_hotunplug_apic(unsigned int cpu)
{
	u32 apic_id = cpuid_to_apicid[cpu];

	/* Nothing to tear down if no APIC was ever registered for @cpu */
	if (apic_id == BAD_APICID)
		return;

	/*
	 * Note: the cpuid_to_apicid[] entry is deliberately kept - the
	 * CPU# to APICID mapping is persistent once established.
	 */
	per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	clear_bit(apic_id, phys_cpu_present_map);
	set_cpu_present(cpu, false);
	num_processors--;
}
317 #endif
318 
/* Parse the possible_cpus=NUM command line parameter */
static int __init _setup_possible_cpus(char *str)
{
	get_option(&str, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);
325 
326 static int __init apic_set_disabled_cpu_apicid(char *arg)
327 {
328 	if (!arg || !get_option(&arg, &disabled_cpu_apicid))
329 		return -EINVAL;
330 
331 	return 0;
332 }
333 early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid);
334