xref: /linux/arch/x86/kernel/cpu/topology.c (revision 8098428c541212e9835c1771ee90caa968ffef4f)
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/cpu.h>

#include <xen/xen.h>

#include <asm/apic.h>
#include <asm/mpspec.h>
#include <asm/smp.h>

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, CPU_ACPIID_INVALID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);

/* Bitmap of physically present CPUs. */
DECLARE_BITMAP(phys_cpu_present_map, MAX_LOCAL_APIC) __read_mostly;

/* Used for CPU number allocation and parallel CPU bringup */
u32 cpuid_to_apicid[] __read_mostly = { [0 ... NR_CPUS - 1] = BAD_APICID, };

/*
 * APIC ID of the processor to be disabled, specified via the kernel
 * parameter disable_cpu_apicid=<int>. Mostly used by the kdump second
 * kernel to avoid undefined behaviour caused by sending an INIT from
 * an AP to the BSP.
 */
static u32 disabled_cpu_apicid __ro_after_init = BAD_APICID;

unsigned int num_processors;
unsigned int disabled_cpus;

/*
 * The number of allocated logical CPU IDs. Since logical CPU IDs are
 * allocated contiguously, it equals the currently allocated maximum
 * logical CPU ID plus 1. All allocated CPU IDs should be in the
 * [0, nr_logical_cpuids) range, so the maximum value of nr_logical_cpuids
 * is nr_cpu_ids.
 *
 * NOTE: CPU ID 0 is reserved for the BSP.
 */
static int nr_logical_cpuids = 1;

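/*
 * Arch hook for generic code: report whether @phys_id (the APIC ID on
 * x86) is the physical ID of logical CPU @cpu.
 */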
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == (u64)cpuid_to_apicid[cpu];
}

#ifdef CONFIG_SMP
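/*
 * A "primary" thread is the first SMT sibling of a core, i.e. the one
 * whose SMT bits in the APIC ID are all zero. The mask below isolates
 * those low-order SMT bits: e.g. with smp_num_siblings == 2,
 * fls(2) - 1 == 1, so mask == 0x1 and an APIC ID with bit 0 clear
 * denotes the primary thread.
 */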
static void cpu_mark_primary_thread(unsigned int cpu, unsigned int apicid)
{
	/* Isolate the SMT bit(s) in the APICID and check for 0 */
	u32 mask = (1U << (fls(smp_num_siblings) - 1)) - 1;

	if (smp_num_siblings == 1 || !(apicid & mask))
		cpumask_set_cpu(cpu, &__cpu_primary_thread_mask);
}

/*
 * Due to the utter mess of CPUID evaluation, smp_num_siblings is not
 * valid during early boot. Initialize the primary thread mask before
 * SMP bringup.
 */
static int __init smp_init_primary_thread_mask(void)
{
	unsigned int cpu;

	/*
	 * XEN/PV provides either no topology information or useless
	 * topology information. Pretend that all vCPUs are primary
	 * threads.
	 */
	if (xen_pv_domain()) {
		cpumask_copy(&__cpu_primary_thread_mask, cpu_possible_mask);
		return 0;
	}

	for (cpu = 0; cpu < nr_logical_cpuids; cpu++)
		cpu_mark_primary_thread(cpu, cpuid_to_apicid[cpu]);
	return 0;
}
early_initcall(smp_init_primary_thread_mask);
#else
static inline void cpu_mark_primary_thread(unsigned int cpu, unsigned int apicid) { }
#endif

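/*
 * Look up the logical CPU number already assigned to @apic_id.
 * Returns -ENODEV if the APIC ID has not been registered yet.
 */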
static int topo_lookup_cpuid(u32 apic_id)
{
	int i;

	/* CPU# to APICID mapping is persistent once it is established */
	for (i = 0; i < nr_logical_cpuids; i++) {
		if (cpuid_to_apicid[i] == apic_id)
			return i;
	}
	return -ENODEV;
}

/*
 * Use this API to allocate logical CPU IDs so that nr_logical_cpuids
 * and cpuid_to_apicid[] stay synchronized.
 */
static int allocate_logical_cpuid(u32 apic_id)
{
	int cpu = topo_lookup_cpuid(apic_id);

	if (cpu >= 0)
		return cpu;

	/* Allocate a new cpuid. */
	if (nr_logical_cpuids >= nr_cpu_ids) {
		WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %u reached. "
			     "Processor %d/0x%x and the rest are ignored.\n",
			     nr_cpu_ids, nr_logical_cpuids, apic_id);
		return -EINVAL;
	}

	cpuid_to_apicid[nr_logical_cpuids] = apic_id;
	return nr_logical_cpuids++;
}

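/*
 * Record the APIC ID of a registered CPU and account for it: mark the
 * CPU possible and present, flag its APIC ID in phys_cpu_present_map
 * and bump num_processors.
 */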
static void cpu_update_apic(int cpu, u32 apicid)
{
#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
#endif
	set_cpu_possible(cpu, true);
	set_bit(apicid, phys_cpu_present_map);
	set_cpu_present(cpu, true);
	num_processors++;

	if (system_state != SYSTEM_BOOTING)
		cpu_mark_primary_thread(cpu, apicid);
}

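/*
 * Register one enumerated APIC ID: skip the boot CPU (already registered),
 * honour the disable_cpu_apicid= override, enforce the nr_cpu_ids limit and
 * otherwise allocate a logical CPU number for it. Returns the logical CPU
 * number or a negative error code.
 */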
static int generic_processor_info(int apicid)
{
	int cpu, max = nr_cpu_ids;

	/* The boot CPU must be set before MADT/MPTABLE parsing happens */
	if (cpuid_to_apicid[0] == BAD_APICID)
		panic("Boot CPU APIC not registered yet\n");

	if (apicid == boot_cpu_physical_apicid)
		return 0;

	if (disabled_cpu_apicid == apicid) {
		int thiscpu = num_processors + disabled_cpus;

		pr_warn("APIC: Disabling requested cpu. Processor %d/0x%x ignored.\n",
			thiscpu, apicid);

		disabled_cpus++;
		return -ENODEV;
	}

	if (num_processors >= nr_cpu_ids) {
		int thiscpu = max + disabled_cpus;

		pr_warn("APIC: NR_CPUS/possible_cpus limit of %i reached. "
			"Processor %d/0x%x ignored.\n", max, thiscpu, apicid);

		disabled_cpus++;
		return -EINVAL;
	}

	cpu = allocate_logical_cpuid(apicid);
	if (cpu < 0) {
		disabled_cpus++;
		return -EINVAL;
	}

	cpu_update_apic(cpu, apicid);
	return cpu;
}

/**
 * topology_register_apic - Register an APIC in early topology maps
 * @apic_id:	The APIC ID to set up
 * @acpi_id:	The ACPI ID associated with the APIC
 * @present:	True if the corresponding CPU is present
 */
void __init topology_register_apic(u32 apic_id, u32 acpi_id, bool present)
{
	int cpu;

	if (apic_id >= MAX_LOCAL_APIC) {
		pr_err_once("APIC ID %x exceeds kernel limit of: %x\n", apic_id, MAX_LOCAL_APIC - 1);
		return;
	}

	if (!present) {
		disabled_cpus++;
		return;
	}

	cpu = generic_processor_info(apic_id);
	if (cpu >= 0)
		early_per_cpu(x86_cpu_to_acpiid, cpu) = acpi_id;
}
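
/*
 * Illustrative only (not part of this file's interfaces): during boot the
 * firmware table parsers are expected to invoke the function above once per
 * enumerated local APIC, roughly along the lines of
 *
 *	topology_register_apic(lapic->id, lapic->processor_id,
 *			       lapic->lapic_flags & ACPI_MADT_ENABLED);
 *
 * so that every firmware-described CPU lands in the topology maps before
 * SMP bringup. The struct/field names above are an ACPI MADT sketch, not a
 * reference to the actual call sites.
 */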

/**
 * topology_register_boot_apic - Register the boot CPU APIC
 * @apic_id:	The APIC ID to set up
 *
 * Separate function so that CPU #0 can be assigned before the
 * MADT/MPTABLE parsers register the remaining APICs.
 */
void __init topology_register_boot_apic(u32 apic_id)
{
	cpuid_to_apicid[0] = apic_id;
	cpu_update_apic(0, apic_id);
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/**
 * topology_hotplug_apic - Handle a physically hotplugged APIC after boot
 * @apic_id:	The APIC ID to set up
 * @acpi_id:	The ACPI ID associated with the APIC
 */
int topology_hotplug_apic(u32 apic_id, u32 acpi_id)
{
	int cpu;

	if (apic_id >= MAX_LOCAL_APIC)
		return -EINVAL;

	cpu = topo_lookup_cpuid(apic_id);
	if (cpu < 0) {
		cpu = generic_processor_info(apic_id);
		if (cpu >= 0)
			per_cpu(x86_cpu_to_acpiid, cpu) = acpi_id;
	}
	return cpu;
}

/**
 * topology_hotunplug_apic - Remove a physically hotplugged APIC after boot
 * @cpu:	The CPU number for which the APIC ID is removed
 */
void topology_hotunplug_apic(unsigned int cpu)
{
	u32 apic_id = cpuid_to_apicid[cpu];

	if (apic_id == BAD_APICID)
		return;

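	/* Revert the presence bookkeeping done by cpu_update_apic() */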
	per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	clear_bit(apic_id, phys_cpu_present_map);
	set_cpu_present(cpu, false);
	num_processors--;
}
#endif

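/* Parse the disable_cpu_apicid= early parameter (see disabled_cpu_apicid above). */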
static int __init apic_set_disabled_cpu_apicid(char *arg)
{
	if (!arg || !get_option(&arg, &disabled_cpu_apicid))
		return -EINVAL;

	return 0;
}
early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid);