xref: /linux/arch/x86/kernel/apic/apic_flat_64.c (revision e6f2a617ac53bc0753b885ffb94379ff48b2e2df)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2004 James Cleverdon, IBM.
4  *
5  * Flat APIC subarch code.
6  *
7  * Hacked for x86-64 by James Cleverdon from i386 architecture code by
8  * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
9  * James Cleverdon.
10  */
11 #include <linux/cpumask.h>
12 #include <linux/export.h>
13 #include <linux/acpi.h>
14 
15 #include <asm/jailhouse_para.h>
16 #include <asm/apic.h>
17 
18 #include "local.h"
19 
/* Forward declarations; both driver structs are defined later in this file. */
static struct apic apic_physflat;
static struct apic apic_flat;

/* The active APIC driver for this boot; starts as logical flat mode. */
struct apic *apic __ro_after_init = &apic_flat;
EXPORT_SYMBOL_GPL(apic);
25 
/*
 * MADT OEM check for logical flat mode: it is always an acceptable
 * match, so unconditionally claim the platform. The OEM strings are
 * ignored here (physflat's check, below, does inspect them).
 */
static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return 1;
}
30 
31 /*
32  * Set up the logical destination ID.
33  *
34  * Intel recommends to set DFR, LDR and TPR before enabling
35  * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
36  * document number 292116).  So here it goes...
37  */
38 void flat_init_apic_ldr(void)
39 {
40 	unsigned long val;
41 	unsigned long num, id;
42 
43 	num = smp_processor_id();
44 	id = 1UL << num;
45 	apic_write(APIC_DFR, APIC_DFR_FLAT);
46 	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
47 	val |= SET_APIC_LOGICAL_ID(id);
48 	apic_write(APIC_LDR, val);
49 }
50 
51 static void _flat_send_IPI_mask(unsigned long mask, int vector)
52 {
53 	unsigned long flags;
54 
55 	local_irq_save(flags);
56 	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
57 	local_irq_restore(flags);
58 }
59 
60 static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
61 {
62 	unsigned long mask = cpumask_bits(cpumask)[0];
63 
64 	_flat_send_IPI_mask(mask, vector);
65 }
66 
67 static void
68 flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
69 {
70 	unsigned long mask = cpumask_bits(cpumask)[0];
71 	int cpu = smp_processor_id();
72 
73 	if (cpu < BITS_PER_LONG)
74 		__clear_bit(cpu, &mask);
75 
76 	_flat_send_IPI_mask(mask, vector);
77 }
78 
79 static unsigned int flat_get_apic_id(unsigned long x)
80 {
81 	return (x >> 24) & 0xFF;
82 }
83 
84 static u32 set_apic_id(unsigned int id)
85 {
86 	return (id & 0xFF) << 24;
87 }
88 
89 static unsigned int read_xapic_id(void)
90 {
91 	return flat_get_apic_id(apic_read(APIC_ID));
92 }
93 
/* Check that this CPU's hardware APIC ID is set in the present map. */
static int flat_apic_id_registered(void)
{
	return physid_isset(read_xapic_id(), phys_cpu_present_map);
}
98 
/*
 * Derive the physical package ID by discarding the low @index_msb bits
 * (core/thread ID bits) of the initial APIC ID.
 */
static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
{
	int pkg_id = initial_apic_id >> index_msb;

	return pkg_id;
}
103 
/* Probe: flat is the default driver and always claims the machine. */
static int flat_probe(void)
{
	return 1;
}
108 
/* Logical flat driver: CPUs addressed by one-hot bits in the LDR. */
static struct apic apic_flat __ro_after_init = {
	.name				= "flat",
	.probe				= flat_probe,
	.acpi_madt_oem_check		= flat_acpi_madt_oem_check,
	.apic_id_valid			= default_apic_id_valid,
	.apic_id_registered		= flat_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 1, /* logical */

	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,

	.init_apic_ldr			= flat_init_apic_ldr,

	/* NULL callbacks: the generic code uses its default handling. */
	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.phys_pkg_id			= flat_phys_pkg_id,

	.get_apic_id			= flat_get_apic_id,
	.set_apic_id			= set_apic_id,

	.calc_dest_apicid		= apic_flat_calc_apicid,

	/* Single-destination IPIs use the default path; masks use flat. */
	.send_IPI			= default_send_IPI_single,
	.send_IPI_mask			= flat_send_IPI_mask,
	.send_IPI_mask_allbutself	= flat_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= default_send_IPI_allbutself,
	.send_IPI_all			= default_send_IPI_all,
	.send_IPI_self			= default_send_IPI_self,

	.inquire_remote_apic		= default_inquire_remote_apic,

	/* Raw register accessors: memory-mapped xAPIC operations. */
	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.eoi_write			= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
};
154 
/*
 * Physflat mode is used when there are more than 8 CPUs on a system.
 * We cannot use logical delivery in this case because the mask
 * overflows, so use physical mode.
 *
 * Returns 1 when the firmware mandates physical mode, 0 otherwise
 * (always 0 when ACPI support is not built in).
 */
static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
#ifdef CONFIG_ACPI
	/*
	 * Quirk: some x86_64 machines can only use physical APIC mode
	 * regardless of how many processors are present (x86_64 ES7000
	 * is an example).
	 */
	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
		(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
		/* Messages must end in '\n' or they merge with the next printk */
		printk(KERN_DEBUG "system APIC only can use physical flat\n");
		return 1;
	}

	if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) {
		printk(KERN_DEBUG "IBM Summit detected, will use apic physical\n");
		return 1;
	}
#endif

	return 0;
}
182 
/*
 * Intentionally empty: LDR and DFR play no role in physical destination
 * mode — "In physical destination mode, the destination processor is
 * specified by its local APIC ID [...]." (Intel SDM, 10.6.2.1)
 */
static void physflat_init_apic_ldr(void)
{
}
191 
192 static int physflat_probe(void)
193 {
194 	if (apic == &apic_physflat || num_possible_cpus() > 8 ||
195 	    jailhouse_paravirt())
196 		return 1;
197 
198 	return 0;
199 }
200 
/* Physical flat driver: CPUs addressed directly by their APIC ID. */
static struct apic apic_physflat __ro_after_init = {

	.name				= "physical flat",
	.probe				= physflat_probe,
	.acpi_madt_oem_check		= physflat_acpi_madt_oem_check,
	.apic_id_valid			= default_apic_id_valid,
	.apic_id_registered		= flat_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.disable_esr			= 0,
	.dest_logical			= 0,
	.check_apicid_used		= NULL,

	/* No-op: LDR/DFR are unused in physical destination mode. */
	.init_apic_ldr			= physflat_init_apic_ldr,

	/* NULL callbacks: the generic code uses its default handling. */
	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.phys_pkg_id			= flat_phys_pkg_id,

	.get_apic_id			= flat_get_apic_id,
	.set_apic_id			= set_apic_id,

	.calc_dest_apicid		= apic_default_calc_apicid,

	/* Physical-mode IPIs are sent per destination CPU. */
	.send_IPI			= default_send_IPI_single_phys,
	.send_IPI_mask			= default_send_IPI_mask_sequence_phys,
	.send_IPI_mask_allbutself	= default_send_IPI_mask_allbutself_phys,
	.send_IPI_allbutself		= default_send_IPI_allbutself,
	.send_IPI_all			= default_send_IPI_all,
	.send_IPI_self			= default_send_IPI_self,

	.inquire_remote_apic		= default_inquire_remote_apic,

	/* Raw register accessors: memory-mapped xAPIC operations. */
	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.eoi_write			= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
};
247 
/*
 * Register both drivers. We need to check for physflat first, so this
 * order is important.
 */
apic_drivers(apic_physflat, apic_flat);
252