xref: /linux/arch/x86/kernel/apic/x2apic_savic.c (revision ea7d792e11e10f502933c39f3836cb73d35dac36)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AMD Secure AVIC Support (SEV-SNP Guests)
4  *
5  * Copyright (C) 2024 Advanced Micro Devices, Inc.
6  *
7  * Author: Neeraj Upadhyay <Neeraj.Upadhyay@amd.com>
8  */
9 
10 #include <linux/cc_platform.h>
11 #include <linux/cpumask.h>
12 #include <linux/percpu-defs.h>
13 #include <linux/align.h>
14 
15 #include <asm/apic.h>
16 #include <asm/sev.h>
17 
18 #include "local.h"
19 
/*
 * Per-CPU APIC backing page for Secure AVIC. The hardware-defined layout
 * requires the page to be exactly PAGE_SIZE and page-aligned.
 */
struct secure_avic_page {
	u8 regs[PAGE_SIZE];
} __aligned(PAGE_SIZE);

/* Per-CPU APIC backing pages, allocated once in savic_probe(). */
static struct secure_avic_page __percpu *savic_page __ro_after_init;
25 
26 static int savic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
27 {
28 	return x2apic_enabled() && cc_platform_has(CC_ATTR_SNP_SECURE_AVIC);
29 }
30 
31 static inline void *get_reg_bitmap(unsigned int cpu, unsigned int offset)
32 {
33 	return &per_cpu_ptr(savic_page, cpu)->regs[offset];
34 }
35 
36 static inline void update_vector(unsigned int cpu, unsigned int offset,
37 				 unsigned int vector, bool set)
38 {
39 	void *bitmap = get_reg_bitmap(cpu, offset);
40 
41 	if (set)
42 		apic_set_vector(vector, bitmap);
43 	else
44 		apic_clear_vector(vector, bitmap);
45 }
46 
47 #define SAVIC_ALLOWED_IRR	0x204
48 
49 /*
50  * When Secure AVIC is enabled, RDMSR/WRMSR of the APIC registers
51  * result in #VC exception (for non-accelerated register accesses)
52  * with VMEXIT_AVIC_NOACCEL error code. The #VC exception handler
53  * can read/write the x2APIC register in the guest APIC backing page.
54  *
55  * Since doing this would increase the latency of accessing x2APIC
56  * registers, instead of doing RDMSR/WRMSR based accesses and
57  * handling the APIC register reads/writes in the #VC exception handler,
58  * the read() and write() callbacks directly read/write the APIC register
59  * from/to the vCPU's APIC backing page.
60  */
/*
 * Read an x2APIC register for the current CPU directly from its APIC
 * backing page, except for the timer registers, which remain emulated
 * by the hypervisor and are read via the GHCB MSR protocol. Unknown or
 * misaligned offsets are rejected and read as 0.
 */
static u32 savic_read(u32 reg)
{
	void *ap = this_cpu_ptr(savic_page);

	switch (reg) {
	/* Timer registers are hypervisor-emulated: go through the GHCB. */
	case APIC_LVTT:
	case APIC_TMICT:
	case APIC_TMCCT:
	case APIC_TDCR:
		return savic_ghcb_msr_read(reg);
	/* Plain 32-bit registers are read straight from the backing page. */
	case APIC_ID:
	case APIC_LVR:
	case APIC_TASKPRI:
	case APIC_ARBPRI:
	case APIC_PROCPRI:
	case APIC_LDR:
	case APIC_SPIV:
	case APIC_ESR:
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT0:
	case APIC_LVT1:
	case APIC_LVTERR:
	case APIC_EFEAT:
	case APIC_ECTRL:
	case APIC_SEOI:
	case APIC_IER:
	case APIC_EILVTn(0) ... APIC_EILVTn(3):
		return apic_get_reg(ap, reg);
	/* ICR is a single 64-bit register in x2APIC; return the low half. */
	case APIC_ICR:
		return (u32)apic_get_reg64(ap, reg);
	case APIC_ISR ... APIC_ISR + 0x70:
	case APIC_TMR ... APIC_TMR + 0x70:
		/* Valid ISR/TMR words sit at 16-byte strides from the base. */
		if (WARN_ONCE(!IS_ALIGNED(reg, 16),
			      "APIC register read offset 0x%x not aligned at 16 bytes", reg))
			return 0;
		return apic_get_reg(ap, reg);
	/* IRR and ALLOWED_IRR offset range */
	case APIC_IRR ... APIC_IRR + 0x74:
		/*
		 * Valid APIC_IRR/SAVIC_ALLOWED_IRR registers are at 16 bytes strides from
		 * their respective base offset. APIC_IRRs are in the range
		 *
		 * (0x200, 0x210,  ..., 0x270)
		 *
		 * while the SAVIC_ALLOWED_IRR range starts 4 bytes later, in the range
		 *
		 * (0x204, 0x214, ..., 0x274).
		 *
		 * Filter out everything else.
		 */
		if (WARN_ONCE(!(IS_ALIGNED(reg, 16) ||
				IS_ALIGNED(reg - 4, 16)),
			      "Misaligned APIC_IRR/ALLOWED_IRR APIC register read offset 0x%x", reg))
			return 0;
		return apic_get_reg(ap, reg);
	default:
		pr_err("Error reading unknown Secure AVIC reg offset 0x%x\n", reg);
		return 0;
	}
}
122 
123 #define SAVIC_NMI_REQ		0x278
124 
125 /*
126  * On WRMSR to APIC_SELF_IPI register by the guest, Secure AVIC hardware
127  * updates the APIC_IRR in the APIC backing page of the vCPU. In addition,
128  * hardware evaluates the new APIC_IRR update for interrupt injection to
129  * the vCPU. So, self IPIs are hardware-accelerated.
130  */
/* Send a hardware-accelerated self-IPI via the SELF_IPI MSR (see above). */
static inline void self_ipi_reg_write(unsigned int vector)
{
	native_apic_msr_write(APIC_SELF_IPI, vector);
}
135 
/*
 * Request an IPI by setting @vector in the APIC_IRR of @cpu's APIC
 * backing page. This only marks the request; callers notify the
 * hypervisor separately (see savic_icr_write()).
 */
static void send_ipi_dest(unsigned int cpu, unsigned int vector)
{
	update_vector(cpu, APIC_IRR, vector, true);
}
140 
141 static void send_ipi_allbut(unsigned int vector)
142 {
143 	unsigned int cpu, src_cpu;
144 
145 	guard(irqsave)();
146 
147 	src_cpu = raw_smp_processor_id();
148 
149 	for_each_cpu(cpu, cpu_online_mask) {
150 		if (cpu == src_cpu)
151 			continue;
152 		send_ipi_dest(cpu, vector);
153 	}
154 }
155 
156 static inline void self_ipi(unsigned int vector)
157 {
158 	u32 icr_low = APIC_SELF_IPI | vector;
159 
160 	native_x2apic_icr_write(icr_low, 0);
161 }
162 
/*
 * ICR write handler: dispatch the IPI according to the destination
 * shorthand, then notify the hypervisor (except for self-IPIs, which
 * are hardware-accelerated) and mirror the ICR value into the backing
 * page. The IRR updates in the backing pages must happen before the
 * GHCB write so the request is visible when the hypervisor acts on it.
 */
static void savic_icr_write(u32 icr_low, u32 icr_high)
{
	unsigned int dsh, vector;
	u64 icr_data;

	dsh = icr_low & APIC_DEST_ALLBUT;
	vector = icr_low & APIC_VECTOR_MASK;

	switch (dsh) {
	case APIC_DEST_SELF:
		self_ipi(vector);
		break;
	case APIC_DEST_ALLINC:
		self_ipi(vector);
		fallthrough;
	case APIC_DEST_ALLBUT:
		send_ipi_allbut(vector);
		break;
	default:
		/*
		 * NOTE(review): icr_high (the x2APIC destination ID) is passed
		 * to send_ipi_dest() as a Linux CPU number — this assumes a 1:1
		 * APIC-ID-to-CPU mapping in the guest; confirm against callers.
		 */
		send_ipi_dest(icr_high, vector);
		break;
	}

	icr_data = ((u64)icr_high) << 32 | icr_low;
	/* Self-IPIs are accelerated; no hypervisor notification is needed. */
	if (dsh != APIC_DEST_SELF)
		savic_ghcb_msr_write(APIC_ICR, icr_data);
	apic_set_reg64(this_cpu_ptr(savic_page), APIC_ICR, icr_data);
}
191 
/*
 * Write an x2APIC register for the current CPU. Timer registers go
 * through the GHCB MSR protocol (hypervisor-emulated); ICR and SELF_IPI
 * have dedicated handlers; everything else allowed is written directly
 * into the APIC backing page. Unknown offsets are rejected with an error.
 */
static void savic_write(u32 reg, u32 data)
{
	void *ap = this_cpu_ptr(savic_page);

	switch (reg) {
	/* Timer registers are hypervisor-emulated: go through the GHCB. */
	case APIC_LVTT:
	case APIC_TMICT:
	case APIC_TDCR:
		savic_ghcb_msr_write(reg, data);
		break;
	/* Writable 32-bit registers go straight to the backing page. */
	case APIC_LVT0:
	case APIC_LVT1:
	case APIC_TASKPRI:
	case APIC_EOI:
	case APIC_SPIV:
	case SAVIC_NMI_REQ:
	case APIC_ESR:
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVTERR:
	case APIC_ECTRL:
	case APIC_SEOI:
	case APIC_IER:
	case APIC_EILVTn(0) ... APIC_EILVTn(3):
		apic_set_reg(ap, reg, data);
		break;
	case APIC_ICR:
		savic_icr_write(data, 0);
		break;
	case APIC_SELF_IPI:
		self_ipi_reg_write(data);
		break;
	/* ALLOWED_IRR offsets are writable */
	case SAVIC_ALLOWED_IRR ... SAVIC_ALLOWED_IRR + 0x70:
		/* Valid ALLOWED_IRR words are at 16-byte strides from 0x204. */
		if (IS_ALIGNED(reg - 4, 16)) {
			apic_set_reg(ap, reg, data);
			break;
		}
		/* Misaligned ALLOWED_IRR offsets fall through to the error. */
		fallthrough;
	default:
		pr_err("Error writing unknown Secure AVIC reg offset 0x%x\n", reg);
	}
}
235 
236 static void send_ipi(u32 dest, unsigned int vector, unsigned int dsh)
237 {
238 	unsigned int icr_low;
239 
240 	icr_low = __prepare_ICR(dsh, vector, APIC_DEST_PHYSICAL);
241 	savic_icr_write(icr_low, dest);
242 }
243 
244 static void savic_send_ipi(int cpu, int vector)
245 {
246 	u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
247 
248 	send_ipi(dest, vector, 0);
249 }
250 
251 static void send_ipi_mask(const struct cpumask *mask, unsigned int vector, bool excl_self)
252 {
253 	unsigned int cpu, this_cpu;
254 
255 	guard(irqsave)();
256 
257 	this_cpu = raw_smp_processor_id();
258 
259 	for_each_cpu(cpu, mask) {
260 		if (excl_self && cpu == this_cpu)
261 			continue;
262 		send_ipi(per_cpu(x86_cpu_to_apicid, cpu), vector, 0);
263 	}
264 }
265 
/* apic::send_IPI_mask callback: IPI to every CPU in @mask, including self. */
static void savic_send_ipi_mask(const struct cpumask *mask, int vector)
{
	send_ipi_mask(mask, vector, false);
}
270 
/* apic::send_IPI_mask_allbutself callback: @mask minus the sending CPU. */
static void savic_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	send_ipi_mask(mask, vector, true);
}
275 
/* apic::send_IPI_allbutself callback: all-but-self destination shorthand. */
static void savic_send_ipi_allbutself(int vector)
{
	send_ipi(0, vector, APIC_DEST_ALLBUT);
}

/* apic::send_IPI_all callback: all-including-self destination shorthand. */
static void savic_send_ipi_all(int vector)
{
	send_ipi(0, vector, APIC_DEST_ALLINC);
}
285 
/* apic::send_IPI_self callback: hardware-accelerated via the SELF_IPI MSR. */
static void savic_send_ipi_self(int vector)
{
	self_ipi_reg_write(vector);
}
290 
/*
 * apic::update_vector callback: allow or disallow @vector for @cpu by
 * updating the ALLOWED_IRR bitmap in its APIC backing page.
 */
static void savic_update_vector(unsigned int cpu, unsigned int vector, bool set)
{
	update_vector(cpu, SAVIC_ALLOWED_IRR, vector, set);
}
295 
/*
 * Per-CPU Secure AVIC setup: seed the backing page with this CPU's APIC
 * ID and register the page's GPA with the hypervisor. Aborts the guest
 * (snp_abort()) if registration fails, since the vCPU cannot run
 * reliably without a registered backing page.
 */
static void savic_setup(void)
{
	void *ap = this_cpu_ptr(savic_page);
	enum es_result res;
	unsigned long gpa;

	/*
	 * Before Secure AVIC is enabled, APIC MSR reads are intercepted.
	 * APIC_ID MSR read returns the value from the hypervisor.
	 */
	apic_set_reg(ap, APIC_ID, native_apic_msr_read(APIC_ID));

	gpa = __pa(ap);

	/*
	 * The NPT entry for a vCPU's APIC backing page must always be
	 * present when the vCPU is running in order for Secure AVIC to
	 * function. A VMEXIT_BUSY is returned on VMRUN and the vCPU cannot
	 * be resumed if the NPT entry for the APIC backing page is not
	 * present. Notify GPA of the vCPU's APIC backing page to the
	 * hypervisor by calling savic_register_gpa(). Before executing
	 * VMRUN, the hypervisor makes use of this information to make sure
	 * the APIC backing page is mapped in NPT.
	 */
	res = savic_register_gpa(gpa);
	if (res != ES_OK)
		snp_abort();
}
324 
/*
 * apic::probe callback. Returns 1 to select this driver when the
 * platform supports Secure AVIC, 0 otherwise. Secure AVIC requires
 * x2APIC mode and backing pages for every CPU; failure of either is
 * fatal for the guest (snp_abort()).
 */
static int savic_probe(void)
{
	if (!cc_platform_has(CC_ATTR_SNP_SECURE_AVIC))
		return 0;

	if (!x2apic_mode) {
		pr_err("Secure AVIC enabled in non x2APIC mode\n");
		snp_abort();
		/* unreachable */
	}

	savic_page = alloc_percpu(struct secure_avic_page);
	if (!savic_page)
		snp_abort();

	return 1;
}
342 
/*
 * Secure AVIC apic driver: x2APIC with physical destination mode, where
 * most register accesses hit the per-CPU APIC backing page instead of
 * intercepted MSRs (see savic_read()/savic_write()).
 */
static struct apic apic_x2apic_savic __ro_after_init = {

	.name				= "secure avic x2apic",
	.probe				= savic_probe,
	.acpi_madt_oem_check		= savic_acpi_madt_oem_check,
	.setup				= savic_setup,

	.dest_mode_logical		= false,

	.disable_esr			= 0,

	.cpu_present_to_apicid		= default_cpu_present_to_apicid,

	.max_apic_id			= UINT_MAX,
	.x2apic_set_max_apicid		= true,
	.get_apic_id			= x2apic_get_apic_id,

	.calc_dest_apicid		= apic_default_calc_apicid,

	.send_IPI			= savic_send_ipi,
	.send_IPI_mask			= savic_send_ipi_mask,
	.send_IPI_mask_allbutself	= savic_send_ipi_mask_allbutself,
	.send_IPI_allbutself		= savic_send_ipi_allbutself,
	.send_IPI_all			= savic_send_ipi_all,
	.send_IPI_self			= savic_send_ipi_self,

	.nmi_to_offline_cpu		= true,

	/* EOI stays an MSR write; ICR reads are native, writes are handled. */
	.read				= savic_read,
	.write				= savic_write,
	.eoi				= native_apic_msr_eoi,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= savic_icr_write,

	.update_vector			= savic_update_vector,
};

/* Register this driver with the APIC probe machinery. */
apic_driver(apic_x2apic_savic);
381