// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure AVIC Support (SEV-SNP Guests)
 *
 * Copyright (C) 2024 Advanced Micro Devices, Inc.
 *
 * Author: Neeraj Upadhyay <Neeraj.Upadhyay@amd.com>
 */

#include <linux/cc_platform.h>
#include <linux/cpumask.h>
#include <linux/percpu-defs.h>
#include <linux/align.h>

#include <asm/apic.h>
#include <asm/sev.h>

#include "local.h"

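/*
 * Per-vCPU APIC backing page. Both the guest (via the read()/write()
 * callbacks below) and the Secure AVIC hardware access x2APIC register
 * state through this page, so it must be page sized and page aligned.
 */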
struct secure_avic_page {
	u8 regs[PAGE_SIZE];
} __aligned(PAGE_SIZE);

static struct secure_avic_page __percpu *savic_page __ro_after_init;

static int savic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled() && cc_platform_has(CC_ATTR_SNP_SECURE_AVIC);
}

static inline void *get_reg_bitmap(unsigned int cpu, unsigned int offset)
{
	return &per_cpu_ptr(savic_page, cpu)->regs[offset];
}

static inline void update_vector(unsigned int cpu, unsigned int offset,
				 unsigned int vector, bool set)
{
	void *bitmap = get_reg_bitmap(cpu, offset);

	if (set)
		apic_set_vector(vector, bitmap);
	else
		apic_clear_vector(vector, bitmap);
}

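/*
 * Base offset of the ALLOWED_IRR registers in the APIC backing page.
 * Vectors set here indicate the interrupts which the guest allows the
 * hypervisor to inject (see savic_update_vector()).
 */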
#define SAVIC_ALLOWED_IRR	0x204

/*
 * When Secure AVIC is enabled, RDMSR/WRMSR of the APIC registers
 * result in a #VC exception (for non-accelerated register accesses)
 * with the VMEXIT_AVIC_NOACCEL error code. The #VC exception handler
 * can read/write the x2APIC registers in the guest APIC backing page.
 *
 * Since doing so would increase the latency of accessing x2APIC
 * registers, instead of doing RDMSR/WRMSR-based accesses and
 * handling the APIC register reads/writes in the #VC exception handler,
 * the read() and write() callbacks directly read/write the APIC register
 * from/to the vCPU's APIC backing page.
 */
static u32 savic_read(u32 reg)
{
	void *ap = this_cpu_ptr(savic_page);

	switch (reg) {
	case APIC_LVTT:
	case APIC_TMICT:
	case APIC_TMCCT:
	case APIC_TDCR:
		return savic_ghcb_msr_read(reg);
	case APIC_ID:
	case APIC_LVR:
	case APIC_TASKPRI:
	case APIC_ARBPRI:
	case APIC_PROCPRI:
	case APIC_LDR:
	case APIC_SPIV:
	case APIC_ESR:
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT0:
	case APIC_LVT1:
	case APIC_LVTERR:
	case APIC_EFEAT:
	case APIC_ECTRL:
	case APIC_SEOI:
	case APIC_IER:
	case APIC_EILVTn(0) ... APIC_EILVTn(3):
		return apic_get_reg(ap, reg);
	case APIC_ICR:
		return (u32)apic_get_reg64(ap, reg);
	case APIC_ISR ... APIC_ISR + 0x70:
	case APIC_TMR ... APIC_TMR + 0x70:
		if (WARN_ONCE(!IS_ALIGNED(reg, 16),
			      "APIC register read offset 0x%x not aligned at 16 bytes", reg))
			return 0;
		return apic_get_reg(ap, reg);
	/* IRR and ALLOWED_IRR offset range */
	case APIC_IRR ... APIC_IRR + 0x74:
		/*
		 * Valid APIC_IRR/SAVIC_ALLOWED_IRR registers are at 16-byte strides from
		 * their respective base offset. APIC_IRRs are in the range
		 *
		 * (0x200, 0x210, ..., 0x270)
		 *
		 * while the SAVIC_ALLOWED_IRR range starts 4 bytes later, in the range
		 *
		 * (0x204, 0x214, ..., 0x274).
		 *
		 * Filter out everything else.
		 */
		if (WARN_ONCE(!(IS_ALIGNED(reg, 16) ||
				IS_ALIGNED(reg - 4, 16)),
			      "Misaligned APIC_IRR/ALLOWED_IRR APIC register read offset 0x%x", reg))
			return 0;
		return apic_get_reg(ap, reg);
	default:
		pr_err("Error reading unknown Secure AVIC reg offset 0x%x\n", reg);
		return 0;
	}
}

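/*
 * Offset in the APIC backing page at which an NMI request for the
 * target vCPU is recorded (see send_ipi_dest()).
 */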
#define SAVIC_NMI_REQ		0x278

/*
 * On a guest WRMSR to the APIC_SELF_IPI register, Secure AVIC hardware
 * updates the APIC_IRR in the APIC backing page of the vCPU. In addition,
 * hardware evaluates the new APIC_IRR update for interrupt injection to
 * the vCPU. So, self IPIs are hardware-accelerated.
 */
static inline void self_ipi_reg_write(unsigned int vector)
{
	native_apic_msr_write(APIC_SELF_IPI, vector);
}

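/*
 * Request an IPI by setting the vector in the target vCPU's APIC_IRR
 * (or its SAVIC_NMI_REQ flag for NMIs) directly in the target's APIC
 * backing page. For cross-vCPU IPIs, the hypervisor is notified via a
 * GHCB ICR write afterwards (see savic_icr_write()).
 */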
static void send_ipi_dest(unsigned int cpu, unsigned int vector, bool nmi)
{
	if (nmi)
		apic_set_reg(per_cpu_ptr(savic_page, cpu), SAVIC_NMI_REQ, 1);
	else
		update_vector(cpu, APIC_IRR, vector, true);
}

static void send_ipi_allbut(unsigned int vector, bool nmi)
{
	unsigned int cpu, src_cpu;

	guard(irqsave)();

	src_cpu = raw_smp_processor_id();

	for_each_cpu(cpu, cpu_online_mask) {
		if (cpu == src_cpu)
			continue;
		send_ipi_dest(cpu, vector, nmi);
	}
}

static inline void self_ipi(unsigned int vector, bool nmi)
{
	/* Use the SELF destination shorthand in the ICR */
	u32 icr_low = APIC_DEST_SELF | vector;

	if (nmi)
		icr_low |= APIC_DM_NMI;

	native_x2apic_icr_write(icr_low, 0);
}

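/*
 * Update the APIC_IRR/NMI_REQ state in the backing page of each target
 * vCPU, then forward the ICR write to the hypervisor via the GHCB MSR
 * protocol (for non-self destinations) so that it can notify the target
 * vCPU(s) of the pending interrupt.
 */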
static void savic_icr_write(u32 icr_low, u32 icr_high)
{
	unsigned int dsh, vector;
	u64 icr_data;
	bool nmi;

	dsh = icr_low & APIC_DEST_ALLBUT;
	vector = icr_low & APIC_VECTOR_MASK;
	nmi = ((icr_low & APIC_DM_FIXED_MASK) == APIC_DM_NMI);

	switch (dsh) {
	case APIC_DEST_SELF:
		self_ipi(vector, nmi);
		break;
	case APIC_DEST_ALLINC:
		self_ipi(vector, nmi);
		fallthrough;
	case APIC_DEST_ALLBUT:
		send_ipi_allbut(vector, nmi);
		break;
	default:
		send_ipi_dest(icr_high, vector, nmi);
		break;
	}

	icr_data = ((u64)icr_high) << 32 | icr_low;
	if (dsh != APIC_DEST_SELF)
		savic_ghcb_msr_write(APIC_ICR, icr_data);
	apic_set_reg64(this_cpu_ptr(savic_page), APIC_ICR, icr_data);
}

static void savic_write(u32 reg, u32 data)
{
	void *ap = this_cpu_ptr(savic_page);

	switch (reg) {
	case APIC_LVTT:
	case APIC_TMICT:
	case APIC_TDCR:
		savic_ghcb_msr_write(reg, data);
		break;
	case APIC_LVT0:
	case APIC_LVT1:
	case APIC_TASKPRI:
	case APIC_EOI:
	case APIC_SPIV:
	case SAVIC_NMI_REQ:
	case APIC_ESR:
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVTERR:
	case APIC_ECTRL:
	case APIC_SEOI:
	case APIC_IER:
	case APIC_EILVTn(0) ... APIC_EILVTn(3):
		apic_set_reg(ap, reg, data);
		break;
	case APIC_ICR:
		savic_icr_write(data, 0);
		break;
	case APIC_SELF_IPI:
		self_ipi_reg_write(data);
		break;
	/* ALLOWED_IRR offsets are writable */
	case SAVIC_ALLOWED_IRR ... SAVIC_ALLOWED_IRR + 0x70:
		if (IS_ALIGNED(reg - 4, 16)) {
			apic_set_reg(ap, reg, data);
			break;
		}
		fallthrough;
	default:
		pr_err("Error writing unknown Secure AVIC reg offset 0x%x\n", reg);
	}
}

static void send_ipi(u32 dest, unsigned int vector, unsigned int dsh)
{
	unsigned int icr_low;

	icr_low = __prepare_ICR(dsh, vector, APIC_DEST_PHYSICAL);
	savic_icr_write(icr_low, dest);
}

static void savic_send_ipi(int cpu, int vector)
{
	u32 dest = per_cpu(x86_cpu_to_apicid, cpu);

	send_ipi(dest, vector, 0);
}

static void send_ipi_mask(const struct cpumask *mask, unsigned int vector, bool excl_self)
{
	unsigned int cpu, this_cpu;

	guard(irqsave)();

	this_cpu = raw_smp_processor_id();

	for_each_cpu(cpu, mask) {
		if (excl_self && cpu == this_cpu)
			continue;
		send_ipi(per_cpu(x86_cpu_to_apicid, cpu), vector, 0);
	}
}

static void savic_send_ipi_mask(const struct cpumask *mask, int vector)
{
	send_ipi_mask(mask, vector, false);
}

static void savic_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	send_ipi_mask(mask, vector, true);
}

static void savic_send_ipi_allbutself(int vector)
{
	send_ipi(0, vector, APIC_DEST_ALLBUT);
}

static void savic_send_ipi_all(int vector)
{
	send_ipi(0, vector, APIC_DEST_ALLINC);
}

static void savic_send_ipi_self(int vector)
{
	self_ipi_reg_write(vector);
}

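/*
 * ->update_vector() callback: maintain the ALLOWED_IRR bitmap, which
 * tells the hypervisor which vectors the guest accepts for this CPU.
 */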
static void savic_update_vector(unsigned int cpu, unsigned int vector, bool set)
{
	update_vector(cpu, SAVIC_ALLOWED_IRR, vector, set);
}

static void savic_setup(void)
{
	void *ap = this_cpu_ptr(savic_page);
	enum es_result res;
	unsigned long gpa;

	/*
	 * Before Secure AVIC is enabled, APIC MSR reads are intercepted.
	 * An APIC_ID MSR read returns the value from the hypervisor.
	 */
	apic_set_reg(ap, APIC_ID, native_apic_msr_read(APIC_ID));

	gpa = __pa(ap);

	/*
	 * The NPT entry for a vCPU's APIC backing page must always be
	 * present when the vCPU is running in order for Secure AVIC to
	 * function. A VMEXIT_BUSY is returned on VMRUN and the vCPU cannot
	 * be resumed if the NPT entry for the APIC backing page is not
	 * present. Notify the hypervisor of the GPA of the vCPU's APIC
	 * backing page by calling savic_register_gpa(). Before executing
	 * VMRUN, the hypervisor uses this information to make sure the
	 * APIC backing page is mapped in the NPT.
	 */
	res = savic_register_gpa(gpa);
	if (res != ES_OK)
		snp_abort();

	native_wrmsrq(MSR_AMD64_SAVIC_CONTROL, gpa | MSR_AMD64_SAVIC_ALLOWEDNMI);
}

static int savic_probe(void)
{
	if (!cc_platform_has(CC_ATTR_SNP_SECURE_AVIC))
		return 0;

	if (!x2apic_mode) {
		pr_err("Secure AVIC enabled in non-x2APIC mode\n");
		snp_abort();
		/* unreachable */
	}

	savic_page = alloc_percpu(struct secure_avic_page);
	if (!savic_page)
		snp_abort();

	return 1;
}

static struct apic apic_x2apic_savic __ro_after_init = {

	.name				= "secure avic x2apic",
	.probe				= savic_probe,
	.acpi_madt_oem_check		= savic_acpi_madt_oem_check,
	.setup				= savic_setup,

	.dest_mode_logical		= false,

	.disable_esr			= 0,

	.cpu_present_to_apicid		= default_cpu_present_to_apicid,

	.max_apic_id			= UINT_MAX,
	.x2apic_set_max_apicid		= true,
	.get_apic_id			= x2apic_get_apic_id,

	.calc_dest_apicid		= apic_default_calc_apicid,

	.send_IPI			= savic_send_ipi,
	.send_IPI_mask			= savic_send_ipi_mask,
	.send_IPI_mask_allbutself	= savic_send_ipi_mask_allbutself,
	.send_IPI_allbutself		= savic_send_ipi_allbutself,
	.send_IPI_all			= savic_send_ipi_all,
	.send_IPI_self			= savic_send_ipi_self,

	.nmi_to_offline_cpu		= true,

	.read				= savic_read,
	.write				= savic_write,
	.eoi				= native_apic_msr_eoi,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= savic_icr_write,

	.update_vector			= savic_update_vector,
};

apic_driver(apic_x2apic_savic);