// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure AVIC Support (SEV-SNP Guests)
 *
 * Copyright (C) 2024 Advanced Micro Devices, Inc.
 *
 * Author: Neeraj Upadhyay <Neeraj.Upadhyay@amd.com>
 */

#include <linux/cc_platform.h>
#include <linux/percpu-defs.h>
#include <linux/align.h>

#include <asm/apic.h>
#include <asm/sev.h>

#include "local.h"

struct secure_avic_page {
	u8 regs[PAGE_SIZE];
} __aligned(PAGE_SIZE);

static struct secure_avic_page __percpu *savic_page __ro_after_init;

static int savic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled() && cc_platform_has(CC_ATTR_SNP_SECURE_AVIC);
}

/* Base offset of the ALLOWED_IRR registers in the APIC backing page */
#define SAVIC_ALLOWED_IRR	0x204

/*
 * When Secure AVIC is enabled, RDMSR/WRMSR of the APIC registers
 * result in a #VC exception (for non-accelerated register accesses)
 * with the VMEXIT_AVIC_NOACCEL error code. The #VC exception handler
 * can read/write the x2APIC register in the guest APIC backing page.
 *
 * Since doing this would increase the latency of accessing x2APIC
 * registers, instead of doing RDMSR/WRMSR-based accesses and
 * handling the APIC register reads/writes in the #VC exception handler,
 * the read() and write() callbacks directly read/write the APIC register
 * from/to the vCPU's APIC backing page.
 */
static u32 savic_read(u32 reg)
{
	void *ap = this_cpu_ptr(savic_page);

	switch (reg) {
	case APIC_LVTT:
	case APIC_TMICT:
	case APIC_TMCCT:
	case APIC_TDCR:
	case APIC_ID:
	case APIC_LVR:
	case APIC_TASKPRI:
	case APIC_ARBPRI:
	case APIC_PROCPRI:
	case APIC_LDR:
	case APIC_SPIV:
	case APIC_ESR:
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT0:
	case APIC_LVT1:
	case APIC_LVTERR:
	case APIC_EFEAT:
	case APIC_ECTRL:
	case APIC_SEOI:
	case APIC_IER:
	case APIC_EILVTn(0) ... APIC_EILVTn(3):
		return apic_get_reg(ap, reg);
	case APIC_ICR:
		return (u32)apic_get_reg64(ap, reg);
	case APIC_ISR ... APIC_ISR + 0x70:
	case APIC_TMR ... APIC_TMR + 0x70:
		if (WARN_ONCE(!IS_ALIGNED(reg, 16),
			      "APIC register read offset 0x%x not aligned to 16 bytes", reg))
			return 0;
		return apic_get_reg(ap, reg);
	/* IRR and ALLOWED_IRR offset range */
	case APIC_IRR ... APIC_IRR + 0x74:
		/*
		 * Valid APIC_IRR/SAVIC_ALLOWED_IRR registers are at 16-byte strides from
		 * their respective base offsets. APIC_IRRs are in the range
		 *
		 *	(0x200, 0x210, ..., 0x270)
		 *
		 * while the SAVIC_ALLOWED_IRR range starts 4 bytes later, in the range
		 *
		 *	(0x204, 0x214, ..., 0x274).
		 *
		 * Filter out everything else.
		 */
		if (WARN_ONCE(!(IS_ALIGNED(reg, 16) ||
				IS_ALIGNED(reg - 4, 16)),
			      "Misaligned APIC_IRR/ALLOWED_IRR APIC register read offset 0x%x", reg))
			return 0;
		return apic_get_reg(ap, reg);
	default:
		pr_err("Error reading unknown Secure AVIC reg offset 0x%x\n", reg);
		return 0;
	}
}

/* NMI request register offset in the APIC backing page */
#define SAVIC_NMI_REQ		0x278

static void savic_write(u32 reg, u32 data)
{
	void *ap = this_cpu_ptr(savic_page);

	switch (reg) {
	case APIC_LVTT:
	case APIC_LVT0:
	case APIC_LVT1:
	case APIC_TMICT:
	case APIC_TDCR:
	case APIC_SELF_IPI:
	case APIC_TASKPRI:
	case APIC_EOI:
	case APIC_SPIV:
	case SAVIC_NMI_REQ:
	case APIC_ESR:
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVTERR:
	case APIC_ECTRL:
	case APIC_SEOI:
	case APIC_IER:
	case APIC_EILVTn(0) ... APIC_EILVTn(3):
		apic_set_reg(ap, reg, data);
		break;
	case APIC_ICR:
		apic_set_reg64(ap, reg, (u64)data);
		break;
	/* ALLOWED_IRR offsets are writable */
	case SAVIC_ALLOWED_IRR ... SAVIC_ALLOWED_IRR + 0x70:
		if (IS_ALIGNED(reg - 4, 16)) {
			apic_set_reg(ap, reg, data);
			break;
		}
		fallthrough;
	default:
		pr_err("Error writing unknown Secure AVIC reg offset 0x%x\n", reg);
	}
}

static void savic_setup(void)
{
	void *ap = this_cpu_ptr(savic_page);
	enum es_result res;
	unsigned long gpa;

	/*
	 * Before Secure AVIC is enabled, APIC MSR reads are intercepted.
	 * An APIC_ID MSR read returns the value from the hypervisor.
	 */
	apic_set_reg(ap, APIC_ID, native_apic_msr_read(APIC_ID));

	gpa = __pa(ap);

	/*
	 * The NPT entry for a vCPU's APIC backing page must always be
	 * present when the vCPU is running in order for Secure AVIC to
	 * function. A VMEXIT_BUSY is returned on VMRUN and the vCPU cannot
	 * be resumed if the NPT entry for the APIC backing page is not
	 * present. Notify the hypervisor of the GPA of the vCPU's APIC
	 * backing page by calling savic_register_gpa(). Before executing
	 * VMRUN, the hypervisor uses this information to make sure the
	 * APIC backing page is mapped in the NPT.
	 */
	res = savic_register_gpa(gpa);
	if (res != ES_OK)
		snp_abort();
}
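
/*
 * Probe and per-CPU setup flow: savic_probe() bails out if the platform
 * does not advertise CC_ATTR_SNP_SECURE_AVIC, treats Secure AVIC without
 * x2APIC mode as fatal, and allocates the per-CPU APIC backing pages.
 * savic_setup() then seeds APIC_ID from the (still intercepted) APIC_ID
 * MSR and registers this vCPU's backing page GPA with the hypervisor.
 */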

static int savic_probe(void)
{
	if (!cc_platform_has(CC_ATTR_SNP_SECURE_AVIC))
		return 0;

	if (!x2apic_mode) {
		pr_err("Secure AVIC enabled in non-x2APIC mode\n");
		snp_abort();
		/* unreachable */
	}

	savic_page = alloc_percpu(struct secure_avic_page);
	if (!savic_page)
		snp_abort();

	return 1;
}

static struct apic apic_x2apic_savic __ro_after_init = {

	.name				= "secure avic x2apic",
	.probe				= savic_probe,
	.acpi_madt_oem_check		= savic_acpi_madt_oem_check,
	.setup				= savic_setup,

	.dest_mode_logical		= false,

	.disable_esr			= 0,

	.cpu_present_to_apicid		= default_cpu_present_to_apicid,

	.max_apic_id			= UINT_MAX,
	.x2apic_set_max_apicid		= true,
	.get_apic_id			= x2apic_get_apic_id,

	.calc_dest_apicid		= apic_default_calc_apicid,

	.nmi_to_offline_cpu		= true,

	.read				= savic_read,
	.write				= savic_write,
	.eoi				= native_apic_msr_eoi,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
};

apic_driver(apic_x2apic_savic);