// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure AVIC Support (SEV-SNP Guests)
 *
 * Copyright (C) 2024 Advanced Micro Devices, Inc.
 *
 * Author: Neeraj Upadhyay <Neeraj.Upadhyay@amd.com>
 */

#include <linux/cc_platform.h>
#include <linux/percpu-defs.h>
#include <linux/align.h>

#include <asm/apic.h>
#include <asm/sev.h>

#include "local.h"

/*
 * Per-vCPU APIC backing page: one page-sized, page-aligned register
 * area per CPU holding the guest's x2APIC register state.
 */
struct secure_avic_page {
	u8 regs[PAGE_SIZE];
} __aligned(PAGE_SIZE);

/* Per-CPU APIC backing pages; allocated once in savic_probe(). */
static struct secure_avic_page __percpu *savic_page __ro_after_init;

/*
 * Select this APIC driver only when x2APIC mode is enabled and the
 * platform advertises Secure AVIC support.
 */
static int savic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled() && cc_platform_has(CC_ATTR_SNP_SECURE_AVIC);
}

/*
 * Backing-page offset of the first ALLOWED_IRR register: 4 bytes past
 * the APIC_IRR base (0x200), with subsequent registers at 16-byte
 * strides (0x204, 0x214, ..., 0x274).
 */
#define SAVIC_ALLOWED_IRR	0x204

/*
 * When Secure AVIC is enabled, RDMSR/WRMSR of the APIC registers
 * result in #VC exception (for non-accelerated register accesses)
 * with VMEXIT_AVIC_NOACCEL error code. The #VC exception handler
 * can read/write the x2APIC register in the guest APIC backing page.
 *
 * Since doing this would increase the latency of accessing x2APIC
 * registers, instead of doing RDMSR/WRMSR based accesses and
 * handling the APIC register reads/writes in the #VC exception handler,
 * the read() and write() callbacks directly read/write the APIC register
 * from/to the vCPU's APIC backing page.
 */
static u32 savic_read(u32 reg)
{
	void *ap = this_cpu_ptr(savic_page);

	switch (reg) {
	/* Plain 32-bit registers: read directly from the backing page. */
	case APIC_LVTT:
	case APIC_TMICT:
	case APIC_TMCCT:
	case APIC_TDCR:
	case APIC_ID:
	case APIC_LVR:
	case APIC_TASKPRI:
	case APIC_ARBPRI:
	case APIC_PROCPRI:
	case APIC_LDR:
	case APIC_SPIV:
	case APIC_ESR:
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT0:
	case APIC_LVT1:
	case APIC_LVTERR:
	case APIC_EFEAT:
	case APIC_ECTRL:
	case APIC_SEOI:
	case APIC_IER:
	case APIC_EILVTn(0) ... APIC_EILVTn(3):
		return apic_get_reg(ap, reg);
	/*
	 * ICR is a single 64-bit register in the backing page; this
	 * 32-bit callback returns only the low word (icr_read covers
	 * the full 64-bit read).
	 */
	case APIC_ICR:
		return (u32)apic_get_reg64(ap, reg);
	/*
	 * ISR/TMR banks: 8 registers each, valid only at 16-byte
	 * strides from the base offset.
	 */
	case APIC_ISR ... APIC_ISR + 0x70:
	case APIC_TMR ... APIC_TMR + 0x70:
		if (WARN_ONCE(!IS_ALIGNED(reg, 16),
			      "APIC register read offset 0x%x not aligned at 16 bytes", reg))
			return 0;
		return apic_get_reg(ap, reg);
	/* IRR and ALLOWED_IRR offset range */
	case APIC_IRR ... APIC_IRR + 0x74:
		/*
		 * Valid APIC_IRR/SAVIC_ALLOWED_IRR registers are at 16 bytes strides from
		 * their respective base offset. APIC_IRRs are in the range
		 *
		 * (0x200, 0x210, ..., 0x270)
		 *
		 * while the SAVIC_ALLOWED_IRR range starts 4 bytes later, in the range
		 *
		 * (0x204, 0x214, ..., 0x274).
		 *
		 * Filter out everything else.
		 */
		if (WARN_ONCE(!(IS_ALIGNED(reg, 16) ||
				IS_ALIGNED(reg - 4, 16)),
			      "Misaligned APIC_IRR/ALLOWED_IRR APIC register read offset 0x%x", reg))
			return 0;
		return apic_get_reg(ap, reg);
	default:
		pr_err("Error reading unknown Secure AVIC reg offset 0x%x\n", reg);
		return 0;
	}
}

/*
 * Backing-page offset of the Secure AVIC NMI request register
 * (writable via the write() callback below).
 */
#define SAVIC_NMI_REQ		0x278

/*
 * Write @data to APIC register @reg in this CPU's backing page.
 * Only offsets known to be writable are accepted; anything else is
 * reported and dropped.
 */
static void savic_write(u32 reg, u32 data)
{
	void *ap = this_cpu_ptr(savic_page);

	switch (reg) {
	/* Plain 32-bit writable registers. */
	case APIC_LVTT:
	case APIC_LVT0:
	case APIC_LVT1:
	case APIC_TMICT:
	case APIC_TDCR:
	case APIC_SELF_IPI:
	case APIC_TASKPRI:
	case APIC_EOI:
	case APIC_SPIV:
	case SAVIC_NMI_REQ:
	case APIC_ESR:
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVTERR:
	case APIC_ECTRL:
	case APIC_SEOI:
	case APIC_IER:
	case APIC_EILVTn(0) ... APIC_EILVTn(3):
		apic_set_reg(ap, reg, data);
		break;
	/*
	 * ICR is 64-bit in the backing page; a 32-bit write through
	 * this callback stores the zero-extended low word (icr_write
	 * covers the full 64-bit write).
	 */
	case APIC_ICR:
		apic_set_reg64(ap, reg, (u64)data);
		break;
	/* ALLOWED_IRR offsets are writable */
	case SAVIC_ALLOWED_IRR ... SAVIC_ALLOWED_IRR + 0x70:
		/* Valid ALLOWED_IRR offsets: 0x204, 0x214, ..., 0x274. */
		if (IS_ALIGNED(reg - 4, 16)) {
			apic_set_reg(ap, reg, data);
			break;
		}
		/* Misaligned offset in the range: report it below. */
		fallthrough;
	default:
		pr_err("Error writing unknown Secure AVIC reg offset 0x%x\n", reg);
	}
}

/*
 * Per-CPU setup: register this vCPU's APIC backing page GPA with the
 * hypervisor. Aborts the SNP guest on failure, since Secure AVIC
 * cannot operate without a registered backing page.
 */
static void savic_setup(void)
{
	void *ap = this_cpu_ptr(savic_page);
	enum es_result res;
	unsigned long gpa;

	gpa = __pa(ap);

	/*
	 * The NPT entry for a vCPU's APIC backing page must always be
	 * present when the vCPU is running in order for Secure AVIC to
	 * function. A VMEXIT_BUSY is returned on VMRUN and the vCPU cannot
	 * be resumed if the NPT entry for the APIC backing page is not
	 * present. Notify GPA of the vCPU's APIC backing page to the
	 * hypervisor by calling savic_register_gpa(). Before executing
	 * VMRUN, the hypervisor makes use of this information to make sure
	 * the APIC backing page is mapped in NPT.
	 */
	res = savic_register_gpa(gpa);
	if (res != ES_OK)
		snp_abort();
}

/*
 * Driver probe: returns 0 (not selected) when the platform lacks
 * Secure AVIC. With Secure AVIC present, x2APIC mode is mandatory and
 * the per-CPU backing pages must be allocatable; either failure is
 * fatal for the SNP guest.
 */
static int savic_probe(void)
{
	if (!cc_platform_has(CC_ATTR_SNP_SECURE_AVIC))
		return 0;

	if (!x2apic_mode) {
		pr_err("Secure AVIC enabled in non x2APIC mode\n");
		snp_abort();
		/* unreachable */
	}

	savic_page = alloc_percpu(struct secure_avic_page);
	if (!savic_page)
		snp_abort();

	return 1;
}

static struct apic apic_x2apic_savic __ro_after_init = {

	.name				= "secure avic x2apic",
	.probe				= savic_probe,
	.acpi_madt_oem_check		= savic_acpi_madt_oem_check,
	.setup				= savic_setup,

	.dest_mode_logical		= false,

	.disable_esr			= 0,

	.cpu_present_to_apicid		= default_cpu_present_to_apicid,

	.max_apic_id			= UINT_MAX,
	.x2apic_set_max_apicid		= true,
	.get_apic_id			= x2apic_get_apic_id,

	.calc_dest_apicid		= apic_default_calc_apicid,

	.nmi_to_offline_cpu		= true,

	/* Register accesses go through the backing page (see savic_read()). */
	.read				= savic_read,
	.write				= savic_write,
	/*
	 * EOI and ICR remain MSR-based; presumably these are
	 * hardware-accelerated under Secure AVIC — confirm against the
	 * architecture documentation.
	 */
	.eoi				= native_apic_msr_eoi,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
};

apic_driver(apic_x2apic_savic);