// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This file is not compiled stand-alone. It contains code shared
 * between the pre-decompression boot code and the running Linux kernel
 * and is included directly into both code-bases.
 */

#include <asm/setup_data.h>

#ifndef __BOOT_COMPRESSED
#define error(v)			pr_err(v)
#define has_cpuflag(f)			boot_cpu_has(f)
#define sev_printk(fmt, ...)		printk(fmt, ##__VA_ARGS__)
#define sev_printk_rtl(fmt, ...)	printk_ratelimited(fmt, ##__VA_ARGS__)
#else
#undef WARN
#define WARN(condition, format...) (!!(condition))
#define sev_printk(fmt, ...)
#define sev_printk_rtl(fmt, ...)
#undef vc_forward_exception
#define vc_forward_exception(c)		panic("SNP: Hypervisor requested exception\n")
#endif

/*
 * SVSM related information:
 *   During boot, the page tables are set up as identity mapped and later
 *   changed to use kernel virtual addresses. Maintain separate virtual and
 *   physical addresses for the CAA to allow SVSM functions to be used during
 *   early boot, both with identity mapped virtual addresses and proper kernel
 *   virtual addresses.
 */
struct svsm_ca *boot_svsm_caa __ro_after_init;
u64 boot_svsm_caa_pa __ro_after_init;

/* I/O parameters for CPUID-related helpers */
struct cpuid_leaf {
	u32 fn;
	u32 subfn;
	u32 eax;
	u32 ebx;
	u32 ecx;
	u32 edx;
};

/*
 * Since feature negotiation related variables are set early in the boot
 * process they must reside in the .data section so as not to be zeroed
 * out when the .bss section is later cleared.
 *
 * GHCB protocol version negotiated with the hypervisor.
 */
static u16 ghcb_version __ro_after_init;

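/*
 * Note: ghcb_version is established by sev_es_negotiate_protocol() below and
 * is stamped into every GHCB before a VMGEXIT (see sev_es_ghcb_hv_call()).
 */
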
/* Copy of the SNP firmware's CPUID page. */
static struct snp_cpuid_table cpuid_table_copy __ro_after_init;

/*
 * These will be initialized based on CPUID table so that non-present
 * all-zero leaves (for sparse tables) can be differentiated from
 * invalid/out-of-range leaves. This is needed since all-zero leaves
 * still need to be post-processed.
 */
static u32 cpuid_std_range_max __ro_after_init;
static u32 cpuid_hyp_range_max __ro_after_init;
static u32 cpuid_ext_range_max __ro_after_init;

bool __init sev_es_check_cpu_features(void)
{
	if (!has_cpuflag(X86_FEATURE_RDRAND)) {
		error("RDRAND instruction not supported - no trusted source of randomness available\n");
		return false;
	}

	return true;
}

void __head __noreturn
sev_es_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request Guest Termination from Hypervisor */
	sev_es_wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}

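/*
 * The MSR-based helpers above and below communicate through the GHCB MSR: per
 * the GHCB specification's MSR protocol, the low 12 bits carry the
 * request/response code (extracted via GHCB_MSR_INFO()/GHCB_RESP_CODE()) and
 * the remaining upper bits carry request-specific data such as the
 * termination reason or the registered GPA.
 */
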
/*
 * The hypervisor features are available from GHCB version 2 onward.
 */
u64 get_hv_features(void)
{
	u64 val;

	if (ghcb_version < 2)
		return 0;

	sev_es_wr_ghcb_msr(GHCB_MSR_HV_FT_REQ);
	VMGEXIT();

	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_HV_FT_RESP)
		return 0;

	return GHCB_MSR_HV_FT_RESP_VAL(val);
}

void snp_register_ghcb_early(unsigned long paddr)
{
	unsigned long pfn = paddr >> PAGE_SHIFT;
	u64 val;

	sev_es_wr_ghcb_msr(GHCB_MSR_REG_GPA_REQ_VAL(pfn));
	VMGEXIT();

	val = sev_es_rd_ghcb_msr();

	/* If the response GPA is not ours then abort the guest */
	if ((GHCB_RESP_CODE(val) != GHCB_MSR_REG_GPA_RESP) ||
	    (GHCB_MSR_REG_GPA_RESP_VAL(val) != pfn))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_REGISTER);
}

bool sev_es_negotiate_protocol(void)
{
	u64 val;

	/* Do the GHCB protocol version negotiation */
	sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;

	ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val), GHCB_PROTOCOL_MAX);

	return true;
}

static bool vc_decoding_needed(unsigned long exit_code)
{
	/* Exceptions don't require decoding of the instruction */
	return !(exit_code >= SVM_EXIT_EXCP_BASE &&
		 exit_code <= SVM_EXIT_LAST_EXCP);
}

static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
				      struct pt_regs *regs,
				      unsigned long exit_code)
{
	enum es_result ret = ES_OK;

	memset(ctxt, 0, sizeof(*ctxt));
	ctxt->regs = regs;

	if (vc_decoding_needed(exit_code))
		ret = vc_decode_insn(ctxt);

	return ret;
}

static void vc_finish_insn(struct es_em_ctxt *ctxt)
{
	ctxt->regs->ip += ctxt->insn.length;
}

static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	u32 ret;

	ret = ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0);
	if (!ret)
		return ES_OK;

	if (ret == 1) {
		u64 info = ghcb->save.sw_exit_info_2;
		unsigned long v = info & SVM_EVTINJ_VEC_MASK;

		/* Check if exception information from hypervisor is sane. */
		if ((info & SVM_EVTINJ_VALID) &&
		    ((v == X86_TRAP_GP) || (v == X86_TRAP_UD)) &&
		    ((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) {
			ctxt->fi.vector = v;

			if (info & SVM_EVTINJ_VALID_ERR)
				ctxt->fi.error_code = info >> 32;

			return ES_EXCEPTION;
		}
	}

	return ES_VMM_ERROR;
}

static inline int svsm_process_result_codes(struct svsm_call *call)
{
	switch (call->rax_out) {
	case SVSM_SUCCESS:
		return 0;
	case SVSM_ERR_INCOMPLETE:
	case SVSM_ERR_BUSY:
		return -EAGAIN;
	default:
		return -EINVAL;
	}
}

/*
 * Issue a VMGEXIT to call the SVSM:
 *   - Load the SVSM register state (RAX, RCX, RDX, R8 and R9)
 *   - Set the CA call pending field to 1
 *   - Issue VMGEXIT
 *   - Save the SVSM return register state (RAX, RCX, RDX, R8 and R9)
 *   - Perform atomic exchange of the CA call pending field
 *
 *   - See the "Secure VM Service Module for SEV-SNP Guests" specification for
 *     details on the calling convention.
 *     - The calling convention loosely follows the Microsoft X64 calling
 *       convention by putting arguments in RCX, RDX, R8 and R9.
 *     - RAX specifies the SVSM protocol/callid as input and the return code
 *       as output.
 */
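/*
 * Note on the call_pending handling below: the byte is set to 1 before the
 * VMGEXIT and the SVSM is expected to clear it once it has processed the
 * request, so a non-zero value read back by the atomic exchange indicates the
 * call never reached the SVSM and is treated as an error by the callers.
 */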
static __always_inline void svsm_issue_call(struct svsm_call *call, u8 *pending)
{
	register unsigned long rax asm("rax") = call->rax;
	register unsigned long rcx asm("rcx") = call->rcx;
	register unsigned long rdx asm("rdx") = call->rdx;
	register unsigned long r8  asm("r8")  = call->r8;
	register unsigned long r9  asm("r9")  = call->r9;

	call->caa->call_pending = 1;

	asm volatile("rep; vmmcall\n\t"
		     : "+r" (rax), "+r" (rcx), "+r" (rdx), "+r" (r8), "+r" (r9)
		     : : "memory");

	*pending = xchg(&call->caa->call_pending, *pending);

	call->rax_out = rax;
	call->rcx_out = rcx;
	call->rdx_out = rdx;
	call->r8_out = r8;
	call->r9_out = r9;
}

static int svsm_perform_msr_protocol(struct svsm_call *call)
{
	u8 pending = 0;
	u64 val, resp;

	/*
	 * When using the MSR protocol, be sure to save and restore
	 * the current MSR value.
	 */
	val = sev_es_rd_ghcb_msr();

	sev_es_wr_ghcb_msr(GHCB_MSR_VMPL_REQ_LEVEL(0));

	svsm_issue_call(call, &pending);

	resp = sev_es_rd_ghcb_msr();

	sev_es_wr_ghcb_msr(val);

	if (pending)
		return -EINVAL;

	if (GHCB_RESP_CODE(resp) != GHCB_MSR_VMPL_RESP)
		return -EINVAL;

	if (GHCB_MSR_VMPL_RESP_VAL(resp))
		return -EINVAL;

	return svsm_process_result_codes(call);
}

static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
{
	struct es_em_ctxt ctxt;
	u8 pending = 0;

	vc_ghcb_invalidate(ghcb);

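	/*
	 * Invalidating the GHCB clears its valid bitmap, so only the fields
	 * explicitly set below are presented to the hypervisor as valid.
	 */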
	/*
	 * Fill in protocol and format specifiers. This can be called very early
	 * in the boot, so use rip-relative references as needed.
	 */
	ghcb->protocol_version = ghcb_version;
	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa(ghcb));

	svsm_issue_call(call, &pending);

	if (pending)
		return -EINVAL;

	switch (verify_exception_info(ghcb, &ctxt)) {
	case ES_OK:
		break;
	case ES_EXCEPTION:
		vc_forward_exception(&ctxt);
		fallthrough;
	default:
		return -EINVAL;
	}

	return svsm_process_result_codes(call);
}

enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
				   struct es_em_ctxt *ctxt,
				   u64 exit_code, u64 exit_info_1,
				   u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = ghcb_version;
	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	return verify_exception_info(ghcb, ctxt);
}

static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg)
{
	u64 val;

	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, reg_idx));
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
		return -EIO;

	*reg = (val >> 32);

	return 0;
}

static int __sev_cpuid_hv_msr(struct cpuid_leaf *leaf)
{
	int ret;

	/*
	 * MSR protocol does not support fetching non-zero subfunctions, but is
	 * sufficient to handle current early-boot cases. Should that change,
	 * make sure to report an error rather than ignoring the index and
	 * grabbing random values.
	 * If this issue arises in the future, handling can be added here to
	 * use the GHCB-page protocol for cases that occur late enough in boot
	 * that the GHCB page is available.
	 */
	if (cpuid_function_is_indexed(leaf->fn) && leaf->subfn)
		return -EINVAL;

	ret =         __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EAX, &leaf->eax);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EBX, &leaf->ebx);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_ECX, &leaf->ecx);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EDX, &leaf->edx);

	return ret;
}

static int __sev_cpuid_hv_ghcb(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
	u32 cr4 = native_read_cr4();
	int ret;

	ghcb_set_rax(ghcb, leaf->fn);
	ghcb_set_rcx(ghcb, leaf->subfn);

	if (cr4 & X86_CR4_OSXSAVE)
		/* Safe to read xcr0 */
		ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
	else
		/* xgetbv will cause #UD - use reset value for xcr0 */
		ghcb_set_xcr0(ghcb, 1);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) &&
	      ghcb_rbx_is_valid(ghcb) &&
	      ghcb_rcx_is_valid(ghcb) &&
	      ghcb_rdx_is_valid(ghcb)))
		return ES_VMM_ERROR;

	leaf->eax = ghcb->save.rax;
	leaf->ebx = ghcb->save.rbx;
	leaf->ecx = ghcb->save.rcx;
	leaf->edx = ghcb->save.rdx;

	return ES_OK;
}

static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
	return ghcb ? __sev_cpuid_hv_ghcb(ghcb, ctxt, leaf)
		    : __sev_cpuid_hv_msr(leaf);
}

/*
 * This may be called early while still running on the initial identity
 * mapping. Use RIP-relative addressing to obtain the correct address
 * while running with the initial identity mapping as well as the
 * switch-over to kernel virtual addresses later.
 */
const struct snp_cpuid_table *snp_cpuid_get_table(void)
{
	return rip_rel_ptr(&cpuid_table_copy);
}

/*
 * The SNP Firmware ABI, Revision 0.9, Section 7.1, details the use of
 * XCR0_IN and XSS_IN to encode multiple versions of 0xD subfunctions 0
 * and 1 based on the corresponding features enabled by a particular
 * combination of XCR0 and XSS registers so that a guest can look up the
 * version corresponding to the features currently enabled in its XCR0/XSS
 * registers. The only value that differs between these versions/table
 * entries is the enabled XSAVE area size advertised via EBX.
 *
 * While hypervisors may choose to make use of this support, it is more
 * robust/secure for a guest to simply find the entry corresponding to the
 * base/legacy XSAVE area size (XCR0=1 or XCR0=3), and then calculate the
 * XSAVE area size using subfunctions 2 through 64, as documented in APM
 * Volume 3, Rev 3.31, Appendix E.3.8, which is what is done here.
 *
 * Since base/legacy XSAVE area size is documented as 0x240, use that value
 * directly rather than relying on the base size in the CPUID table.
 *
 * Return: XSAVE area size on success, 0 otherwise.
 */
static u32 __head snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
	u64 xfeatures_found = 0;
	u32 xsave_size = 0x240;
	int i;

	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *e = &cpuid_table->fn[i];

		if (!(e->eax_in == 0xD && e->ecx_in > 1 && e->ecx_in < 64))
			continue;
		if (!(xfeatures_en & (BIT_ULL(e->ecx_in))))
			continue;
		if (xfeatures_found & (BIT_ULL(e->ecx_in)))
			continue;

		xfeatures_found |= (BIT_ULL(e->ecx_in));

		if (compacted)
			xsave_size += e->eax;
		else
			xsave_size = max(xsave_size, e->eax + e->ebx);
	}

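	/*
	 * At this point xsave_size covers the 0x240-byte legacy/header area
	 * plus, for the compacted format, the size (EAX) of every enabled
	 * extended feature, or, for the standard format, the largest
	 * offset + size (EBX + EAX) seen among the enabled features.
	 */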
	/*
	 * Either the guest set unsupported XCR0/XSS bits, or the corresponding
	 * entries in the CPUID table were not present. This is not a valid
	 * state to be in.
	 */
	if (xfeatures_found != (xfeatures_en & GENMASK_ULL(63, 2)))
		return 0;

	return xsave_size;
}

static bool __head
snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
	int i;

	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *e = &cpuid_table->fn[i];

		if (e->eax_in != leaf->fn)
			continue;

		if (cpuid_function_is_indexed(leaf->fn) && e->ecx_in != leaf->subfn)
			continue;

		/*
		 * For 0xD subfunctions 0 and 1, only use the entry corresponding
		 * to the base/legacy XSAVE area size (XCR0=1 or XCR0=3, XSS=0).
		 * See the comments above snp_cpuid_calc_xsave_size() for more
		 * details.
		 */
		if (e->eax_in == 0xD && (e->ecx_in == 0 || e->ecx_in == 1))
			if (!(e->xcr0_in == 1 || e->xcr0_in == 3) || e->xss_in)
				continue;

		leaf->eax = e->eax;
		leaf->ebx = e->ebx;
		leaf->ecx = e->ecx;
		leaf->edx = e->edx;

		return true;
	}

	return false;
}

static void snp_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
	if (sev_cpuid_hv(ghcb, ctxt, leaf))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV);
}

static int __head
snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
		      struct cpuid_leaf *leaf)
{
	struct cpuid_leaf leaf_hv = *leaf;

	switch (leaf->fn) {
	case 0x1:
		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);

		/* initial APIC ID */
		leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0));
		/* APIC enabled bit */
		leaf->edx = (leaf_hv.edx & BIT(9)) | (leaf->edx & ~BIT(9));

		/* OSXSAVE enabled bit */
		if (native_read_cr4() & X86_CR4_OSXSAVE)
			leaf->ecx |= BIT(27);
		break;
	case 0x7:
		/* OSPKE enabled bit */
		leaf->ecx &= ~BIT(4);
		if (native_read_cr4() & X86_CR4_PKE)
			leaf->ecx |= BIT(4);
		break;
	case 0xB:
		leaf_hv.subfn = 0;
		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);

		/* extended APIC ID */
		leaf->edx = leaf_hv.edx;
		break;
	case 0xD: {
		bool compacted = false;
		u64 xcr0 = 1, xss = 0;
		u32 xsave_size;

		if (leaf->subfn != 0 && leaf->subfn != 1)
			return 0;

		if (native_read_cr4() & X86_CR4_OSXSAVE)
			xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
		if (leaf->subfn == 1) {
			/* Get XSS value if XSAVES is enabled. */
			if (leaf->eax & BIT(3)) {
				unsigned long lo, hi;

				asm volatile("rdmsr" : "=a" (lo), "=d" (hi)
						     : "c" (MSR_IA32_XSS));
				xss = (hi << 32) | lo;
			}

			/*
			 * The PPR and APM aren't clear on what size should be
			 * encoded in 0xD:0x1:EBX when compaction is not enabled
			 * by either XSAVEC (feature bit 1) or XSAVES (feature
			 * bit 3) since SNP-capable hardware has these feature
			 * bits fixed as 1. KVM sets it to 0 in this case, but
			 * to avoid this becoming an issue it's safer to simply
			 * treat this as unsupported for SNP guests.
			 */
			if (!(leaf->eax & (BIT(1) | BIT(3))))
				return -EINVAL;

			compacted = true;
		}

		xsave_size = snp_cpuid_calc_xsave_size(xcr0 | xss, compacted);
		if (!xsave_size)
			return -EINVAL;

		leaf->ebx = xsave_size;
		}
		break;
	case 0x8000001E:
		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);

		/* extended APIC ID */
		leaf->eax = leaf_hv.eax;
		/* compute ID */
		leaf->ebx = (leaf->ebx & GENMASK(31, 8)) | (leaf_hv.ebx & GENMASK(7, 0));
		/* node ID */
		leaf->ecx = (leaf->ecx & GENMASK(31, 8)) | (leaf_hv.ecx & GENMASK(7, 0));
		break;
	default:
		/* No fix-ups needed, use values as-is. */
		break;
	}

	return 0;
}

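/*
 * Note that only leaves 0x1, 0xB and 0x8000001E above pull selected values
 * from the hypervisor (APIC and topology IDs, which vary per vCPU); all other
 * fix-ups are derived from local CPU state such as CR4, XCR0 and XSS.
 */
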
/*
 * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
 * should be treated as fatal by the caller.
 */
static int __head
snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

	if (!cpuid_table->count)
		return -EOPNOTSUPP;

	if (!snp_cpuid_get_validated_func(leaf)) {
		/*
		 * Some hypervisors will avoid keeping track of CPUID entries
		 * where all values are zero, since they can be handled the
		 * same as out-of-range values (all-zero). This is useful here
		 * as well as it allows virtually all guest configurations to
		 * work using a single SNP CPUID table.
		 *
		 * To allow for this, there is a need to distinguish between
		 * out-of-range entries and in-range zero entries, since the
		 * CPUID table entries are only a template that may need to be
		 * augmented with additional values for things like
		 * CPU-specific information during post-processing. So if it's
		 * not in the table, set the values to zero. Then, if they are
		 * within a valid CPUID range, proceed with post-processing
		 * using zeros as the initial values. Otherwise, skip
		 * post-processing and just return zeros immediately.
		 */
		leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;

		/* Skip post-processing for out-of-range zero leaves. */
		if (!(leaf->fn <= cpuid_std_range_max ||
		      (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
		      (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
			return 0;
	}

	return snp_cpuid_postprocess(ghcb, ctxt, leaf);
}

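/*
 * The callers below (do_vc_no_ghcb() and vc_handle_cpuid_snp()) rely on the
 * snp_cpuid() return contract: 0 means the leaf was serviced from the table,
 * -EOPNOTSUPP means no SNP CPUID table is present and the hypervisor may be
 * queried directly, and any other error is treated as fatal.
 */
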
/*
 * Boot VC Handler - This is the first VC handler during boot. There is no
 * GHCB page yet, so it only supports the MSR-based communication with the
 * hypervisor and only the CPUID exit-code.
 */
void __head do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
{
	unsigned int subfn = lower_bits(regs->cx, 32);
	unsigned int fn = lower_bits(regs->ax, 32);
	u16 opcode = *(unsigned short *)regs->ip;
	struct cpuid_leaf leaf;
	int ret;

	/* Only CPUID is supported via MSR protocol */
	if (exit_code != SVM_EXIT_CPUID)
		goto fail;

	/* Is it really a CPUID insn? */
	if (opcode != 0xa20f)
		goto fail;

	leaf.fn = fn;
	leaf.subfn = subfn;

	ret = snp_cpuid(NULL, NULL, &leaf);
	if (!ret)
		goto cpuid_done;

	if (ret != -EOPNOTSUPP)
		goto fail;

	if (__sev_cpuid_hv_msr(&leaf))
		goto fail;

cpuid_done:
	regs->ax = leaf.eax;
	regs->bx = leaf.ebx;
	regs->cx = leaf.ecx;
	regs->dx = leaf.edx;

	/*
	 * This is a VC handler and the #VC is only raised when SEV-ES is
	 * active, which means SEV must be active too. Do sanity checks on the
	 * CPUID results to make sure the hypervisor does not trick the kernel
	 * into the no-sev path. This could map sensitive data unencrypted and
	 * make it accessible to the hypervisor.
	 *
	 * In particular, check for:
	 *	- Availability of CPUID leaf 0x8000001f
	 *	- SEV CPUID bit.
	 *
	 * The hypervisor might still report the wrong C-bit position, but this
	 * can't be checked here.
	 */

	if (fn == 0x80000000 && (regs->ax < 0x8000001f))
		/* SEV leaf check */
		goto fail;
	else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
		/* SEV bit */
		goto fail;

	/* Skip over the CPUID two-byte opcode */
	regs->ip += 2;

	return;

fail:
	/* Terminate the guest */
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}

static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
					   unsigned long address,
					   bool write)
{
	if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_USER;
		ctxt->fi.cr2        = address;
		if (write)
			ctxt->fi.error_code |= X86_PF_WRITE;

		return ES_EXCEPTION;
	}

	return ES_OK;
}

static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
					  void *src, char *buf,
					  unsigned int data_size,
					  unsigned int count,
					  bool backwards)
{
	int i, b = backwards ? -1 : 1;
	unsigned long address = (unsigned long)src;
	enum es_result ret;

	ret = vc_insn_string_check(ctxt, address, false);
	if (ret != ES_OK)
		return ret;

	for (i = 0; i < count; i++) {
		void *s = src + (i * data_size * b);
		char *d = buf + (i * data_size);

		ret = vc_read_mem(ctxt, s, d, data_size);
		if (ret != ES_OK)
			break;
	}

	return ret;
}

static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
					   void *dst, char *buf,
					   unsigned int data_size,
					   unsigned int count,
					   bool backwards)
{
	int i, s = backwards ? -1 : 1;
	unsigned long address = (unsigned long)dst;
	enum es_result ret;

	ret = vc_insn_string_check(ctxt, address, true);
	if (ret != ES_OK)
		return ret;

	for (i = 0; i < count; i++) {
		void *d = dst + (i * data_size * s);
		char *b = buf + (i * data_size);

		ret = vc_write_mem(ctxt, d, b, data_size);
		if (ret != ES_OK)
			break;
	}

	return ret;
}

#define IOIO_TYPE_STR  BIT(2)
#define IOIO_TYPE_IN   1
#define IOIO_TYPE_INS  (IOIO_TYPE_IN | IOIO_TYPE_STR)
#define IOIO_TYPE_OUT  0
#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR)

#define IOIO_REP       BIT(3)

#define IOIO_ADDR_64   BIT(9)
#define IOIO_ADDR_32   BIT(8)
#define IOIO_ADDR_16   BIT(7)

#define IOIO_DATA_32   BIT(6)
#define IOIO_DATA_16   BIT(5)
#define IOIO_DATA_8    BIT(4)

#define IOIO_SEG_ES    (0 << 10)
#define IOIO_SEG_DS    (3 << 10)

static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
{
	struct insn *insn = &ctxt->insn;
	size_t size;
	u64 port;

	*exitinfo = 0;

	switch (insn->opcode.bytes[0]) {
	/* INS opcodes */
	case 0x6c:
	case 0x6d:
		*exitinfo |= IOIO_TYPE_INS;
		*exitinfo |= IOIO_SEG_ES;
		port	   = ctxt->regs->dx & 0xffff;
		break;

	/* OUTS opcodes */
	case 0x6e:
	case 0x6f:
		*exitinfo |= IOIO_TYPE_OUTS;
		*exitinfo |= IOIO_SEG_DS;
		port	   = ctxt->regs->dx & 0xffff;
		break;

	/* IN immediate opcodes */
	case 0xe4:
	case 0xe5:
		*exitinfo |= IOIO_TYPE_IN;
		port	   = (u8)insn->immediate.value & 0xffff;
		break;

	/* OUT immediate opcodes */
	case 0xe6:
	case 0xe7:
		*exitinfo |= IOIO_TYPE_OUT;
		port	   = (u8)insn->immediate.value & 0xffff;
		break;

	/* IN register opcodes */
	case 0xec:
	case 0xed:
		*exitinfo |= IOIO_TYPE_IN;
		port	   = ctxt->regs->dx & 0xffff;
		break;

	/* OUT register opcodes */
	case 0xee:
	case 0xef:
		*exitinfo |= IOIO_TYPE_OUT;
		port	   = ctxt->regs->dx & 0xffff;
		break;

	default:
		return ES_DECODE_FAILED;
	}

	*exitinfo |= port << 16;

	switch (insn->opcode.bytes[0]) {
	case 0x6c:
	case 0x6e:
	case 0xe4:
	case 0xe6:
	case 0xec:
	case 0xee:
		/* Single byte opcodes */
		*exitinfo |= IOIO_DATA_8;
		size       = 1;
		break;
	default:
		/* Length determined by instruction parsing */
		*exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
						     : IOIO_DATA_32;
		size       = (insn->opnd_bytes == 2) ? 2 : 4;
	}

	switch (insn->addr_bytes) {
	case 2:
		*exitinfo |= IOIO_ADDR_16;
		break;
	case 4:
		*exitinfo |= IOIO_ADDR_32;
		break;
	case 8:
		*exitinfo |= IOIO_ADDR_64;
		break;
	}

	if (insn_has_rep_prefix(insn))
		*exitinfo |= IOIO_REP;

	return vc_ioio_check(ctxt, (u16)port, size);
}

static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	u64 exit_info_1, exit_info_2;
	enum es_result ret;

	ret = vc_ioio_exitinfo(ctxt, &exit_info_1);
	if (ret != ES_OK)
		return ret;

	if (exit_info_1 & IOIO_TYPE_STR) {

		/* (REP) INS/OUTS */

		bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF);
		unsigned int io_bytes, exit_bytes;
		unsigned int ghcb_count, op_count;
		unsigned long es_base;
		u64 sw_scratch;

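		/*
		 * Bits 6:4 of the exit information encode the operand size set
		 * via IOIO_DATA_*, so io_bytes below ends up as 1, 2 or 4, and
		 * ghcb_count is the number of such elements that fit into the
		 * GHCB shared buffer used as the transfer area.
		 */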
		/*
		 * For the string variants with rep prefix the number of in/out
		 * operations per #VC exception is limited so that the kernel
		 * has a chance to take interrupts and re-schedule while the
		 * instruction is emulated.
		 */
		io_bytes   = (exit_info_1 >> 4) & 0x7;
		ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes;

		op_count    = (exit_info_1 & IOIO_REP) ? regs->cx : 1;
		exit_info_2 = min(op_count, ghcb_count);
		exit_bytes  = exit_info_2 * io_bytes;

		es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

		/* Read bytes of OUTS into the shared buffer */
		if (!(exit_info_1 & IOIO_TYPE_IN)) {
			ret = vc_insn_string_read(ctxt,
					       (void *)(es_base + regs->si),
					       ghcb->shared_buffer, io_bytes,
					       exit_info_2, df);
			if (ret)
				return ret;
		}

		/*
		 * Issue a VMGEXIT to the HV to consume the bytes from the
		 * shared buffer or to have it write them into the shared buffer
		 * depending on the instruction: OUTS or INS.
		 */
		sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
		ghcb_set_sw_scratch(ghcb, sw_scratch);
		ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
					  exit_info_1, exit_info_2);
		if (ret != ES_OK)
			return ret;

		/* Read bytes from shared buffer into the guest's destination. */
		if (exit_info_1 & IOIO_TYPE_IN) {
			ret = vc_insn_string_write(ctxt,
						   (void *)(es_base + regs->di),
						   ghcb->shared_buffer, io_bytes,
						   exit_info_2, df);
			if (ret)
				return ret;

			if (df)
				regs->di -= exit_bytes;
			else
				regs->di += exit_bytes;
		} else {
			if (df)
				regs->si -= exit_bytes;
			else
				regs->si += exit_bytes;
		}

		if (exit_info_1 & IOIO_REP)
			regs->cx -= exit_info_2;

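		/*
		 * If the REP count is not yet exhausted, return ES_RETRY so
		 * the instruction is restarted rather than completed; the
		 * remaining iterations are then handled by subsequent #VC
		 * exceptions.
		 */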
		ret = regs->cx ? ES_RETRY : ES_OK;

	} else {

		/* IN/OUT into/from rAX */

		int bits = (exit_info_1 & 0x70) >> 1;
		u64 rax = 0;

		if (!(exit_info_1 & IOIO_TYPE_IN))
			rax = lower_bits(regs->ax, bits);

		ghcb_set_rax(ghcb, rax);

		ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
		if (ret != ES_OK)
			return ret;

		if (exit_info_1 & IOIO_TYPE_IN) {
			if (!ghcb_rax_is_valid(ghcb))
				return ES_VMM_ERROR;
			regs->ax = lower_bits(ghcb->save.rax, bits);
		}
	}

	return ret;
}

static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	struct cpuid_leaf leaf;
	int ret;

	leaf.fn = regs->ax;
	leaf.subfn = regs->cx;
	ret = snp_cpuid(ghcb, ctxt, &leaf);
	if (!ret) {
		regs->ax = leaf.eax;
		regs->bx = leaf.ebx;
		regs->cx = leaf.ecx;
		regs->dx = leaf.edx;
	}

	return ret;
}

static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	u32 cr4 = native_read_cr4();
	enum es_result ret;
	int snp_cpuid_ret;

	snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt);
	if (!snp_cpuid_ret)
		return ES_OK;
	if (snp_cpuid_ret != -EOPNOTSUPP)
		return ES_VMM_ERROR;

	ghcb_set_rax(ghcb, regs->ax);
	ghcb_set_rcx(ghcb, regs->cx);

	if (cr4 & X86_CR4_OSXSAVE)
		/* Safe to read xcr0 */
		ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
	else
		/* xgetbv will cause #GP - use reset value for xcr0 */
		ghcb_set_xcr0(ghcb, 1);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
	if (ret != ES_OK)
		return ret;

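	/*
	 * The hypervisor must flag the returned registers as valid in the
	 * GHCB valid bitmap; anything less is treated as a (potentially
	 * malicious) VMM error rather than silently consuming stale register
	 * values.
	 */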
	if (!(ghcb_rax_is_valid(ghcb) &&
	      ghcb_rbx_is_valid(ghcb) &&
	      ghcb_rcx_is_valid(ghcb) &&
	      ghcb_rdx_is_valid(ghcb)))
		return ES_VMM_ERROR;

	regs->ax = ghcb->save.rax;
	regs->bx = ghcb->save.rbx;
	regs->cx = ghcb->save.rcx;
	regs->dx = ghcb->save.rdx;

	return ES_OK;
}

static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt,
				      unsigned long exit_code)
{
	bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
	enum es_result ret;

	/*
	 * The hypervisor should not be intercepting RDTSC/RDTSCP when Secure
	 * TSC is enabled. A #VC exception will be generated if the RDTSC/RDTSCP
	 * instructions are being intercepted. If this should occur and Secure
	 * TSC is enabled, guest execution should be terminated as the guest
	 * cannot rely on the TSC value provided by the hypervisor.
	 */
	if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
		return ES_VMM_ERROR;

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) &&
	      (!rdtscp || ghcb_rcx_is_valid(ghcb))))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;
	ctxt->regs->dx = ghcb->save.rdx;
	if (rdtscp)
		ctxt->regs->cx = ghcb->save.rcx;

	return ES_OK;
}

struct cc_setup_data {
	struct setup_data header;
	u32 cc_blob_address;
};

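/*
 * setup_data entries form a singly linked list whose 'next' members are
 * physical addresses; the walk below dereferences them directly, relying on
 * the early identity mapping. For SETUP_CC_BLOB entries the payload is a
 * 32-bit physical pointer to the cc_blob_sev_info structure.
 */
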
1092a3cbbb47SArd Biesheuvel static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
1093a3cbbb47SArd Biesheuvel 				      struct es_em_ctxt *ctxt,
1094a3cbbb47SArd Biesheuvel 				      unsigned long exit_code)
1095a3cbbb47SArd Biesheuvel {
1096a3cbbb47SArd Biesheuvel 	bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
1097a3cbbb47SArd Biesheuvel 	enum es_result ret;
1098a3cbbb47SArd Biesheuvel 
1099a3cbbb47SArd Biesheuvel 	/*
1100a3cbbb47SArd Biesheuvel 	 * The hypervisor should not be intercepting RDTSC/RDTSCP when Secure
1101a3cbbb47SArd Biesheuvel 	 * TSC is enabled. A #VC exception will be generated if the RDTSC/RDTSCP
1102a3cbbb47SArd Biesheuvel 	 * instructions are being intercepted. If this should occur and Secure
1103a3cbbb47SArd Biesheuvel 	 * TSC is enabled, guest execution should be terminated as the guest
1104a3cbbb47SArd Biesheuvel 	 * cannot rely on the TSC value provided by the hypervisor.
1105a3cbbb47SArd Biesheuvel 	 */
1106a3cbbb47SArd Biesheuvel 	if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
1107a3cbbb47SArd Biesheuvel 		return ES_VMM_ERROR;
1108a3cbbb47SArd Biesheuvel 
1109a3cbbb47SArd Biesheuvel 	ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
1110a3cbbb47SArd Biesheuvel 	if (ret != ES_OK)
1111a3cbbb47SArd Biesheuvel 		return ret;
1112a3cbbb47SArd Biesheuvel 
1113a3cbbb47SArd Biesheuvel 	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) &&
1114a3cbbb47SArd Biesheuvel 	      (!rdtscp || ghcb_rcx_is_valid(ghcb))))
1115a3cbbb47SArd Biesheuvel 		return ES_VMM_ERROR;
1116a3cbbb47SArd Biesheuvel 
1117a3cbbb47SArd Biesheuvel 	ctxt->regs->ax = ghcb->save.rax;
1118a3cbbb47SArd Biesheuvel 	ctxt->regs->dx = ghcb->save.rdx;
1119a3cbbb47SArd Biesheuvel 	if (rdtscp)
1120a3cbbb47SArd Biesheuvel 		ctxt->regs->cx = ghcb->save.rcx;
1121a3cbbb47SArd Biesheuvel 
1122a3cbbb47SArd Biesheuvel 	return ES_OK;
1123a3cbbb47SArd Biesheuvel }
1124a3cbbb47SArd Biesheuvel 
1125a3cbbb47SArd Biesheuvel struct cc_setup_data {
1126a3cbbb47SArd Biesheuvel 	struct setup_data header;
1127a3cbbb47SArd Biesheuvel 	u32 cc_blob_address;
1128a3cbbb47SArd Biesheuvel };
1129a3cbbb47SArd Biesheuvel 
1130a3cbbb47SArd Biesheuvel /*
1131a3cbbb47SArd Biesheuvel  * Search for a Confidential Computing blob passed in as a setup_data entry
1132a3cbbb47SArd Biesheuvel  * via the Linux Boot Protocol.
1133a3cbbb47SArd Biesheuvel  */
1134a3cbbb47SArd Biesheuvel static __head
1135a3cbbb47SArd Biesheuvel struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
1136a3cbbb47SArd Biesheuvel {
1137a3cbbb47SArd Biesheuvel 	struct cc_setup_data *sd = NULL;
1138a3cbbb47SArd Biesheuvel 	struct setup_data *hdr;
1139a3cbbb47SArd Biesheuvel 
1140a3cbbb47SArd Biesheuvel 	hdr = (struct setup_data *)bp->hdr.setup_data;
1141a3cbbb47SArd Biesheuvel 
1142a3cbbb47SArd Biesheuvel 	while (hdr) {
1143a3cbbb47SArd Biesheuvel 		if (hdr->type == SETUP_CC_BLOB) {
1144a3cbbb47SArd Biesheuvel 			sd = (struct cc_setup_data *)hdr;
1145a3cbbb47SArd Biesheuvel 			return (struct cc_blob_sev_info *)(unsigned long)sd->cc_blob_address;
1146a3cbbb47SArd Biesheuvel 		}
1147a3cbbb47SArd Biesheuvel 		hdr = (struct setup_data *)hdr->next;
1148a3cbbb47SArd Biesheuvel 	}
1149a3cbbb47SArd Biesheuvel 
1150a3cbbb47SArd Biesheuvel 	return NULL;
1151a3cbbb47SArd Biesheuvel }
1152a3cbbb47SArd Biesheuvel 
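/*
 * The setup_data entries referenced above form a singly linked list of
 * physical addresses hanging off boot_params.hdr.setup_data; each node
 * carries a struct setup_data header (next, type, len) followed by its
 * payload, and for SETUP_CC_BLOB the payload is the physical address of the
 * Confidential Computing blob. A generic version of that walk might look
 * like the sketch below; it is illustrative only (hypothetical name) and,
 * like the helper above, assumes the list is identity mapped this early in
 * boot.
 */
static __head __maybe_unused
struct setup_data *example_find_setup_data(struct boot_params *bp, u32 type)
{
	struct setup_data *hdr = (struct setup_data *)bp->hdr.setup_data;

	while (hdr) {
		if (hdr->type == type)
			return hdr;
		hdr = (struct setup_data *)hdr->next;
	}

	return NULL;
}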
1153a3cbbb47SArd Biesheuvel /*
1154a3cbbb47SArd Biesheuvel  * Initialize the kernel's copy of the SNP CPUID table, and set up the
1155a3cbbb47SArd Biesheuvel  * pointer that will be used to access it.
1156a3cbbb47SArd Biesheuvel  *
1157a3cbbb47SArd Biesheuvel  * Maintaining a direct mapping of the SNP CPUID table used by firmware would
1158a3cbbb47SArd Biesheuvel  * be possible as an alternative, but the approach is brittle since the
1159a3cbbb47SArd Biesheuvel  * mapping needs to be updated in sync with all the changes to virtual memory
1160a3cbbb47SArd Biesheuvel  * layout and related mapping facilities throughout the boot process.
1161a3cbbb47SArd Biesheuvel  */
1162a3cbbb47SArd Biesheuvel static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
1163a3cbbb47SArd Biesheuvel {
1164a3cbbb47SArd Biesheuvel 	const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table;
1165a3cbbb47SArd Biesheuvel 	int i;
1166a3cbbb47SArd Biesheuvel 
1167a3cbbb47SArd Biesheuvel 	if (!cc_info || !cc_info->cpuid_phys || cc_info->cpuid_len < PAGE_SIZE)
1168a3cbbb47SArd Biesheuvel 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID);
1169a3cbbb47SArd Biesheuvel 
1170a3cbbb47SArd Biesheuvel 	cpuid_table_fw = (const struct snp_cpuid_table *)cc_info->cpuid_phys;
1171a3cbbb47SArd Biesheuvel 	if (!cpuid_table_fw->count || cpuid_table_fw->count > SNP_CPUID_COUNT_MAX)
1172a3cbbb47SArd Biesheuvel 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID);
1173a3cbbb47SArd Biesheuvel 
1174a3cbbb47SArd Biesheuvel 	cpuid_table = snp_cpuid_get_table();
1175a3cbbb47SArd Biesheuvel 	memcpy((void *)cpuid_table, cpuid_table_fw, sizeof(*cpuid_table));
1176a3cbbb47SArd Biesheuvel 
1177a3cbbb47SArd Biesheuvel 	/* Initialize CPUID ranges for range-checking. */
1178a3cbbb47SArd Biesheuvel 	for (i = 0; i < cpuid_table->count; i++) {
1179a3cbbb47SArd Biesheuvel 		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
1180a3cbbb47SArd Biesheuvel 
1181a3cbbb47SArd Biesheuvel 		if (fn->eax_in == 0x0)
1182*681e2901SArd Biesheuvel 			cpuid_std_range_max = fn->eax;
1183a3cbbb47SArd Biesheuvel 		else if (fn->eax_in == 0x40000000)
1184*681e2901SArd Biesheuvel 			cpuid_hyp_range_max = fn->eax;
1185a3cbbb47SArd Biesheuvel 		else if (fn->eax_in == 0x80000000)
1186*681e2901SArd Biesheuvel 			cpuid_ext_range_max = fn->eax;
1187a3cbbb47SArd Biesheuvel 	}
1188a3cbbb47SArd Biesheuvel }
1189a3cbbb47SArd Biesheuvel 
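/*
 * The three *_range_max variables recorded above come from the leaves that
 * report the highest supported function in each CPUID range (0x0, 0x40000000
 * and 0x80000000). They let the CPUID lookup distinguish an in-range leaf
 * whose table entry is legitimately all zeroes from a request that is simply
 * out of range. A range check built on them might look roughly like the
 * sketch below (illustrative, hypothetical name; the real check is part of
 * the snp_cpuid() path earlier in this file).
 */
static __maybe_unused bool example_cpuid_fn_in_range(u32 fn)
{
	if (fn <= cpuid_std_range_max)
		return true;
	if (fn >= 0x40000000 && fn <= cpuid_hyp_range_max)
		return true;
	if (fn >= 0x80000000 && fn <= cpuid_ext_range_max)
		return true;

	return false;
}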
1190a3cbbb47SArd Biesheuvel static void __head svsm_pval_4k_page(unsigned long paddr, bool validate)
1191a3cbbb47SArd Biesheuvel {
1192a3cbbb47SArd Biesheuvel 	struct svsm_pvalidate_call *pc;
1193a3cbbb47SArd Biesheuvel 	struct svsm_call call = {};
1194a3cbbb47SArd Biesheuvel 	unsigned long flags;
1195a3cbbb47SArd Biesheuvel 	u64 pc_pa;
1196a3cbbb47SArd Biesheuvel 	int ret;
1197a3cbbb47SArd Biesheuvel 
1198a3cbbb47SArd Biesheuvel 	/*
1199a3cbbb47SArd Biesheuvel 	 * This can be called very early in the boot, use native functions in
1200a3cbbb47SArd Biesheuvel 	 * order to avoid paravirt issues.
1201a3cbbb47SArd Biesheuvel 	 */
1202a3cbbb47SArd Biesheuvel 	flags = native_local_irq_save();
1203a3cbbb47SArd Biesheuvel 
1204a3cbbb47SArd Biesheuvel 	call.caa = svsm_get_caa();
1205a3cbbb47SArd Biesheuvel 
1206a3cbbb47SArd Biesheuvel 	pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;
1207a3cbbb47SArd Biesheuvel 	pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer);
1208a3cbbb47SArd Biesheuvel 
1209a3cbbb47SArd Biesheuvel 	pc->num_entries = 1;
1210a3cbbb47SArd Biesheuvel 	pc->cur_index = 0;
1211a3cbbb47SArd Biesheuvel 	pc->entry[0].page_size = RMP_PG_SIZE_4K;
1212a3cbbb47SArd Biesheuvel 	pc->entry[0].action = validate;
1213a3cbbb47SArd Biesheuvel 	pc->entry[0].ignore_cf = 0;
1214a3cbbb47SArd Biesheuvel 	pc->entry[0].pfn = paddr >> PAGE_SHIFT;
1215a3cbbb47SArd Biesheuvel 
1216a3cbbb47SArd Biesheuvel 	/* Protocol 0, Call ID 1 */
1217a3cbbb47SArd Biesheuvel 	call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
1218a3cbbb47SArd Biesheuvel 	call.rcx = pc_pa;
1219a3cbbb47SArd Biesheuvel 
1220a3cbbb47SArd Biesheuvel 	ret = svsm_perform_call_protocol(&call);
1221a3cbbb47SArd Biesheuvel 	if (ret)
1222a3cbbb47SArd Biesheuvel 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
1223a3cbbb47SArd Biesheuvel 
1224a3cbbb47SArd Biesheuvel 	native_local_irq_restore(flags);
1225a3cbbb47SArd Biesheuvel }
1226a3cbbb47SArd Biesheuvel 
1227a3cbbb47SArd Biesheuvel static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
1228a3cbbb47SArd Biesheuvel 				     bool validate)
1229a3cbbb47SArd Biesheuvel {
1230a3cbbb47SArd Biesheuvel 	int ret;
1231a3cbbb47SArd Biesheuvel 
1232*681e2901SArd Biesheuvel 	if (snp_vmpl) {
1233a3cbbb47SArd Biesheuvel 		svsm_pval_4k_page(paddr, validate);
1234a3cbbb47SArd Biesheuvel 	} else {
1235a3cbbb47SArd Biesheuvel 		ret = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
1236a3cbbb47SArd Biesheuvel 		if (ret)
1237a3cbbb47SArd Biesheuvel 			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
1238a3cbbb47SArd Biesheuvel 	}
1239a3cbbb47SArd Biesheuvel }
1240a3cbbb47SArd Biesheuvel 
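/*
 * pvalidate_4k_page() handles exactly one 4K page, picking the SVSM protocol
 * when the guest does not run at VMPL0 (snp_vmpl != 0) and the bare PVALIDATE
 * instruction otherwise. Early page-state code that needs to (in)validate a
 * contiguous physical range can simply iterate it, as in the illustrative
 * sketch below (hypothetical name; the accompanying RMP page-state changes
 * via the hypervisor are handled by the callers elsewhere).
 */
static void __head __maybe_unused
example_pvalidate_4k_range(unsigned long vaddr, unsigned long paddr,
			   unsigned long npages, bool validate)
{
	while (npages--) {
		pvalidate_4k_page(vaddr, paddr, validate);
		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}
}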
1241a3cbbb47SArd Biesheuvel static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
1242a3cbbb47SArd Biesheuvel 					    unsigned long exit_code)
1243a3cbbb47SArd Biesheuvel {
1244a3cbbb47SArd Biesheuvel 	unsigned int opcode = (unsigned int)ctxt->insn.opcode.value;
1245a3cbbb47SArd Biesheuvel 	u8 modrm = ctxt->insn.modrm.value;
1246a3cbbb47SArd Biesheuvel 
1247a3cbbb47SArd Biesheuvel 	switch (exit_code) {
1248a3cbbb47SArd Biesheuvel 
1249a3cbbb47SArd Biesheuvel 	case SVM_EXIT_IOIO:
1250a3cbbb47SArd Biesheuvel 	case SVM_EXIT_NPF:
1251a3cbbb47SArd Biesheuvel 		/* handled separately */
1252a3cbbb47SArd Biesheuvel 		return ES_OK;
1253a3cbbb47SArd Biesheuvel 
1254a3cbbb47SArd Biesheuvel 	case SVM_EXIT_CPUID:
1255a3cbbb47SArd Biesheuvel 		if (opcode == 0xa20f)
1256a3cbbb47SArd Biesheuvel 			return ES_OK;
1257a3cbbb47SArd Biesheuvel 		break;
1258a3cbbb47SArd Biesheuvel 
1259a3cbbb47SArd Biesheuvel 	case SVM_EXIT_INVD:
1260a3cbbb47SArd Biesheuvel 		if (opcode == 0x080f)
1261a3cbbb47SArd Biesheuvel 			return ES_OK;
1262a3cbbb47SArd Biesheuvel 		break;
1263a3cbbb47SArd Biesheuvel 
1264a3cbbb47SArd Biesheuvel 	case SVM_EXIT_MONITOR:
1265a3cbbb47SArd Biesheuvel 		/* MONITOR and MONITORX instructions generate the same error code */
1266a3cbbb47SArd Biesheuvel 		if (opcode == 0x010f && (modrm == 0xc8 || modrm == 0xfa))
1267a3cbbb47SArd Biesheuvel 			return ES_OK;
1268a3cbbb47SArd Biesheuvel 		break;
1269a3cbbb47SArd Biesheuvel 
1270a3cbbb47SArd Biesheuvel 	case SVM_EXIT_MWAIT:
1271a3cbbb47SArd Biesheuvel 		/* MWAIT and MWAITX instructions generate the same error code */
1272a3cbbb47SArd Biesheuvel 		if (opcode == 0x010f && (modrm == 0xc9 || modrm == 0xfb))
1273a3cbbb47SArd Biesheuvel 			return ES_OK;
1274a3cbbb47SArd Biesheuvel 		break;
1275a3cbbb47SArd Biesheuvel 
1276a3cbbb47SArd Biesheuvel 	case SVM_EXIT_MSR:
1277a3cbbb47SArd Biesheuvel 		/* RDMSR */
1278a3cbbb47SArd Biesheuvel 		if (opcode == 0x320f ||
1279a3cbbb47SArd Biesheuvel 		    /* WRMSR */
1280a3cbbb47SArd Biesheuvel 		    opcode == 0x300f)
1281a3cbbb47SArd Biesheuvel 			return ES_OK;
1282a3cbbb47SArd Biesheuvel 		break;
1283a3cbbb47SArd Biesheuvel 
1284a3cbbb47SArd Biesheuvel 	case SVM_EXIT_RDPMC:
1285a3cbbb47SArd Biesheuvel 		if (opcode == 0x330f)
1286a3cbbb47SArd Biesheuvel 			return ES_OK;
1287a3cbbb47SArd Biesheuvel 		break;
1288a3cbbb47SArd Biesheuvel 
1289a3cbbb47SArd Biesheuvel 	case SVM_EXIT_RDTSC:
1290a3cbbb47SArd Biesheuvel 		if (opcode == 0x310f)
1291a3cbbb47SArd Biesheuvel 			return ES_OK;
1292a3cbbb47SArd Biesheuvel 		break;
1293a3cbbb47SArd Biesheuvel 
1294a3cbbb47SArd Biesheuvel 	case SVM_EXIT_RDTSCP:
1295a3cbbb47SArd Biesheuvel 		if (opcode == 0x010f && modrm == 0xf9)
1296a3cbbb47SArd Biesheuvel 			return ES_OK;
1297a3cbbb47SArd Biesheuvel 		break;
1298a3cbbb47SArd Biesheuvel 
1299a3cbbb47SArd Biesheuvel 	case SVM_EXIT_READ_DR7:
1300a3cbbb47SArd Biesheuvel 		if (opcode == 0x210f &&
1301a3cbbb47SArd Biesheuvel 		    X86_MODRM_REG(ctxt->insn.modrm.value) == 7)
1302a3cbbb47SArd Biesheuvel 			return ES_OK;
1303a3cbbb47SArd Biesheuvel 		break;
1304a3cbbb47SArd Biesheuvel 
1305a3cbbb47SArd Biesheuvel 	case SVM_EXIT_VMMCALL:
1306a3cbbb47SArd Biesheuvel 		if (opcode == 0x010f && modrm == 0xd9)
1307a3cbbb47SArd Biesheuvel 			return ES_OK;
1308a3cbbb47SArd Biesheuvel 
1309a3cbbb47SArd Biesheuvel 		break;
1310a3cbbb47SArd Biesheuvel 
1311a3cbbb47SArd Biesheuvel 	case SVM_EXIT_WRITE_DR7:
1312a3cbbb47SArd Biesheuvel 		if (opcode == 0x230f &&
1313a3cbbb47SArd Biesheuvel 		    X86_MODRM_REG(ctxt->insn.modrm.value) == 7)
1314a3cbbb47SArd Biesheuvel 			return ES_OK;
1315a3cbbb47SArd Biesheuvel 		break;
1316a3cbbb47SArd Biesheuvel 
1317a3cbbb47SArd Biesheuvel 	case SVM_EXIT_WBINVD:
1318a3cbbb47SArd Biesheuvel 		if (opcode == 0x90f)
1319a3cbbb47SArd Biesheuvel 			return ES_OK;
1320a3cbbb47SArd Biesheuvel 		break;
1321a3cbbb47SArd Biesheuvel 
1322a3cbbb47SArd Biesheuvel 	default:
1323a3cbbb47SArd Biesheuvel 		break;
1324a3cbbb47SArd Biesheuvel 	}
1325a3cbbb47SArd Biesheuvel 
1326a3cbbb47SArd Biesheuvel 	sev_printk(KERN_ERR "Wrong/unhandled opcode bytes: 0x%x, exit_code: 0x%lx, rIP: 0x%lx\n",
1327a3cbbb47SArd Biesheuvel 		   opcode, exit_code, ctxt->regs->ip);
1328a3cbbb47SArd Biesheuvel 
1329a3cbbb47SArd Biesheuvel 	return ES_UNSUPPORTED;
1330a3cbbb47SArd Biesheuvel }
1331a3cbbb47SArd Biesheuvel 
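/*
 * The opcode constants above mirror how the in-kernel instruction decoder
 * exposes the opcode bytes: insn.opcode.value is a little-endian view of the
 * bytes in encounter order, so the two-byte opcode 0x0F 0xA2 (CPUID) reads
 * back as 0xa20f, the 0x0F 0x01 group (MONITOR/MWAIT/RDTSCP/VMMCALL) reads
 * back as 0x010f and is disambiguated by the ModRM byte, and 0x0F 0x09
 * (WBINVD) reads back as 0x90f. A hypothetical helper that builds such a
 * constant from the raw bytes:
 */
static __maybe_unused unsigned int example_two_byte_opcode(u8 first, u8 second)
{
	/* example_two_byte_opcode(0x0f, 0xa2) == 0xa20f, i.e. CPUID */
	return (unsigned int)first | ((unsigned int)second << 8);
}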
1332a3cbbb47SArd Biesheuvel /*
1333a3cbbb47SArd Biesheuvel  * Maintain the GPA of the SVSM Calling Area (CA) in order to utilize the SVSM
1334a3cbbb47SArd Biesheuvel  * services needed when not running in VMPL0.
1335a3cbbb47SArd Biesheuvel  */
1336a3cbbb47SArd Biesheuvel static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
1337a3cbbb47SArd Biesheuvel {
1338a3cbbb47SArd Biesheuvel 	struct snp_secrets_page *secrets_page;
1339a3cbbb47SArd Biesheuvel 	struct snp_cpuid_table *cpuid_table;
1340a3cbbb47SArd Biesheuvel 	unsigned int i;
1341a3cbbb47SArd Biesheuvel 	u64 caa;
1342a3cbbb47SArd Biesheuvel 
1343a3cbbb47SArd Biesheuvel 	BUILD_BUG_ON(sizeof(*secrets_page) != PAGE_SIZE);
1344a3cbbb47SArd Biesheuvel 
1345a3cbbb47SArd Biesheuvel 	/*
1346a3cbbb47SArd Biesheuvel 	 * Check if running at VMPL0.
1347a3cbbb47SArd Biesheuvel 	 *
1348a3cbbb47SArd Biesheuvel 	 * Use RMPADJUST (see the rmpadjust() function for a description of what
1349a3cbbb47SArd Biesheuvel 	 * the instruction does) to update the VMPL1 permissions of a page. If
1350a3cbbb47SArd Biesheuvel 	 * the guest is running at VMPL0, this will succeed and implies there is
1351a3cbbb47SArd Biesheuvel 	 * no SVSM. If the guest is running at any other VMPL, this will fail.
1352a3cbbb47SArd Biesheuvel 	 * Linux SNP guests only ever run at a single VMPL level so permission mask
1353a3cbbb47SArd Biesheuvel 	 * changes of a lesser-privileged VMPL are a don't-care.
1354a3cbbb47SArd Biesheuvel 	 *
1355a3cbbb47SArd Biesheuvel 	 * Use a rip-relative reference to obtain the proper address, since this
1356a3cbbb47SArd Biesheuvel 	 * routine is running identity mapped when called, both by the decompressor
1357a3cbbb47SArd Biesheuvel 	 * code and the early kernel code.
1358a3cbbb47SArd Biesheuvel 	 */
1359a3cbbb47SArd Biesheuvel 	if (!rmpadjust((unsigned long)rip_rel_ptr(&boot_ghcb_page), RMP_PG_SIZE_4K, 1))
1360a3cbbb47SArd Biesheuvel 		return false;
1361a3cbbb47SArd Biesheuvel 
1362a3cbbb47SArd Biesheuvel 	/*
1363a3cbbb47SArd Biesheuvel 	 * Not running at VMPL0, ensure everything has been properly supplied
1364a3cbbb47SArd Biesheuvel 	 * for running under an SVSM.
1365a3cbbb47SArd Biesheuvel 	 */
1366a3cbbb47SArd Biesheuvel 	if (!cc_info || !cc_info->secrets_phys || cc_info->secrets_len != PAGE_SIZE)
1367a3cbbb47SArd Biesheuvel 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECRETS_PAGE);
1368a3cbbb47SArd Biesheuvel 
1369a3cbbb47SArd Biesheuvel 	secrets_page = (struct snp_secrets_page *)cc_info->secrets_phys;
1370a3cbbb47SArd Biesheuvel 	if (!secrets_page->svsm_size)
1371a3cbbb47SArd Biesheuvel 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NO_SVSM);
1372a3cbbb47SArd Biesheuvel 
1373a3cbbb47SArd Biesheuvel 	if (!secrets_page->svsm_guest_vmpl)
1374a3cbbb47SArd Biesheuvel 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_VMPL0);
1375a3cbbb47SArd Biesheuvel 
1376*681e2901SArd Biesheuvel 	snp_vmpl = secrets_page->svsm_guest_vmpl;
1377a3cbbb47SArd Biesheuvel 
1378a3cbbb47SArd Biesheuvel 	caa = secrets_page->svsm_caa;
1379a3cbbb47SArd Biesheuvel 
1380a3cbbb47SArd Biesheuvel 	/*
1381a3cbbb47SArd Biesheuvel 	 * An open-coded PAGE_ALIGNED() in order to avoid including
1382a3cbbb47SArd Biesheuvel 	 * kernel-proper headers into the decompressor.
1383a3cbbb47SArd Biesheuvel 	 */
1384a3cbbb47SArd Biesheuvel 	if (caa & (PAGE_SIZE - 1))
1385a3cbbb47SArd Biesheuvel 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CAA);
1386a3cbbb47SArd Biesheuvel 
1387a3cbbb47SArd Biesheuvel 	/*
1388a3cbbb47SArd Biesheuvel 	 * The CA is identity mapped when this routine is called, both by the
1389a3cbbb47SArd Biesheuvel 	 * decompressor code and the early kernel code.
1390a3cbbb47SArd Biesheuvel 	 */
1391*681e2901SArd Biesheuvel 	boot_svsm_caa = (struct svsm_ca *)caa;
1392*681e2901SArd Biesheuvel 	boot_svsm_caa_pa = caa;
1393a3cbbb47SArd Biesheuvel 
1394a3cbbb47SArd Biesheuvel 	/* Advertise the SVSM presence via CPUID. */
1395a3cbbb47SArd Biesheuvel 	cpuid_table = (struct snp_cpuid_table *)snp_cpuid_get_table();
1396a3cbbb47SArd Biesheuvel 	for (i = 0; i < cpuid_table->count; i++) {
1397a3cbbb47SArd Biesheuvel 		struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
1398a3cbbb47SArd Biesheuvel 
1399a3cbbb47SArd Biesheuvel 		if (fn->eax_in == 0x8000001f)
1400a3cbbb47SArd Biesheuvel 			fn->eax |= BIT(28);
1401a3cbbb47SArd Biesheuvel 	}
1402a3cbbb47SArd Biesheuvel 
1403a3cbbb47SArd Biesheuvel 	return true;
1404a3cbbb47SArd Biesheuvel }
1405
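/*
 * svsm_setup_ca() advertises the SVSM by setting bit 28 in the EAX output of
 * CPUID leaf 0x8000001F in the guest's copy of the CPUID table. A later
 * consumer could detect that advertisement roughly as in the sketch below
 * (illustrative only, hypothetical name).
 */
static bool __maybe_unused example_cpuid_table_reports_svsm(void)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
	unsigned int i;

	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];

		if (fn->eax_in == 0x8000001f)
			return !!(fn->eax & BIT(28));
	}

	return false;
}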