// SPDX-License-Identifier: GPL-2.0
/*
 * This file is not compiled stand-alone. It contains code shared
 * between the pre-decompression boot code and the running Linux kernel
 * and is included directly into both code-bases.
 */
/*
 * These will be initialized based on the CPUID table so that non-present
 * all-zero leaves (for sparse tables) can be differentiated from
 * invalid/out-of-range leaves. This is needed since all-zero leaves
 * still need to be post-processed.
 */
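/*
 * A sketch of the range-max variables the comment above refers to (the names
 * are taken from their uses in snp_cpuid() and setup_cpuid_table() below; the
 * exact qualifiers are assumed):
 */
static u32 cpuid_std_range_max __ro_after_init;
static u32 cpuid_hyp_range_max __ro_after_init;
static u32 cpuid_ext_range_max __ro_after_init;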
static int svsm_process_result_codes(struct svsm_call *call)
{
	switch (call->rax_out) {
	case SVSM_SUCCESS:
		return 0;
	case SVSM_ERR_INCOMPLETE:
	case SVSM_ERR_BUSY:
		return -EAGAIN;
	default:
		return -EINVAL;
	}
}
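/*
 * Shape of the SVSM call descriptor used above and below (a sketch: the field
 * set is inferred from the accesses in this file, the exact layout lives in
 * the SEV headers):
 */
struct svsm_call {
	struct svsm_ca *caa;				/* SVSM Calling Area */
	u64 rax, rcx, rdx, r8, r9;			/* register state passed in */
	u64 rax_out, rcx_out, rdx_out, r8_out, r9_out;	/* register state returned */
};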
/*
 * Issue a VMGEXIT to call the SVSM:
 *   - Load the SVSM register state (RAX, RCX, RDX, R8 and R9)
 *   - Set the CA call pending field to 1
 *   - Issue VMGEXIT
 *   - Save the SVSM return register state (RAX, RCX, RDX, R8 and R9)
 *   - Perform atomic exchange of the CA call pending field
 *
 *   - See the "Secure VM Service Module for SEV-SNP Guests" specification for
 *     details on the calling convention
 *   - The calling convention loosely follows the Microsoft X64 calling
 *     convention
 *   - RAX specifies the SVSM protocol/callid as input and the return code
 *     as output
 */
static __always_inline void svsm_issue_call(struct svsm_call *call, u8 *pending)
{
	register unsigned long rax asm("rax") = call->rax;
	register unsigned long rcx asm("rcx") = call->rcx;
	register unsigned long rdx asm("rdx") = call->rdx;
	register unsigned long r8 asm("r8") = call->r8;
	register unsigned long r9 asm("r9") = call->r9;

	call->caa->call_pending = 1;
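	/*
	 * The VMGEXIT itself is elided in this excerpt; a sketch of the usual
	 * form ("rep; vmmcall" is the VMGEXIT encoding, the register variables
	 * above are tied to the fixed registers the SVSM expects):
	 */
	asm volatile("rep; vmmcall\n\t"
		     : "+r" (rax), "+r" (rcx), "+r" (rdx), "+r" (r8), "+r" (r9)
		     : : "memory");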
	*pending = xchg(&call->caa->call_pending, *pending);

	call->rax_out = rax;
	call->rcx_out = rcx;
	call->rdx_out = rdx;
	call->r8_out = r8;
	call->r9_out = r9;
}
/*
 * In svsm_perform_msr_protocol(): response sanity checks ("pending" and
 * "resp" are set by the call-issue path; the conditions shown are inferred
 * from the GHCB MSR VMPL-request protocol).
 */
	if (pending)
		return -EINVAL;

	if (GHCB_RESP_CODE(resp) != GHCB_MSR_VMPL_RESP)
		return -EINVAL;

	if (GHCB_MSR_VMPL_RESP_VAL(resp))
		return -EINVAL;
/* In __sev_cpuid_hv(): "val" is the hypervisor's MSR-protocol response. */
	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
		return -EIO;
static int __sev_cpuid_hv_msr(struct cpuid_leaf *leaf)
{
	int ret;

	/*
	 * MSR protocol does not support fetching non-zero subfunctions, but is
	 * sufficient to handle current early-boot cases. Should that change,
	 * make sure to report an error rather than ignoring the index and
	 * grabbing random values. If this issue arises in the future, handling
	 * can be added here to use GHCB-page protocol for cases that occur late
	 * enough in boot that the GHCB page is available.
	 */
	if (cpuid_function_is_indexed(leaf->fn) && leaf->subfn)
		return -EINVAL;
	ret = __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EAX, &leaf->eax);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EBX, &leaf->ebx);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_ECX, &leaf->ecx);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EDX, &leaf->edx);
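	/*
	 * The GNU "?:" chaining above keeps the first non-zero error code and
	 * skips the remaining register fetches once one request has failed.
	 */
	return ret;
}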
/*
 * This can be called early, while still running on the initial identity
 * mapping. Use RIP-relative addressing to obtain the correct address of the
 * CPUID table in a way that also remains correct after the
 * switch-over to kernel virtual addresses later.
 */
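/*
 * A sketch of the lookup the comment above describes (the function body and
 * the "cpuid_table_copy" backing symbol are assumed here; the "p" constraint
 * merely keeps the symbol referenced):
 */
static const struct snp_cpuid_table *snp_cpuid_get_table(void)
{
	void *ptr;

	/* LEA with %rip yields the symbol's runtime address without a relocation fixup. */
	asm ("lea cpuid_table_copy(%%rip), %0"
	     : "=r" (ptr)
	     : "p" (&cpuid_table_copy));

	return ptr;
}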
/* In snp_cpuid_calc_xsave_size(): */
	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *e = &cpuid_table->fn[i];

		if (!(e->eax_in == 0xD && e->ecx_in > 1 && e->ecx_in < 64))
			continue;
		if (!(xfeatures_en & (BIT_ULL(e->ecx_in))))
			continue;
		if (xfeatures_found & (BIT_ULL(e->ecx_in)))
			continue;

		xfeatures_found |= (BIT_ULL(e->ecx_in));

		if (compacted)
			xsave_size += e->eax;
		else
			xsave_size = max(xsave_size, e->eax + e->ebx);
	}
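	/*
	 * Sizing note: in the compacted XSAVE format the enabled feature areas
	 * are packed one after another, so their sizes (reported in EAX)
	 * accumulate; in the standard format each area lives at a fixed offset
	 * (reported in EBX), so the total is the largest offset + size pair.
	 */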
/* In snp_cpuid_get_validated_func(): */
	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *e = &cpuid_table->fn[i];

		if (e->eax_in != leaf->fn)
			continue;

		if (cpuid_function_is_indexed(leaf->fn) && e->ecx_in != leaf->subfn)
			continue;

		if (e->eax_in == 0xD && (e->ecx_in == 0 || e->ecx_in == 1))
			if (!(e->xcr0_in == 1 || e->xcr0_in == 3) || e->xss_in)
				continue;

		leaf->eax = e->eax;
		leaf->ebx = e->ebx;
		leaf->ecx = e->ecx;
		leaf->edx = e->edx;

		return true;
	}

	return false;
/* In snp_cpuid_postprocess(): "leaf_hv" holds the hypervisor's view of the leaf. */
	switch (leaf->fn) {
	case 0x1:
		/* initial APIC ID */
		leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0));
		/* APIC enabled bit */
		leaf->edx = (leaf_hv.edx & BIT(9)) | (leaf->edx & ~BIT(9));
		/* OSXSAVE enabled? */
		if (native_read_cr4() & X86_CR4_OSXSAVE)
			leaf->ecx |= BIT(27);
		break;
	case 0x7:
		/* OSPKE enabled? (the exact check is elided in this excerpt) */
		if (!(native_read_cr4() & X86_CR4_PKE))
			leaf->ecx &= ~BIT(4);
		else
			leaf->ecx |= BIT(4);
		break;
	case 0xB:
		/* x2APIC ID */
		leaf->edx = leaf_hv.edx;
		break;
	case 0xD:
		if (leaf->subfn != 0 && leaf->subfn != 1)
			return 0;

		if (leaf->subfn == 1) {
			/* Get XSS value if XSAVES is enabled. */
			if (leaf->eax & BIT(3)) {
				unsigned long lo, hi;

				asm volatile("rdmsr" : "=a" (lo), "=d" (hi)
						     : "c" (MSR_IA32_XSS));
				xss = (hi << 32) | lo;
			}

			/*
			 * The PPR and APM aren't clear on what size should be
			 * encoded in 0xD:0x1:EBX when compaction is not enabled
			 * by either XSAVEC (feature bit 1) or XSAVES (feature
			 * bit 3) since SNP-capable hardware has these feature
			 * bits fixed as 1. KVM sets it to 0 in this case, but
			 * to avoid this becoming an issue it's safer to simply
			 * treat this as unsupported for SNP guests.
			 */
			if (!(leaf->eax & (BIT(1) | BIT(3))))
				return -EINVAL;
		}

		xsave_size = snp_cpuid_calc_xsave_size(xcr0 | xss, compacted);
		if (!xsave_size)
			return -EINVAL;

		leaf->ebx = xsave_size;
		break;
	case 0x8000001E:
		/* extended APIC ID */
		leaf->eax = leaf_hv.eax;
		/* compute unit ID */
		leaf->ebx = (leaf->ebx & GENMASK(31, 8)) | (leaf_hv.ebx & GENMASK(7, 0));
		/* node ID */
		leaf->ecx = (leaf->ecx & GENMASK(31, 8)) | (leaf_hv.ecx & GENMASK(7, 0));
		break;
	default:
		/* No fix-ups needed, use values as-is. */
		break;
	}

	return 0;
/*
 * snp_cpuid() returns -EOPNOTSUPP if feature not enabled. Any other non-zero
 * return value should be treated as fatal by the caller.
 */
	if (!cpuid_table->count)
		return -EOPNOTSUPP;
	/*
	 * Some hypervisors will avoid keeping track of CPUID entries
	 * where all values are zero, since they can be handled the
	 * same as out-of-range values (all-zero). This is useful here
	 * as well as it allows virtually all guest configurations to
	 * work using a single SNP CPUID table.
	 *
	 * To allow for this, there is a need to distinguish between
	 * out-of-range entries and in-range zero entries, since the
	 * CPUID table entries are only a template that may need to be
	 * augmented with additional values for things like
	 * CPU-specific information during post-processing. So if it's
	 * not in the table, set the values to zero. Then, if they are
	 * within a valid CPUID range, proceed with post-processing
	 * using zeros as the initial values. Otherwise, skip
	 * post-processing and just return zeros immediately.
	 */
	leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;

	/* Skip post-processing for out-of-range zero leaves. */
	if (!(leaf->fn <= cpuid_std_range_max ||
	      (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
	      (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
		return 0;
/*
 * Boot VC Handler - This is the first VC handler during boot, there is no GHCB
 * page yet, so it only supports the MSR based communication with the
 * hypervisor and only the CPUID exit-code.
 */
void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
{
	unsigned int subfn = lower_bits(regs->cx, 32);
	unsigned int fn = lower_bits(regs->ax, 32);
	u16 opcode = *(unsigned short *)regs->ip;
	struct cpuid_leaf leaf;
	int ret;
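	/*
	 * The opcode check itself is elided in this excerpt; a sketch of the
	 * likely form: CPUID encodes as 0F A2, which a little-endian 16-bit
	 * load reads back as 0xa20f, and nothing else can be handled here.
	 */
	if (opcode != 0xa20f)
		goto fail;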
	/*
	 * If running as an SEV-SNP guest, snp_cpuid() will provide the
	 * CPUID values (with possible HV interaction during post-processing of
	 * the values). If the CPUID table is not set up,
	 * snp_cpuid() returns -EOPNOTSUPP so that an SEV-ES guest can call the
	 * hypervisor directly via the MSR protocol instead.
	 */
	leaf.fn = fn;
	leaf.subfn = subfn;

	ret = snp_cpuid(&leaf);
	if (!ret)
		goto cpuid_done;

	if (ret != -EOPNOTSUPP)
		goto fail;
	/*
	 * This is reached by an SEV-ES guest and needs to invoke the HV for
	 * the CPUID data.
	 */
	if (__sev_cpuid_hv_msr(&leaf))
		goto fail;
cpuid_done:
	regs->ax = leaf.eax;
	regs->bx = leaf.ebx;
	regs->cx = leaf.ecx;
	regs->dx = leaf.edx;
	/*
	 * This is a VC handler and the #VC is only raised when SEV-ES is
	 * active, which means SEV must be active too. Do sanity checks on the
	 * CPUID results to make sure the hypervisor does not trick the kernel
	 * into the no-sev path. This could map sensitive data unencrypted and
	 * make it accessible to the hypervisor.
	 *
	 * In particular, check for:
	 *	- Availability of CPUID leaf 0x8000001f
	 *	- SEV CPUID bit.
	 *
	 * The hypervisor might still report the wrong C-bit position, but this
	 * can't be checked here.
	 */
	if (fn == 0x80000000 && (regs->ax < 0x8000001f))
		/* SEV leaf check */
		goto fail;
	else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
		/* SEV bit */
		goto fail;
	/* Skip over the CPUID two-byte opcode */
	regs->ip += 2;
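	return;

fail:
	/*
	 * The termination path is elided in this excerpt; a sketch of what it
	 * does: request that the hypervisor terminate the guest, since CPUID
	 * cannot be handled safely. The exact reason-code arguments to
	 * sev_es_terminate() are assumed here.
	 */
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}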
/* In find_cc_blob_setup_data(): walk the setup_data list looking for the CC blob. */
	hdr = (struct setup_data *)bp->hdr.setup_data;

	while (hdr) {
		if (hdr->type == SETUP_CC_BLOB) {
			sd = (struct cc_setup_data *)hdr;
			return (struct cc_blob_sev_info *)(unsigned long)sd->cc_blob_address;
		}

		hdr = (struct setup_data *)hdr->next;
	}

	return NULL;
/* In setup_cpuid_table(): */
	if (!cc_info || !cc_info->cpuid_phys || cc_info->cpuid_len < PAGE_SIZE)
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID);

	cpuid_table_fw = (const struct snp_cpuid_table *)cc_info->cpuid_phys;
	if (!cpuid_table_fw->count || cpuid_table_fw->count > SNP_CPUID_COUNT_MAX)
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID);

	cpuid_table = snp_cpuid_get_table();
	memcpy((void *)cpuid_table, cpuid_table_fw, sizeof(*cpuid_table));
	/* Initialize CPUID ranges for range-checking. */
	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];

		if (fn->eax_in == 0x0)
			cpuid_std_range_max = fn->eax;
		else if (fn->eax_in == 0x40000000)
			cpuid_hyp_range_max = fn->eax;
		else if (fn->eax_in == 0x80000000)
			cpuid_ext_range_max = fn->eax;
	}
/* In svsm_call_msr_protocol(): */
	do {
		ret = svsm_perform_msr_protocol(call);
	} while (ret == -EAGAIN);
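/*
 * -EAGAIN corresponds to the SVSM busy/incomplete result codes mapped by
 * svsm_process_result_codes() above, so the request is simply reissued until
 * it completes.
 */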
/* In svsm_pval_4k_page(): */
	pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer;

	pc->num_entries = 1;
	pc->cur_index = 0;
	pc->entry[0].page_size = RMP_PG_SIZE_4K;
	pc->entry[0].action = validate;
	pc->entry[0].ignore_cf = 0;
	pc->entry[0].rsvd = 0;
	pc->entry[0].pfn = paddr >> PAGE_SHIFT;
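	/*
	 * A sketch of how the request is then issued (assumed from the SVSM
	 * core protocol; SVSM_CORE_PVALIDATE and "pc_pa" are not shown in the
	 * excerpt): RAX selects the protocol/call ID, RCX carries the physical
	 * address of the buffer filled in above.
	 */
	call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE);
	call.rcx = pc_pa;

	ret = svsm_call_msr_protocol(&call);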
/*
 * In pvalidate_4k_page(): when running on hardware affected by the SNP
 * cache-coherency vulnerability, perform the cache eviction mitigation.
 */
	/*
	 * In __page_state_change(): if private -> shared then invalidate the
	 * page before requesting the state change in the RMP table.
	 */
	if (desc->op == SNP_PAGE_STATE_SHARED)
		pvalidate_4k_page(vaddr, paddr, false, desc->ca, desc->caa_pa);

	sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, desc->op));
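	/*
	 * A sketch of the elided middle (assumed from the GHCB MSR protocol):
	 * trap to the hypervisor and terminate if the Page State Change
	 * request did not succeed.
	 */
	VMGEXIT();

	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP || GHCB_MSR_PSC_RESP_VAL(val))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);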
	if (desc->op == SNP_PAGE_STATE_PRIVATE)
		pvalidate_4k_page(vaddr, paddr, true, desc->ca, desc->caa_pa);
/*
 * In svsm_setup_ca(): permission-mask changes of a lesser-privileged VMPL
 * are a don't-care.
 *
 * Use a rip-relative reference to obtain the proper address, since this
 * routine is running identity mapped when called, both by the decompressor
 * code and the early kernel code.
 */
	if (!cc_info || !cc_info->secrets_phys || cc_info->secrets_len != PAGE_SIZE)
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECRETS_PAGE);

	secrets_page = (struct snp_secrets_page *)cc_info->secrets_phys;
	if (!secrets_page->svsm_size)
		return false;

	if (!secrets_page->svsm_guest_vmpl)
		return false;

	snp_vmpl = secrets_page->svsm_guest_vmpl;
	caa = secrets_page->svsm_caa;

	/*
	 * An open-coded PAGE_ALIGNED() in order to avoid including
	 * kernel-proper headers into the decompressor.
	 */
	if (caa & (PAGE_SIZE - 1))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CAA);
	for (i = 0; i < cpuid_table->count; i++) {
		struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
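		/*
		 * Advertise the SVSM to the rest of the guest; bit 28 of CPUID
		 * 0x8000001f EAX is taken here to be the SVSM-present bit
		 * (assumed from the SVSM specification).
		 */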
		if (fn->eax_in == 0x8000001f)
			fn->eax |= BIT(28);
	}

	return true;
}