Lines Matching +full:disable +full:- +full:report +full:- +full:mask

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021-2022 Intel Corporation */
16 #include <asm/insn-eval.h>
80 /* Read TD-scoped metadata */
94 /* Write TD-scoped metadata */
95 static inline u64 tdg_vm_wr(u64 field, u64 value, u64 mask) in tdg_vm_wr() argument
100 .r9 = mask, in tdg_vm_wr()
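
The value/mask pair gives TDG.VM.WR read-modify-write semantics: only the bits set in the mask are taken from the new value, the rest of the field is left untouched (and a mask of -1ULL, as used for TDCS_NOTIFY_ENABLES further down in tdx_setup(), rewrites the whole field). A minimal user-space sketch of that merge, with a hypothetical apply_field_write() standing in for the TDX module side:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the TDX module's field update:
 * bits set in @mask come from @value, all other bits keep @old. */
static uint64_t apply_field_write(uint64_t old, uint64_t value, uint64_t mask)
{
	return (old & ~mask) | (value & mask);
}

int main(void)
{
	uint64_t field = 0xf0;

	/* Set bit 0 only, as tdg_vm_wr(FIELD, BIT(0), BIT(0)) would */
	field = apply_field_write(field, 0x1, 0x1);
	printf("after bit-0 set:    %#llx\n", (unsigned long long)field);

	/* Overwrite the whole field, as a mask of -1ULL does */
	field = apply_field_write(field, 0, ~0ULL);
	printf("after full rewrite: %#llx\n", (unsigned long long)field);
	return 0;
}
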
107 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
108 * subtype 0) using TDG.MR.REPORT TDCALL.
109 * @reportdata: Address of the input buffer which contains user-defined
113 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module v1.0
114 * specification for more information on TDG.MR.REPORT TDCALL.
118 * Return 0 on success, -ENXIO for invalid operands, -EBUSY if the operand is busy,
119 * or -EIO on other TDCALL failures.
133 return -ENXIO; in tdx_mcall_get_report0()
135 return -EBUSY; in tdx_mcall_get_report0()
136 return -EIO; in tdx_mcall_get_report0()
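
From user space this wrapper is reachable through the /dev/tdx_guest character device. A sketch of fetching a TDREPORT0 with the TDX_CMD_GET_REPORT0 ioctl, assuming the uapi header <linux/tdx-guest.h> and a caller-chosen 64-byte REPORTDATA (error handling kept minimal):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tdx-guest.h>

int main(void)
{
	struct tdx_report_req req;
	int fd, ret;

	memset(&req, 0, sizeof(req));
	/* Caller-chosen nonce/user data that ends up in the REPORTDATA field */
	memset(req.reportdata, 0xab, sizeof(req.reportdata));

	fd = open("/dev/tdx_guest", O_RDWR);
	if (fd < 0) {
		perror("open /dev/tdx_guest");
		return 1;
	}

	ret = ioctl(fd, TDX_CMD_GET_REPORT0, &req);
	if (ret) {
		perror("TDX_CMD_GET_REPORT0");
		close(fd);
		return 1;
	}

	/* req.tdreport now holds the 1024-byte TDREPORT0 blob */
	printf("got TDREPORT0, first byte: %#x\n", (unsigned)req.tdreport[0]);
	close(fd);
	return 0;
}
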
144 * tdx_mcall_extend_rtmr() - Wrapper to extend RTMR registers using
155 * Return 0 on success, -ENXIO for invalid operands, -EBUSY if the operand is busy,
156 * or -EIO on other TDCALL failures.
169 return -ENXIO; in tdx_mcall_extend_rtmr()
171 return -EBUSY; in tdx_mcall_extend_rtmr()
172 return -EIO; in tdx_mcall_extend_rtmr()
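
An RTMR extend folds a 48-byte SHA-384 digest into the register PCR-style: the new RTMR value is the SHA-384 of the old value concatenated with the extended digest. A small user-space sketch of that recomputation (useful when replaying an event log against an RTMR reported in a quote), using OpenSSL's SHA384():

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

#define RTMR_LEN SHA384_DIGEST_LENGTH	/* 48 bytes */

/* new_rtmr = SHA384(old_rtmr || extend_digest) */
static void rtmr_extend(uint8_t rtmr[RTMR_LEN], const uint8_t digest[RTMR_LEN])
{
	uint8_t buf[2 * RTMR_LEN];

	memcpy(buf, rtmr, RTMR_LEN);
	memcpy(buf + RTMR_LEN, digest, RTMR_LEN);
	SHA384(buf, sizeof(buf), rtmr);
}

int main(void)
{
	uint8_t rtmr[RTMR_LEN] = { 0 };		/* RTMRs start out all-zero */
	uint8_t digest[RTMR_LEN] = { 0x42 };	/* example event digest */
	int i;

	rtmr_extend(rtmr, digest);
	for (i = 0; i < RTMR_LEN; i++)
		printf("%02x", rtmr[i]);
	printf("\n");
	return 0;
}
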
180 * tdx_hcall_get_quote() - Wrapper to request TD Quote using GetQuote
185 * @size: size of the tdquote buffer (4KB-aligned).
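
tdx_hcall_get_quote() itself is driven from inside the kernel; a common way to obtain a quote from user space is the configfs-tsm report interface. A sketch under those assumptions (CONFIG_TSM_REPORTS enabled, configfs mounted at /sys/kernel/config, and "quote0" as an arbitrary caller-chosen report name):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* Assumed configfs-tsm mount point plus a caller-chosen report name */
#define REPORT_DIR "/sys/kernel/config/tsm/report/quote0"

static int write_file(const char *path, const void *buf, size_t len)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -errno;
	n = write(fd, buf, len);
	close(fd);
	return n == (ssize_t)len ? 0 : -EIO;
}

int main(void)
{
	unsigned char nonce[64] = { 0x42 };	/* becomes the quote's REPORTDATA */
	unsigned char quote[16384];
	ssize_t n;
	int fd;

	if (mkdir(REPORT_DIR, 0700) && errno != EEXIST) {
		perror("mkdir report");
		return 1;
	}
	if (write_file(REPORT_DIR "/inblob", nonce, sizeof(nonce)) < 0) {
		perror("write inblob");
		return 1;
	}
	/* Reading outblob triggers quote generation and may block for a while */
	fd = open(REPORT_DIR "/outblob", O_RDONLY);
	if (fd < 0) {
		perror("open outblob");
		return 1;
	}
	n = read(fd, quote, sizeof(quote));
	close(fd);
	if (n <= 0) {
		perror("read outblob");
		return 1;
	}
	printf("quote is %zd bytes\n", n);
	return 0;
}
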
237 * that no #VE will be delivered for accesses to TD-private memory.
239 * TDX 1.0 does not allow the guest to disable SEPT #VE on its own. The VMM
246 * Check if the feature is available and disable SEPT #VE if possible.
248 * If the TD is allowed to disable/enable SEPT #VEs, the TDX_ATTR_SEPT_VE_DISABLE
260 /* Is this TD allowed to disable SEPT #VE */ in disable_sept_ve()
284 /* Disable SEPT #VEs */ in disable_sept_ve()
290 * TDX 1.0 generates a #VE when accessing topology-related CPUID leaves (0xB and
296 * Enabling the feature eliminates topology-related #VEs: the TDX module
339 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL in tdx_setup()
352 *cc_mask = BIT_ULL(gpa_width - 1); in tdx_setup()
357 tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, -1ULL); in tdx_setup()
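
The cc_mask computed a few lines up is a single bit: the GPA "shared" bit sits at position gpa_width - 1, and tdx_early_init() later strips it and everything above it out of physical_mask (line 1141 further down in this listing). A tiny arithmetic sketch, assuming the common gpa_width of 52 and an example starting physical_mask:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int gpa_width = 52;			/* from TDG.VP.INFO (assumed) */
	uint64_t cc_mask = 1ULL << (gpa_width - 1);	/* BIT_ULL(gpa_width - 1) */
	uint64_t physical_mask = (1ULL << 52) - 1;	/* example starting mask */

	/* physical_mask &= cc_mask - 1: keep only the bits below the shared bit */
	physical_mask &= cc_mask - 1;

	printf("cc_mask (shared bit): %#llx\n", (unsigned long long)cc_mask);
	printf("physical_mask:        %#llx\n", (unsigned long long)physical_mask);
	return 0;
}
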
368 * - Emulation of the architectural #VE injection on EPT violation;
370 * - As a result of guest TD execution of a disallowed instruction,
373 * - A notification to the guest TD about anomalous behavior;
375 * The last one is opt-in and is not used by the kernel.
387 switch (ve->exit_reason) { in ve_instr_len()
393 /* It is safe to use ve->instr_len for #VE due to instructions */ in ve_instr_len()
394 return ve->instr_len; in ve_instr_len()
397 * For EPT violations, ve->instr_len is not defined. For those, in ve_instr_len()
401 WARN_ONCE(1, "ve->instr_len is not defined for EPT violations"); in ve_instr_len()
404 WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason); in ve_instr_len()
405 return ve->instr_len; in ve_instr_len()
419 * can be found in TDX Guest-Host-Communication Interface in __halt()
442 return -EIO; in handle_halt()
445 return -EIO; in handle_halt()
455 * Use WARN_ONCE() to report the failure. in tdx_halt()
476 .r12 = regs->cx, in read_msr()
481 * can be found in TDX Guest-Host-Communication Interface in read_msr()
485 return -EIO; in read_msr()
487 regs->ax = lower_32_bits(args.r11); in read_msr()
488 regs->dx = upper_32_bits(args.r11); in read_msr()
497 .r12 = regs->cx, in write_msr()
498 .r13 = (u64)regs->dx << 32 | regs->ax, in write_msr()
503 * can be found in TDX Guest-Host-Communication Interface in write_msr()
507 return -EIO; in write_msr()
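
The MSR value travels through the hypercall as a single 64-bit register: write_msr() packs EDX:EAX into r13 before the TDVMCALL, and read_msr() splits the returned r11 back into regs->ax and regs->dx. A trivial sketch of that packing and splitting:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ax = 0x89abcdef, dx = 0x01234567;	/* example EDX:EAX pair */

	/* write_msr(): pack EDX:EAX into one 64-bit hypercall argument */
	uint64_t r13 = (uint64_t)dx << 32 | ax;

	/* read_msr(): split the returned 64-bit value back into EDX:EAX */
	uint32_t lo = (uint32_t)r13;		/* lower_32_bits() */
	uint32_t hi = (uint32_t)(r13 >> 32);	/* upper_32_bits() */

	printf("packed=%#llx lo=%#x hi=%#x\n", (unsigned long long)r13, lo, hi);
	return 0;
}
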
517 .r12 = regs->ax, in handle_cpuid()
518 .r13 = regs->cx, in handle_cpuid()
525 * Return all-zeros for any CPUID outside the range. It matches CPU in handle_cpuid()
526 * behaviour for an unsupported leaf. in handle_cpuid()
528 if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) { in handle_cpuid()
529 regs->ax = regs->bx = regs->cx = regs->dx = 0; in handle_cpuid()
535 * ABI can be found in TDX Guest-Host-Communication Interface in handle_cpuid()
539 return -EIO; in handle_cpuid()
542 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of in handle_cpuid()
546 regs->ax = args.r12; in handle_cpuid()
547 regs->bx = args.r13; in handle_cpuid()
548 regs->cx = args.r14; in handle_cpuid()
549 regs->dx = args.r15; in handle_cpuid()
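
Only the leaf range 0x40000000-0x4FFFFFFF, reserved for hypervisor communication, is forwarded to the VMM; any other leaf taken through this #VE path gets all-zero output. A small sketch of that filter:

#include <stdbool.h>
#include <stdio.h>

/* Leaves reserved for hypervisor communication (forwarded to the VMM) */
static bool is_hypervisor_leaf(unsigned int leaf)
{
	return leaf >= 0x40000000 && leaf <= 0x4FFFFFFF;
}

int main(void)
{
	printf("0x40000000 -> %d\n", is_hypervisor_leaf(0x40000000));	/* 1 */
	printf("0x0000000b -> %d\n", is_hypervisor_leaf(0x0000000b));	/* 0 */
	printf("0x80000008 -> %d\n", is_hypervisor_leaf(0x80000008));	/* 0 */
	return 0;
}
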
586 /* Only in-kernel MMIO is supported */ in handle_mmio()
588 return -EFAULT; in handle_mmio()
590 if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE)) in handle_mmio()
591 return -EFAULT; in handle_mmio()
594 return -EINVAL; in handle_mmio()
598 return -EINVAL; in handle_mmio()
603 return -EINVAL; in handle_mmio()
606 if (!fault_in_kernel_space(ve->gla)) { in handle_mmio()
608 return -EINVAL; in handle_mmio()
621 if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE) in handle_mmio()
622 return -EFAULT; in handle_mmio()
628 if (!mmio_write(size, ve->gpa, val)) in handle_mmio()
629 return -EIO; in handle_mmio()
633 if (!mmio_write(size, ve->gpa, val)) in handle_mmio()
634 return -EIO; in handle_mmio()
648 return -EINVAL; in handle_mmio()
651 return -EINVAL; in handle_mmio()
655 if (!mmio_read(size, ve->gpa, &val)) in handle_mmio()
656 return -EIO; in handle_mmio()
660 /* Zero-extend for 32-bit operation */ in handle_mmio()
678 return -EINVAL; in handle_mmio()
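
The check at line 621 rejects accesses whose linear address range straddles a page boundary: MMIO accesses are expected to be naturally aligned, so a split access indicates either a bug or load_unaligned_zeropad() stepping into an MMIO page. A user-space sketch of the same predicate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* True if an access of @size bytes at @vaddr crosses a page boundary */
static bool crosses_page(uint64_t vaddr, unsigned int size)
{
	return vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", crosses_page(0x1000, 8));	/* 0: fits in one page */
	printf("%d\n", crosses_page(0x1ffc, 8));	/* 1: spans two pages */
	return 0;
}
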
696 u64 mask = GENMASK(BITS_PER_BYTE * size, 0); in handle_in() local
701 * in TDX Guest-Host-Communication Interface (GHCI) section titled in handle_in()
707 regs->ax &= ~mask; in handle_in()
709 regs->ax |= args.r11 & mask; in handle_in()
716 u64 mask = GENMASK(BITS_PER_BYTE * size, 0); in handle_out() local
720 * in TDX Guest-Host-Communication Interface (GHCI) section titled in handle_out()
724 PORT_WRITE, port, regs->ax & mask); in handle_out()
737 u32 exit_qual = ve->exit_qual; in handle_io()
742 return -EIO; in handle_io()
754 return -EIO; in handle_io()
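
The size-derived mask merges the value returned by the VMM into the low bytes of regs->ax while leaving the rest of the register alone (matching what a 1- or 2-byte IN does architecturally), and handle_out() uses the same mask so only the low bytes of RAX reach the port. A user-space sketch of that merge, assuming an exact size*8-bit mask:

#include <stdint.h>
#include <stdio.h>

/* Mask covering the low @size bytes (size is 1, 2 or 4 for port I/O) */
static uint64_t io_mask(unsigned int size)
{
	return (1ULL << (size * 8)) - 1;
}

int main(void)
{
	uint64_t rax = 0xdeadbeefcafef00dULL;	/* pre-existing RAX contents */
	uint64_t in_val = 0x42;			/* byte returned by the VMM */
	uint64_t mask = io_mask(1);		/* 1-byte IN */

	/* handle_in(): clear the low bytes, then merge in the returned value */
	rax &= ~mask;
	rax |= in_val & mask;
	printf("RAX after 1-byte IN:  %#llx\n", (unsigned long long)rax);

	/* handle_out(): only the low bytes of RAX are sent to the port */
	printf("value written by OUT: %#llx\n", (unsigned long long)(rax & mask));
	return 0;
}
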
777 regs->ip += insn_len; in tdx_early_handle_ve()
803 ve->exit_reason = args.rcx; in tdx_get_ve_info()
804 ve->exit_qual = args.rdx; in tdx_get_ve_info()
805 ve->gla = args.r8; in tdx_get_ve_info()
806 ve->gpa = args.r9; in tdx_get_ve_info()
807 ve->instr_len = lower_32_bits(args.r10); in tdx_get_ve_info()
808 ve->instr_info = upper_32_bits(args.r10); in tdx_get_ve_info()
815 * or -errno on error.
819 switch (ve->exit_reason) { in virt_exception_user()
823 pr_warn("Unexpected #VE: %lld\n", ve->exit_reason); in virt_exception_user()
824 return -EIO; in virt_exception_user()
837 * or -errno on error.
841 switch (ve->exit_reason) { in virt_exception_kernel()
851 if (is_private_gpa(ve->gpa)) in virt_exception_kernel()
852 panic("Unexpected EPT-violation on private memory."); in virt_exception_kernel()
857 pr_warn("Unexpected #VE: %lld\n", ve->exit_reason); in virt_exception_kernel()
858 return -EIO; in virt_exception_kernel()
874 regs->ip += insn_len; in tdx_handle_virt_exception()
882 * The TDX guest is responsible for flushing the TLB on private->shared in tdx_tlb_flush_required()
883 * transitions. The VMM is responsible for flushing on shared->private. in tdx_tlb_flush_required()
910 * can be found in TDX Guest-Host-Communication Interface (GHCI),
930 .r13 = end - start }; in tdx_map_gpa()
972 /* shared->private conversion requires memory to be accepted before use */ in tdx_enc_status_changed()
983 * Only handle shared->private conversion here. in tdx_enc_status_change_prepare()
987 return -EIO; in tdx_enc_status_change_prepare()
996 * Only handle private->shared conversion here. in tdx_enc_status_change_finish()
1000 return -EIO; in tdx_enc_status_change_finish()
1010 /* Stop new private<->shared conversions */
1020 * If a race happened, just report it and proceed. in tdx_kexec_begin()
1023 pr_warn("Failed to stop shared<->private conversions\n"); in tdx_kexec_begin()
1070 * a pre-reserved memory range that is always private. in tdx_kexec_finish()
1080 pr_err("Failed to unshare range %#lx-%#lx\n", in tdx_kexec_finish()
1139 * Adjust physical mask to only cover valid GPA bits. in tdx_early_init()
1141 physical_mask &= cc_mask - 1; in tdx_early_init()
1149 * - Private mapping => Shared Page == Guest shutdown in tdx_early_init()
1150 * - Shared mapping => Private Page == Recoverable #VE in tdx_early_init()
1174 * in STI-shadow, possibly resulting in missed wakeup events. in tdx_early_init()
1189 * Intel-TDX has a secure RDMSR hypercall, but that needs to be in tdx_early_init()
1191 * Until that is in place, disable parallel bringup for TDX. in tdx_early_init()