xref: /linux/tools/testing/selftests/kvm/x86/nested_exceptions_test.c (revision a382b06d297e78ed7ac67afd0d8e8690406ac4ca)
167730e6cSSean Christopherson // SPDX-License-Identifier: GPL-2.0-only
267730e6cSSean Christopherson #include "test_util.h"
367730e6cSSean Christopherson #include "kvm_util.h"
467730e6cSSean Christopherson #include "processor.h"
567730e6cSSean Christopherson #include "vmx.h"
667730e6cSSean Christopherson #include "svm_util.h"
767730e6cSSean Christopherson 
#define L2_GUEST_STACK_SIZE 256

/*
 * Arbitrary, never shoved into KVM/hardware, just need to avoid conflict with
 * the "real" exceptions used, #SS/#GP/#DF (12/13/8).
 */
#define FAKE_TRIPLE_FAULT_VECTOR	0xaa

/* Arbitrary 32-bit error code injected by this test. */
#define SS_ERROR_CODE 0xdeadbeef

/*
 * Bit '0' is set on Intel if the exception occurs while delivering a previous
 * event/exception.  AMD's wording is ambiguous, but presumably the bit is set
 * if the exception occurs while delivering an external event, e.g. NMI or INTR,
 * but not for exceptions that occur when delivering other exceptions or
 * software interrupts.
 *
 * Note, Intel's name for it, "External event", is misleading and much more
 * aligned with AMD's behavior, but the SDM is quite clear on its behavior.
 */
#define ERROR_CODE_EXT_FLAG	BIT(0)

/*
 * Bit '1' is set if the fault occurred when looking up a descriptor in the
 * IDT, which is the case here as the IDT is empty/NULL.
 */
#define ERROR_CODE_IDT_FLAG	BIT(1)

/*
 * The #GP that occurs when vectoring #SS should show the index into the IDT
 * for #SS, plus have the "IDT flag" set.
 */
#define GP_ERROR_CODE_AMD ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG)
#define GP_ERROR_CODE_INTEL ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG | ERROR_CODE_EXT_FLAG)

/*
 * Intel and AMD both shove '0' into the error code on #DF, regardless of what
 * led to the double fault.
 */
#define DF_ERROR_CODE 0

/*
 * Exception bitmaps used by L1 to progressively disable interception of #GP
 * and then #DF, so that the injected #SS escalates #SS => #GP => #DF =>
 * TRIPLE_FAULT/SHUTDOWN as interception is peeled away.
 */
#define INTERCEPT_SS		(BIT_ULL(SS_VECTOR))
#define INTERCEPT_SS_DF		(INTERCEPT_SS | BIT_ULL(DF_VECTOR))
#define INTERCEPT_SS_GP_DF	(INTERCEPT_SS_DF | BIT_ULL(GP_VECTOR))
5367730e6cSSean Christopherson 
/* L2 payload: sync with host userspace and request that a #SS be pended. */
static void l2_ss_pending_test(void)
{
	GUEST_SYNC(SS_VECTOR);
}
5867730e6cSSean Christopherson 
/*
 * L2 payload: sync with host userspace and request an injected #SS, which
 * should morph into a #GP on vectoring (the IDT is empty).
 */
static void l2_ss_injected_gp_test(void)
{
	GUEST_SYNC(GP_VECTOR);
}
6367730e6cSSean Christopherson 
/*
 * L2 payload: sync with host userspace and request an injected #SS that
 * should escalate to #DF (neither #SS nor #GP vectoring can succeed).
 */
static void l2_ss_injected_df_test(void)
{
	GUEST_SYNC(DF_VECTOR);
}
6867730e6cSSean Christopherson 
/*
 * L2 payload: sync with host userspace and request an injected #SS that
 * should escalate all the way to a (nested) triple fault / SHUTDOWN.
 */
static void l2_ss_injected_tf_test(void)
{
	GUEST_SYNC(FAKE_TRIPLE_FAULT_VECTOR);
}
7367730e6cSSean Christopherson 
/*
 * Run @l2_code in L2 via VMRUN and verify the resulting #VMEXIT.  Unless the
 * caller expects a (fake) triple fault, assert that the exit is due to an
 * intercepted exception of @vector with @error_code, and that no guest
 * interruptibility state (e.g. a lingering STI/MOV-SS shadow) leaked through.
 */
static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector,
		       uint32_t error_code)
{
	struct vmcb *vmcb = svm->vmcb;
	struct vmcb_control_area *ctrl = &vmcb->control;

	vmcb->save.rip = (u64)l2_code;
	run_guest(vmcb, svm->vmcb_gpa);

	/* Triple fault runs end in SHUTDOWN; the caller checks the exit code. */
	if (vector == FAKE_TRIPLE_FAULT_VECTOR)
		return;

	GUEST_ASSERT_EQ(ctrl->exit_code, (SVM_EXIT_EXCP_BASE + vector));
	GUEST_ASSERT_EQ(ctrl->exit_info_1, error_code);
	GUEST_ASSERT(!ctrl->int_state);
}
9067730e6cSSean Christopherson 
/*
 * SVM flavor of the L1 guest.  Zap L2's IDT so every vectored event faults,
 * then run the escalation sequence, shrinking the set of intercepted
 * exceptions at each step: #SS, then #SS=>#GP, then #SS=>#GP=>#DF, and
 * finally #SS=>#GP=>#DF=>SHUTDOWN with only #SS (bypassed by injection)
 * intercepted.
 */
static void l1_svm_code(struct svm_test_data *svm)
{
	struct vmcb_control_area *ctrl = &svm->vmcb->control;
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	generic_svm_setup(svm, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	/* Empty IDT => any exception delivery in L2 faults (#GP => #DF => SHUTDOWN). */
	svm->vmcb->save.idtr.limit = 0;
	ctrl->intercept |= BIT_ULL(INTERCEPT_SHUTDOWN);

	ctrl->intercept_exceptions = INTERCEPT_SS_GP_DF;
	svm_run_l2(svm, l2_ss_pending_test, SS_VECTOR, SS_ERROR_CODE);
	svm_run_l2(svm, l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_AMD);

	/* Stop intercepting #GP; the injected #SS should now surface as #DF. */
	ctrl->intercept_exceptions = INTERCEPT_SS_DF;
	svm_run_l2(svm, l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

	/* Stop intercepting #DF too; expect a full SHUTDOWN #VMEXIT. */
	ctrl->intercept_exceptions = INTERCEPT_SS;
	svm_run_l2(svm, l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
	GUEST_ASSERT_EQ(ctrl->exit_code, SVM_EXIT_SHUTDOWN);

	GUEST_DONE();
}
11367730e6cSSean Christopherson 
/*
 * Run @l2_code in L2 via VMLAUNCH (first run, i.e. the #SS pending test) or
 * VMRESUME (all subsequent runs).  Unless the caller expects a (fake) triple
 * fault, assert that the VM-Exit is an exception exit for @vector with
 * @error_code, and that no guest interruptibility state leaked through.
 */
static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code)
{
	GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code));

	/* The very first entry must use VMLAUNCH; the VMCS is active after that. */
	GUEST_ASSERT_EQ(vector == SS_VECTOR ? vmlaunch() : vmresume(), 0);

	if (vector == FAKE_TRIPLE_FAULT_VECTOR)
		return;

	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
	GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), vector);
	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_INTR_ERROR_CODE), error_code);
	GUEST_ASSERT(!vmreadz(GUEST_INTERRUPTIBILITY_INFO));
}
12867730e6cSSean Christopherson 
/*
 * VMX flavor of the L1 guest.  Mirrors l1_svm_code(): zap L2's IDT, then run
 * the #SS => #GP => #DF => TRIPLE_FAULT escalation sequence by progressively
 * removing #GP and #DF from the exception bitmap.
 */
static void l1_vmx_code(struct vmx_pages *vmx)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);

	GUEST_ASSERT_EQ(load_vmcs(vmx), true);

	prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	/* Empty IDT => any exception delivery in L2 faults. */
	GUEST_ASSERT_EQ(vmwrite(GUEST_IDTR_LIMIT, 0), 0);

	/*
	 * VMX disallows injecting an exception with error_code[31:16] != 0,
	 * and hardware will never generate a VM-Exit with bits 31:16 set.
	 * KVM should likewise truncate the "bad" userspace value.
	 */
	GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_GP_DF), 0);
	vmx_run_l2(l2_ss_pending_test, SS_VECTOR, (u16)SS_ERROR_CODE);
	vmx_run_l2(l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_INTEL);

	/* Stop intercepting #GP; the injected #SS should now surface as #DF. */
	GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_DF), 0);
	vmx_run_l2(l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

	/* Stop intercepting #DF too; expect a TRIPLE_FAULT VM-Exit. */
	GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS), 0);
	vmx_run_l2(l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_TRIPLE_FAULT);

	GUEST_DONE();
}
15867730e6cSSean Christopherson 
/*
 * L1 entry point: dispatch to the SVM or VMX flavor based on CPU support.
 * "flatten" forces the (large) helpers to be inlined into this function.
 */
static void __attribute__((__flatten__)) l1_guest_code(void *test_data)
{
	if (this_cpu_has(X86_FEATURE_SVM))
		l1_svm_code(test_data);
	else
		l1_vmx_code(test_data);
}
16667730e6cSSean Christopherson 
/*
 * Assert that the vCPU exited with a ucall requesting @vector, or with
 * UCALL_DONE if @vector is -1.  Propagates guest-side assertion failures.
 */
static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector)
{
	struct ucall uc;

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		TEST_ASSERT(vector == uc.args[1],
			    "Expected L2 to ask for %d, got %ld", vector, uc.args[1]);
		break;
	case UCALL_DONE:
		/* -1 is the caller's shorthand for "expect the test to finish". */
		TEST_ASSERT(vector == -1,
			    "Expected L2 to ask for %d, L2 says it's done", vector);
		break;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	default:
		TEST_FAIL("Expected L2 to ask for %d, got unexpected ucall %lu", vector, uc.cmd);
	}
}
18967730e6cSSean Christopherson 
/*
 * Queue a #SS with SS_ERROR_CODE for the vCPU via KVM_SET_VCPU_EVENTS.  If
 * @inject is true, mark the exception as already injected, i.e. past the
 * point of interception checks; otherwise leave it pending so that L1's
 * exception intercepts apply.  Asserts that no exception is already queued.
 */
static void queue_ss_exception(struct kvm_vcpu *vcpu, bool inject)
{
	struct kvm_vcpu_events events;

	vcpu_events_get(vcpu, &events);

	/* Fixed typo in the assert message: "unexpectedlt" => "unexpectedly". */
	TEST_ASSERT(!events.exception.pending,
		    "Vector %d unexpectedly pending", events.exception.nr);
	TEST_ASSERT(!events.exception.injected,
		    "Vector %d unexpectedly injected", events.exception.nr);

	events.flags = KVM_VCPUEVENT_VALID_PAYLOAD;
	events.exception.pending = !inject;
	events.exception.injected = inject;
	events.exception.nr = SS_VECTOR;
	events.exception.has_error_code = true;
	events.exception.error_code = SS_ERROR_CODE;
	vcpu_events_set(vcpu, &events);
}
20967730e6cSSean Christopherson 
/*
 * Verify KVM_{G,S}ET_EVENTS play nice with pending vs. injected exceptions
 * when an exception is being queued for L2.  Specifically, verify that KVM
 * honors L1 exception intercept controls when a #SS is pending/injected,
 * triggers a #GP on vectoring the #SS, morphs to #DF if #GP isn't intercepted
 * by L1, and finally causes (nested) SHUTDOWN if #DF isn't intercepted by L1.
 */
int main(int argc, char *argv[])
{
	vm_vaddr_t nested_test_data_gva;
	struct kvm_vcpu_events events;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXCEPTION_PAYLOAD));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
	vm_enable_cap(vm, KVM_CAP_EXCEPTION_PAYLOAD, -2ul);

	if (kvm_cpu_has(X86_FEATURE_SVM))
		vcpu_alloc_svm(vm, &nested_test_data_gva);
	else
		vcpu_alloc_vmx(vm, &nested_test_data_gva);

	vcpu_args_set(vcpu, 1, nested_test_data_gva);

	/* Run L1 => L2.  L2 should sync and request #SS. */
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, SS_VECTOR);

	/* Pend #SS and request immediate exit.  #SS should still be pending. */
	queue_ss_exception(vcpu, false);
	vcpu->run->immediate_exit = true;
	vcpu_run_complete_io(vcpu);

	/* Verify the pending event comes back out the same as it went in. */
	vcpu_events_get(vcpu, &events);
	TEST_ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
			KVM_VCPUEVENT_VALID_PAYLOAD);
	TEST_ASSERT_EQ(events.exception.pending, true);
	TEST_ASSERT_EQ(events.exception.nr, SS_VECTOR);
	TEST_ASSERT_EQ(events.exception.has_error_code, true);
	TEST_ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);

	/*
	 * Run for real with the pending #SS, L1 should get a VM-Exit due to
	 * #SS interception and re-enter L2 to request #GP (via injected #SS).
	 */
	vcpu->run->immediate_exit = false;
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, GP_VECTOR);

	/*
	 * Inject #SS, the #SS should bypass interception and cause #GP, which
	 * L1 should intercept before KVM morphs it to #DF.  L1 should then
	 * disable #GP interception and run L2 to request #DF (via #SS => #GP).
	 */
	queue_ss_exception(vcpu, true);
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, DF_VECTOR);

	/*
	 * Inject #SS, the #SS should bypass interception and cause #GP, which
	 * L1 is no longer intercepting, and so should see a #DF VM-Exit.  L1
	 * should then signal that it is done.
	 */
	queue_ss_exception(vcpu, true);
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, FAKE_TRIPLE_FAULT_VECTOR);

	/*
	 * Inject #SS yet again.  L1 is not intercepting #GP or #DF, and so
	 * should see nested TRIPLE_FAULT / SHUTDOWN.
	 */
	queue_ss_exception(vcpu, true);
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, -1);

	kvm_vm_free(vm);
}
291