xref: /linux/tools/testing/selftests/kvm/x86/vmx_apicv_updates_test.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include "test_util.h"
3 #include "kvm_util.h"
4 #include "processor.h"
5 #include "vmx.h"
6 
/* Vector for the IPI the test deliberately sends and expects to receive. */
#define GOOD_IPI_VECTOR 0xe0
/* Vector for the IPI that must NOT be delivered (sent via a "dead" ICR MMIO write). */
#define BAD_IPI_VECTOR 0xf0

/* Count of GOOD_IPI_VECTOR IRQs taken; volatile as it's written by the IRQ handler. */
static volatile int good_ipis_received;
11 
12 static void good_ipi_handler(struct ex_regs *regs)
13 {
14 	good_ipis_received++;
15 }
16 
/*
 * IRQ handler for BAD_IPI_VECTOR.  The "bad" IPI is requested via an ICR
 * write to the scribbled APIC access page; if the vector is actually
 * delivered, KVM wrongly honored an MMIO write it should have ignored.
 */
static void bad_ipi_handler(struct ex_regs *regs)
{
	GUEST_FAIL("Received \"bad\" IPI; ICR MMIO write should have been ignored");
}
21 
/*
 * L2 guest: toggle the local APIC between x2APIC and xAPIC modes, exiting to
 * L1 via VMCALL after each transition so L1 can verify KVM's corresponding
 * APICv (un)inhibit handling.
 */
static void l2_guest_code(void)
{
	/* Switch to x2APIC mode; KVM should force the APIC ID back to its default. */
	x2apic_enable();
	vmcall();

	/* Switch back to xAPIC mode and re-modify the APIC ID to re-inhibit APICv. */
	xapic_enable();
	xapic_write_reg(APIC_ID, 1 << 24);
	vmcall();
}
31 
/*
 * L1 guest: verify KVM's APICv inhibit/uninhibit handling as the vCPU's APIC
 * ID is modified and as L2 toggles between xAPIC and x2APIC modes, including
 * transitions that take effect while L2 is active.
 */
static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_USE_MSR_BITMAPS;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);

	/* Modify APIC ID to coerce KVM into inhibiting APICv. */
	xapic_enable();
	xapic_write_reg(APIC_ID, 1 << 24);

	/*
	 * Generate+receive an IRQ without doing EOI to get an IRQ set in vISR
	 * but not SVI.  APICv should be inhibited due to running with a
	 * modified APIC ID.
	 */
	xapic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_DM_FIXED | GOOD_IPI_VECTOR);
	/* Verify the modified APIC ID actually stuck. */
	GUEST_ASSERT_EQ(xapic_read_reg(APIC_ID), 1 << 24);

	/* Enable IRQs and verify the IRQ was received. */
	sti_nop();
	GUEST_ASSERT_EQ(good_ipis_received, 1);

	/*
	 * Run L2 to switch to x2APIC mode, which in turn will uninhibit APICv,
	 * as KVM should force the APIC ID back to its default.
	 */
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	/* Advance L2's RIP past the VMCALL so VM-Resume doesn't re-exit on it. */
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + vmreadz(VM_EXIT_INSTRUCTION_LEN));
	GUEST_ASSERT(rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_EXTD);

	/*
	 * Scribble the APIC access page to verify KVM disabled xAPIC
	 * virtualization in vmcs01, and to verify that KVM flushes L1's TLB
	 * when L2 switches back to accelerated xAPIC mode.
	 */
	xapic_write_reg(APIC_ICR2, 0xdeadbeefu);
	xapic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_DM_FIXED | BAD_IPI_VECTOR);

	/*
	 * Verify the IRQ is still in-service and emit an EOI to verify KVM
	 * propagates the highest vISR vector to SVI when APICv is activated
	 * (and does so even if APICv was uninhibited while L2 was active).
	 */
	GUEST_ASSERT_EQ(x2apic_read_reg(APIC_ISR + APIC_VECTOR_TO_REG_OFFSET(GOOD_IPI_VECTOR)),
			BIT(APIC_VECTOR_TO_BIT_NUMBER(GOOD_IPI_VECTOR)));
	x2apic_write_reg(APIC_EOI, 0);
	GUEST_ASSERT_EQ(x2apic_read_reg(APIC_ISR + APIC_VECTOR_TO_REG_OFFSET(GOOD_IPI_VECTOR)), 0);

	/*
	 * Run L2 one more time to switch back to xAPIC mode to verify that KVM
	 * handles the x2APIC => xAPIC transition and inhibits APICv while L2
	 * is active.
	 */
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	GUEST_ASSERT(!(rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_EXTD));

	/* As above, send a "good" IPI without EOI to get the IRQ set in vISR. */
	xapic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_DM_FIXED | GOOD_IPI_VECTOR);
	/* Re-enable IRQs, as VM-Exit clears RFLAGS.IF. */
	sti_nop();
	GUEST_ASSERT_EQ(good_ipis_received, 2);

	/* Verify in-service => EOI => not-in-service again, this time via xAPIC. */
	GUEST_ASSERT_EQ(xapic_read_reg(APIC_ISR + APIC_VECTOR_TO_REG_OFFSET(GOOD_IPI_VECTOR)),
			BIT(APIC_VECTOR_TO_BIT_NUMBER(GOOD_IPI_VECTOR)));
	xapic_write_reg(APIC_EOI, 0);
	GUEST_ASSERT_EQ(xapic_read_reg(APIC_ISR + APIC_VECTOR_TO_REG_OFFSET(GOOD_IPI_VECTOR)), 0);
	GUEST_DONE();
}
110 
111 int main(int argc, char *argv[])
112 {
113 	vm_vaddr_t vmx_pages_gva;
114 	struct vmx_pages *vmx;
115 	struct kvm_vcpu *vcpu;
116 	struct kvm_vm *vm;
117 	struct ucall uc;
118 
119 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
120 
121 	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
122 
123 	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
124 	prepare_virtualize_apic_accesses(vmx, vm);
125 	vcpu_args_set(vcpu, 1, vmx_pages_gva);
126 
127 	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
128 	vm_install_exception_handler(vm, BAD_IPI_VECTOR, bad_ipi_handler);
129 	vm_install_exception_handler(vm, GOOD_IPI_VECTOR, good_ipi_handler);
130 
131 	vcpu_run(vcpu);
132 	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
133 
134 	switch (get_ucall(vcpu, &uc)) {
135 	case UCALL_ABORT:
136 		REPORT_GUEST_ASSERT(uc);
137 		/* NOT REACHED */
138 	case UCALL_DONE:
139 		break;
140 	default:
141 		TEST_FAIL("Unexpected ucall %lu", uc.cmd);
142 	}
143 
144 	/*
145 	 * Verify at least two IRQs were injected.  Unfortunately, KVM counts
146 	 * re-injected IRQs (e.g. if delivering the IRQ hits an EPT violation),
147 	 * so being more precise isn't possible given the current stats.
148 	 */
149 	TEST_ASSERT(vcpu_get_stat(vcpu, irq_injections) >= 2,
150 		    "Wanted at least 2 IRQ injections, got %lu\n",
151 		    vcpu_get_stat(vcpu, irq_injections));
152 
153 	kvm_vm_free(vm);
154 	return 0;
155 }
156