xref: /linux/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c (revision 249ebf3f65f8530beb2cbfb91bff1d83ba88d23c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Tests for MSR_IA32_TSC and MSR_IA32_TSC_ADJUST.
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */
#include <stdio.h>
#include <string.h>
#include "kvm_util.h"
#include "processor.h"

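/*
 * The TSC keeps counting while the test runs, so exact comparisons are not
 * possible.  Every value read back is therefore rounded to the nearest
 * multiple of UNITY (2^30 cycles) by ROUND(); the guest steps and the
 * host-side adjustment are multiples of UNITY large enough that the cycles
 * elapsing between a write and the following read cannot change the
 * rounded result.
 */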
#define UNITY                  (1ull << 30)
#define HOST_ADJUST            (UNITY * 64)
#define GUEST_STEP             (UNITY * 4)
#define ROUND(x)               ((x + UNITY / 2) & -UNITY)
#define rounded_rdmsr(x)       ROUND(rdmsr(x))
#define rounded_host_rdmsr(x)  ROUND(vcpu_get_msr(vcpu, x))

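/*
 * Architecturally, IA32_TSC and IA32_TSC_ADJUST are coupled: a WRMSR that
 * adds (or subtracts) X to the TSC also adds (or subtracts) X to
 * IA32_TSC_ADJUST, and vice versa (see the IA32_TSC_ADJUST description in
 * the Intel SDM).  The guest-visible checks below verify that KVM preserves
 * this coupling, while the offset written by the host is expected to show
 * up in IA32_TSC only.
 */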
static void guest_code(void)
{
	uint64_t val = 0;

	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC affect both MSRs.  */
	val = 1ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs.  */
	GUEST_SYNC(2);
	val = 2ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC_ADJUST, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Host: setting the TSC offset.  */
	GUEST_SYNC(3);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
	 * host-side offset and affect both MSRs.
	 */
	GUEST_SYNC(4);
	val = 3ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC_ADJUST, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
	 * offset is now visible in MSR_IA32_TSC_ADJUST.
	 */
	GUEST_SYNC(5);
	val = 4ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);

	GUEST_DONE();
}

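/*
 * GUEST_SYNC(stage) issues a ucall with the string "hello" in args[0] and
 * the stage number in args[1]; run_vcpu() checks both so that the guest
 * and the host stay in lockstep through the test sequence.
 */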
static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
{
	struct ucall uc;

	vcpu_run(vcpu);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		if (!strcmp((const char *)uc.args[0], "hello") &&
		    uc.args[1] == stage + 1)
			ksft_test_result_pass("stage %d passed\n", stage + 1);
		else
			ksft_test_result_fail(
				"stage %d: Unexpected register values vmexit, got %lx\n",
				stage + 1, (ulong)uc.args[1]);
		return;
	case UCALL_DONE:
		ksft_test_result_pass("stage %d passed\n", stage + 1);
		return;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu->run->exit_reason));
	}
}

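/*
 * main() drives the host half of the test.  Each run_vcpu() call lets the
 * guest execute until its next GUEST_SYNC() (or GUEST_DONE()); in between,
 * the host inspects and modifies the vCPU's MSRs via vcpu_get_msr() and
 * vcpu_set_msr() to check KVM's view of MSR_IA32_TSC and
 * MSR_IA32_TSC_ADJUST, and to verify that host-initiated writes are
 * handled differently from guest WRMSRs.
 */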
int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	uint64_t val;

	ksft_print_header();
	ksft_set_plan(5);

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	val = 0;
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC affect both MSRs.  */
	run_vcpu(vcpu, 1);
	val = 1ull * GUEST_STEP;
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs.  */
	run_vcpu(vcpu, 2);
	val = 2ull * GUEST_STEP;
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Host: writes to MSR_IA32_TSC set the host-side offset
	 * and therefore do not change MSR_IA32_TSC_ADJUST.
	 */
	vcpu_set_msr(vcpu, MSR_IA32_TSC, HOST_ADJUST + val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
	run_vcpu(vcpu, 3);

	/* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC.  */
	vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, UNITY * 123456);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	TEST_ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);

	/* Restore previous value.  */
	vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
	 * host-side offset and affect both MSRs.
	 */
	run_vcpu(vcpu, 4);
	val = 3ull * GUEST_STEP;
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
	 * offset is now visible in MSR_IA32_TSC_ADJUST.
	 */
	run_vcpu(vcpu, 5);
	val = 4ull * GUEST_STEP;
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);

	kvm_vm_free(vm);

	ksft_finished();	/* Print results and exit() accordingly */
}