xref: /linux/tools/testing/selftests/kvm/aarch64/hypercalls.c (revision 156010ed9c2ac1e9df6c11b1f688cf8a6e0152e6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
/* hypercalls: Check the ARM64's pseudo-firmware bitmap register interface.
 *
 * The test validates the basic hypercall functionalities that are exposed
 * via the pseudo-firmware bitmap register. This includes the registers'
 * read/write behavior before and after the VM has started, and if the
 * hypercalls are properly masked or unmasked to the guest when disabled or
 * enabled from the KVM userspace, respectively.
 */
11 
12 #include <errno.h>
13 #include <linux/arm-smccc.h>
14 #include <asm/kvm.h>
15 #include <kvm_util.h>
16 
17 #include "processor.h"
18 
/* Mask of all valid feature bits, [max_feat_bit:0], for a bitmapped fw register */
#define FW_REG_ULIMIT_VAL(max_feat_bit) (GENMASK(max_feat_bit, 0))

/* Last valid bits of the bitmapped firmware registers */
#define KVM_REG_ARM_STD_BMAP_BIT_MAX		0
#define KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX	0
#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX	1
25 
/* Describes one bitmapped firmware register under test */
struct kvm_fw_reg_info {
	uint64_t reg;		/* Register definition */
	uint64_t max_feat_bit;	/* Bit that represents the upper limit of the feature-map */
};

/* Pair a register definition with its matching *_BIT_MAX limit above */
#define FW_REG_INFO(r)			\
	{					\
		.reg = r,			\
		.max_feat_bit = r##_BIT_MAX,	\
	}
36 
/* All of the bitmapped pseudo-firmware registers exercised by the test */
static const struct kvm_fw_reg_info fw_reg_info[] = {
	FW_REG_INFO(KVM_REG_ARM_STD_BMAP),
	FW_REG_INFO(KVM_REG_ARM_STD_HYP_BMAP),
	FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP),
};
42 
/* Stages that guest and host walk through in lock-step via GUEST_SYNC */
enum test_stage {
	TEST_STAGE_REG_IFACE,
	TEST_STAGE_HVC_IFACE_FEAT_DISABLED,
	TEST_STAGE_HVC_IFACE_FEAT_ENABLED,
	TEST_STAGE_HVC_IFACE_FALSE_INFO,
	TEST_STAGE_END,
};

/* Current stage; pushed to the guest with sync_global_to_guest() */
static int stage = TEST_STAGE_REG_IFACE;
52 
/* One hypercall invocation: SMCCC function ID plus its first argument */
struct test_hvc_info {
	uint32_t func_id;
	uint64_t arg1;
};

#define TEST_HVC_INFO(f, a1)	\
	{			\
		.func_id = f,	\
		.arg1 = a1,	\
	}
63 
/* Hypercalls covered by the firmware registers, grouped per register */
static const struct test_hvc_info hvc_info[] = {
	/* KVM_REG_ARM_STD_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_TRNG_VERSION, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_TRNG_RND64),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_GET_UUID, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_RND32, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_RND64, 0),

	/* KVM_REG_ARM_STD_HYP_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_HV_PV_TIME_FEATURES),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_HV_PV_TIME_ST),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_ST, 0),

	/* KVM_REG_ARM_VENDOR_HYP_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID,
			ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 0),
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID, KVM_PTP_VIRT_COUNTER),
};
83 
/* Feed false hypercall info to test the KVM behavior */
static const struct test_hvc_info false_hvc_info[] = {
	/* Feature support check against a different family of hypercalls */
	TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
	TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_TRNG_RND64),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_TRNG_RND64),
};
91 
92 static void guest_test_hvc(const struct test_hvc_info *hc_info)
93 {
94 	unsigned int i;
95 	struct arm_smccc_res res;
96 	unsigned int hvc_info_arr_sz;
97 
98 	hvc_info_arr_sz =
99 	hc_info == hvc_info ? ARRAY_SIZE(hvc_info) : ARRAY_SIZE(false_hvc_info);
100 
101 	for (i = 0; i < hvc_info_arr_sz; i++, hc_info++) {
102 		memset(&res, 0, sizeof(res));
103 		smccc_hvc(hc_info->func_id, hc_info->arg1, 0, 0, 0, 0, 0, 0, &res);
104 
105 		switch (stage) {
106 		case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
107 		case TEST_STAGE_HVC_IFACE_FALSE_INFO:
108 			GUEST_ASSERT_3(res.a0 == SMCCC_RET_NOT_SUPPORTED,
109 					res.a0, hc_info->func_id, hc_info->arg1);
110 			break;
111 		case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
112 			GUEST_ASSERT_3(res.a0 != SMCCC_RET_NOT_SUPPORTED,
113 					res.a0, hc_info->func_id, hc_info->arg1);
114 			break;
115 		default:
116 			GUEST_ASSERT_1(0, stage);
117 		}
118 	}
119 }
120 
121 static void guest_code(void)
122 {
123 	while (stage != TEST_STAGE_END) {
124 		switch (stage) {
125 		case TEST_STAGE_REG_IFACE:
126 			break;
127 		case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
128 		case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
129 			guest_test_hvc(hvc_info);
130 			break;
131 		case TEST_STAGE_HVC_IFACE_FALSE_INFO:
132 			guest_test_hvc(false_hvc_info);
133 			break;
134 		default:
135 			GUEST_ASSERT_1(0, stage);
136 		}
137 
138 		GUEST_SYNC(stage);
139 	}
140 
141 	GUEST_DONE();
142 }
143 
/* Layout of the PV steal-time structure shared with the guest */
struct st_time {
	uint32_t rev;
	uint32_t attr;
	uint64_t st_time;
};

/* Steal-time region size, rounded up to a 64-byte boundary */
#define STEAL_TIME_SIZE		((sizeof(struct st_time) + 63) & ~63)
/* Guest physical address backing the steal-time region (1 GiB) */
#define ST_GPA_BASE		(1 << 30)
152 
153 static void steal_time_init(struct kvm_vcpu *vcpu)
154 {
155 	uint64_t st_ipa = (ulong)ST_GPA_BASE;
156 	unsigned int gpages;
157 
158 	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE);
159 	vm_userspace_mem_region_add(vcpu->vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
160 
161 	vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PVTIME_CTRL,
162 			     KVM_ARM_VCPU_PVTIME_IPA, &st_ipa);
163 }
164 
165 static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
166 {
167 	uint64_t val;
168 	unsigned int i;
169 	int ret;
170 
171 	for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
172 		const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
173 
174 		/* First 'read' should be an upper limit of the features supported */
175 		vcpu_get_reg(vcpu, reg_info->reg, &val);
176 		TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
177 			"Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n",
178 			reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);
179 
180 		/* Test a 'write' by disabling all the features of the register map */
181 		ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
182 		TEST_ASSERT(ret == 0,
183 			"Failed to clear all the features of reg: 0x%lx; ret: %d\n",
184 			reg_info->reg, errno);
185 
186 		vcpu_get_reg(vcpu, reg_info->reg, &val);
187 		TEST_ASSERT(val == 0,
188 			"Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg);
189 
190 		/*
191 		 * Test enabling a feature that's not supported.
192 		 * Avoid this check if all the bits are occupied.
193 		 */
194 		if (reg_info->max_feat_bit < 63) {
195 			ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
196 			TEST_ASSERT(ret != 0 && errno == EINVAL,
197 			"Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n",
198 			errno, reg_info->reg);
199 		}
200 	}
201 }
202 
203 static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
204 {
205 	uint64_t val;
206 	unsigned int i;
207 	int ret;
208 
209 	for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
210 		const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
211 
212 		/*
213 		 * Before starting the VM, the test clears all the bits.
214 		 * Check if that's still the case.
215 		 */
216 		vcpu_get_reg(vcpu, reg_info->reg, &val);
217 		TEST_ASSERT(val == 0,
218 			"Expected all the features to be cleared for reg: 0x%lx\n",
219 			reg_info->reg);
220 
221 		/*
222 		 * Since the VM has run at least once, KVM shouldn't allow modification of
223 		 * the registers and should return EBUSY. Set the registers and check for
224 		 * the expected errno.
225 		 */
226 		ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
227 		TEST_ASSERT(ret != 0 && errno == EBUSY,
228 		"Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n",
229 		errno, reg_info->reg);
230 	}
231 }
232 
233 static struct kvm_vm *test_vm_create(struct kvm_vcpu **vcpu)
234 {
235 	struct kvm_vm *vm;
236 
237 	vm = vm_create_with_one_vcpu(vcpu, guest_code);
238 
239 	steal_time_init(*vcpu);
240 
241 	return vm;
242 }
243 
/*
 * Handle the GUEST_SYNC for the stage the guest just completed and advance
 * to the next one. NOTE: the stage counter must be bumped and synced before
 * the switch below, because the FEAT_DISABLED arm frees and recreates the
 * VM (the new VM must start with the updated stage).
 */
static void test_guest_stage(struct kvm_vm **vm, struct kvm_vcpu **vcpu)
{
	int prev_stage = stage;

	pr_debug("Stage: %d\n", prev_stage);

	/* Sync the stage early, the VM might be freed below. */
	stage++;
	sync_global_to_guest(*vm, stage);

	switch (prev_stage) {
	case TEST_STAGE_REG_IFACE:
		/* Guest has run at least once: fw regs must now be immutable */
		test_fw_regs_after_vm_start(*vcpu);
		break;
	case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
		/* Start a new VM so that all the features are now enabled by default */
		kvm_vm_free(*vm);
		*vm = test_vm_create(vcpu);
		break;
	case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
	case TEST_STAGE_HVC_IFACE_FALSE_INFO:
		/* Nothing to do host-side; the guest did all the checking */
		break;
	default:
		TEST_FAIL("Unknown test stage: %d\n", prev_stage);
	}
}
270 
271 static void test_run(void)
272 {
273 	struct kvm_vcpu *vcpu;
274 	struct kvm_vm *vm;
275 	struct ucall uc;
276 	bool guest_done = false;
277 
278 	vm = test_vm_create(&vcpu);
279 
280 	test_fw_regs_before_vm_start(vcpu);
281 
282 	while (!guest_done) {
283 		vcpu_run(vcpu);
284 
285 		switch (get_ucall(vcpu, &uc)) {
286 		case UCALL_SYNC:
287 			test_guest_stage(&vm, &vcpu);
288 			break;
289 		case UCALL_DONE:
290 			guest_done = true;
291 			break;
292 		case UCALL_ABORT:
293 			REPORT_GUEST_ASSERT_N(uc, "values: 0x%lx, 0x%lx; 0x%lx, stage: %u",
294 					      GUEST_ASSERT_ARG(uc, 0),
295 					      GUEST_ASSERT_ARG(uc, 1),
296 					      GUEST_ASSERT_ARG(uc, 2), stage);
297 			break;
298 		default:
299 			TEST_FAIL("Unexpected guest exit\n");
300 		}
301 	}
302 
303 	kvm_vm_free(vm);
304 }
305 
/* Selftest entry point: run the whole stage sequence once */
int main(void)
{
	test_run();
	return 0;
}
311