xref: /linux/tools/testing/selftests/kvm/aarch64/hypercalls.c (revision dec1c62e91ba268ab2a6e339d4d7a59287d5eba1)
// SPDX-License-Identifier: GPL-2.0-only

/* hypercalls: Check the ARM64 pseudo-firmware bitmap register interface.
 *
 * The test validates the basic hypercall functionalities that are exposed
 * via the pseudo-firmware bitmap registers. This includes the registers'
 * read/write behavior before and after the VM has started, and whether the
 * hypercalls are properly masked or unmasked to the guest when disabled or
 * enabled from KVM userspace, respectively.
 */

#include <errno.h>
#include <linux/arm-smccc.h>
#include <asm/kvm.h>
#include <kvm_util.h>

#include "processor.h"

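/* Mask with every feature bit from 0 up to and including max_feat_bit set */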
#define FW_REG_ULIMIT_VAL(max_feat_bit) (GENMASK(max_feat_bit, 0))

/* Last valid bits of the bitmapped firmware registers */
#define KVM_REG_ARM_STD_BMAP_BIT_MAX		0
#define KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX	0
#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX	1

struct kvm_fw_reg_info {
	uint64_t reg;		/* Register definition */
	uint64_t max_feat_bit;	/* Bit that represents the upper limit of the feature-map */
};

#define FW_REG_INFO(r)			\
	{					\
		.reg = r,			\
		.max_feat_bit = r##_BIT_MAX,	\
	}

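/* Pseudo-firmware bitmap registers exercised by the test, with their highest valid feature bit */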
static const struct kvm_fw_reg_info fw_reg_info[] = {
	FW_REG_INFO(KVM_REG_ARM_STD_BMAP),
	FW_REG_INFO(KVM_REG_ARM_STD_HYP_BMAP),
	FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP),
};

enum test_stage {
	TEST_STAGE_REG_IFACE,
	TEST_STAGE_HVC_IFACE_FEAT_DISABLED,
	TEST_STAGE_HVC_IFACE_FEAT_ENABLED,
	TEST_STAGE_HVC_IFACE_FALSE_INFO,
	TEST_STAGE_END,
};

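/* Current test stage; the host advances it and syncs it into the guest */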
static int stage = TEST_STAGE_REG_IFACE;

struct test_hvc_info {
	uint32_t func_id;
	uint64_t arg1;
};

#define TEST_HVC_INFO(f, a1)	\
	{			\
		.func_id = f,	\
		.arg1 = a1,	\
	}

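/* Hypercalls to exercise, grouped by the bitmap register that gates them */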
static const struct test_hvc_info hvc_info[] = {
	/* KVM_REG_ARM_STD_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_TRNG_VERSION, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_TRNG_RND64),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_GET_UUID, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_RND32, 0),
	TEST_HVC_INFO(ARM_SMCCC_TRNG_RND64, 0),

	/* KVM_REG_ARM_STD_HYP_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_HV_PV_TIME_FEATURES),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_HV_PV_TIME_ST),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_ST, 0),

	/* KVM_REG_ARM_VENDOR_HYP_BMAP */
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID,
			ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 0),
	TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID, KVM_PTP_VIRT_COUNTER),
};

/* Feed false hypercall info to test the KVM behavior */
static const struct test_hvc_info false_hvc_info[] = {
	/* Feature support check against a different family of hypercalls */
	TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
	TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_TRNG_RND64),
	TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_TRNG_RND64),
};

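/*
 * Issue every hypercall in the given table and check that the return value
 * matches what the current stage expects: SMCCC_RET_NOT_SUPPORTED when the
 * features are disabled (or the info is bogus), anything else when enabled.
 */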
static void guest_test_hvc(const struct test_hvc_info *hc_info)
{
	unsigned int i;
	struct arm_smccc_res res;
	unsigned int hvc_info_arr_sz;

	hvc_info_arr_sz =
	hc_info == hvc_info ? ARRAY_SIZE(hvc_info) : ARRAY_SIZE(false_hvc_info);

	for (i = 0; i < hvc_info_arr_sz; i++, hc_info++) {
		memset(&res, 0, sizeof(res));
		smccc_hvc(hc_info->func_id, hc_info->arg1, 0, 0, 0, 0, 0, 0, &res);

		switch (stage) {
		case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
		case TEST_STAGE_HVC_IFACE_FALSE_INFO:
			GUEST_ASSERT_3(res.a0 == SMCCC_RET_NOT_SUPPORTED,
					res.a0, hc_info->func_id, hc_info->arg1);
			break;
		case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
			GUEST_ASSERT_3(res.a0 != SMCCC_RET_NOT_SUPPORTED,
					res.a0, hc_info->func_id, hc_info->arg1);
			break;
		default:
			GUEST_ASSERT_1(0, stage);
		}
	}
}

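/*
 * Guest entry point: walk through the test stages pushed by the host,
 * issuing the hypercalls where required, and sync after each stage.
 */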
static void guest_code(void)
{
	while (stage != TEST_STAGE_END) {
		switch (stage) {
		case TEST_STAGE_REG_IFACE:
			break;
		case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
		case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
			guest_test_hvc(hvc_info);
			break;
		case TEST_STAGE_HVC_IFACE_FALSE_INFO:
			guest_test_hvc(false_hvc_info);
			break;
		default:
			GUEST_ASSERT_1(0, stage);
		}

		GUEST_SYNC(stage);
	}

	GUEST_DONE();
}

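/*
 * Write 'val' to the firmware register 'id'. Returns the raw ioctl result so
 * that callers can check errno for the expected failure modes.
 */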
static int set_fw_reg(struct kvm_vm *vm, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id = id,
		.addr = (uint64_t)&val,
	};

	return _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
}

static void get_fw_reg(struct kvm_vm *vm, uint64_t id, uint64_t *addr)
{
	struct kvm_one_reg reg = {
		.id = id,
		.addr = (uint64_t)addr,
	};

	vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, &reg);
}

struct st_time {
	uint32_t rev;
	uint32_t attr;
	uint64_t st_time;
};

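/* Size of the steal-time structure, rounded up to the next 64-byte multiple */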
#define STEAL_TIME_SIZE		((sizeof(struct st_time) + 63) & ~63)
#define ST_GPA_BASE		(1 << 30)

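/*
 * Back the steal-time region with guest memory and program its IPA via the
 * per-vCPU PVTIME device attribute, so the PV time hypercalls exercised by
 * the guest have a configured steal-time base to work with.
 */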
static void steal_time_init(struct kvm_vm *vm)
{
	uint64_t st_ipa = (ulong)ST_GPA_BASE;
	unsigned int gpages;
	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
		.addr = (uint64_t)&st_ipa,
	};

	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE);
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);

	vcpu_ioctl(vm, 0, KVM_SET_DEVICE_ATTR, &dev);
}

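/*
 * Exercise the register interface before the first KVM_RUN: reads should
 * report all supported features, writes (including clearing everything)
 * should succeed, and setting an unsupported bit should fail with EINVAL.
 */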
static void test_fw_regs_before_vm_start(struct kvm_vm *vm)
{
	uint64_t val;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
		const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];

		/* First 'read' should be an upper limit of the features supported */
		get_fw_reg(vm, reg_info->reg, &val);
		TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
			"Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n",
			reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);

		/* Test a 'write' by disabling all the features of the register map */
		ret = set_fw_reg(vm, reg_info->reg, 0);
		TEST_ASSERT(ret == 0,
			"Failed to clear all the features of reg: 0x%lx; errno: %d\n",
			reg_info->reg, errno);

		get_fw_reg(vm, reg_info->reg, &val);
		TEST_ASSERT(val == 0,
			"Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg);

		/*
		 * Test enabling a feature that's not supported.
		 * Avoid this check if all the bits are occupied.
		 */
		if (reg_info->max_feat_bit < 63) {
			ret = set_fw_reg(vm, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
			TEST_ASSERT(ret != 0 && errno == EINVAL,
			"Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n",
			errno, reg_info->reg);
		}
	}
}

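/*
 * Once the VM has run, the firmware registers are expected to be immutable:
 * reads must still return the cleared bitmaps and writes must fail with EBUSY.
 */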
static void test_fw_regs_after_vm_start(struct kvm_vm *vm)
{
	uint64_t val;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
		const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];

		/*
		 * Before starting the VM, the test clears all the bits.
		 * Check if that's still the case.
		 */
		get_fw_reg(vm, reg_info->reg, &val);
		TEST_ASSERT(val == 0,
			"Expected all the features to be cleared for reg: 0x%lx\n",
			reg_info->reg);

		/*
		 * Since the VM has run at least once, KVM shouldn't allow modification of
		 * the registers and should return EBUSY. Set the registers and check for
		 * the expected errno.
		 */
		ret = set_fw_reg(vm, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
		TEST_ASSERT(ret != 0 && errno == EBUSY,
		"Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n",
		errno, reg_info->reg);
	}
}

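/* Create a single-vCPU VM running guest_code, with ucall and steal-time set up */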
static struct kvm_vm *test_vm_create(void)
{
	struct kvm_vm *vm;

	vm = vm_create_default(0, 0, guest_code);

	ucall_init(vm, NULL);
	steal_time_init(vm);

	return vm;
}

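/*
 * Handle a UCALL_SYNC from the guest: run the host-side checks for the
 * current stage, advance to the next one, and push the new stage into the
 * guest. The VM is recreated before the 'features enabled' stage, since a
 * fresh VM has all the features enabled by default.
 */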
static struct kvm_vm *test_guest_stage(struct kvm_vm *vm)
{
	struct kvm_vm *ret_vm = vm;

	pr_debug("Stage: %d\n", stage);

	switch (stage) {
	case TEST_STAGE_REG_IFACE:
		test_fw_regs_after_vm_start(vm);
		break;
	case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
		/* Start a new VM so that all the features are now enabled by default */
		kvm_vm_free(vm);
		ret_vm = test_vm_create();
		break;
	case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
	case TEST_STAGE_HVC_IFACE_FALSE_INFO:
		break;
	default:
		TEST_FAIL("Unknown test stage: %d\n", stage);
	}

	stage++;
	/* Sync the stage into whichever VM is live, which may have been recreated above */
	sync_global_to_guest(ret_vm, stage);

	return ret_vm;
}

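/*
 * Main test loop: create the VM, check the registers before the guest runs,
 * then run the guest and service its stage transitions until it signals
 * UCALL_DONE.
 */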
static void test_run(void)
{
	struct kvm_vm *vm;
	struct ucall uc;
	bool guest_done = false;

	vm = test_vm_create();

	test_fw_regs_before_vm_start(vm);

	while (!guest_done) {
		vcpu_run(vm, 0);

		switch (get_ucall(vm, 0, &uc)) {
		case UCALL_SYNC:
			vm = test_guest_stage(vm);
			break;
		case UCALL_DONE:
			guest_done = true;
			break;
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld\n\tvalues: 0x%lx, 0x%lx; 0x%lx, stage: %u",
			(const char *)uc.args[0], __FILE__, uc.args[1],
			uc.args[2], uc.args[3], uc.args[4], stage);
			break;
		default:
			TEST_FAIL("Unexpected guest exit\n");
		}
	}

	kvm_vm_free(vm);
}

int main(void)
{
	setbuf(stdout, NULL);

	test_run();
	return 0;
}
337