// SPDX-License-Identifier: GPL-2.0
/*
 * steal/stolen time test
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */
#include <stdio.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>
#include <linux/kernel.h>
#include <asm/kvm.h>
#ifdef __riscv
#include "sbi.h"
#else
#include <asm/kvm_para.h>
#endif

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"

#define NR_VCPUS		4
#define ST_GPA_BASE		(1 << 30)

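/*
 * These globals are part of the test ELF image, which the selftest
 * framework also loads into the guest, so guest and host code share the
 * same symbols; sync_global_to_guest()/sync_global_from_guest() copy a
 * symbol's value between the host-side and guest-side copies.
 */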
static void *st_gva[NR_VCPUS];
static uint64_t guest_stolen_time[NR_VCPUS];

#if defined(__x86_64__)

/* steal_time must have 64-byte alignment */
#define STEAL_TIME_SIZE		((sizeof(struct kvm_steal_time) + 63) & ~63)
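/*
 * (x + 63) & ~63 rounds x up to the next multiple of 64, e.g. a
 * 56-byte struct rounds to 64 and a 65-byte struct to 128, so each
 * per-vCPU record starts on the required 64-byte boundary.
 */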
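/*
 * kvm_steal_time.version is a seqcount: KVM increments it before and
 * after updating the record, so an odd value means an update is in
 * flight. KVM refreshes a vCPU's record on the vCPU thread just before
 * entering the guest, so a guest reading its own record never races
 * with an update and the plain checks below suffice; for illustration
 * only, a more defensive reader would use the usual retry loop,
 * sketched here:
 *
 *	do {
 *		version = READ_ONCE(st->version);
 *		rmb();
 *		steal = READ_ONCE(st->steal);
 *		rmb();
 *	} while ((version & 1) || version != READ_ONCE(st->version));
 */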
static void check_status(struct kvm_steal_time *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
	GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0);
	GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0);
}

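/*
 * Guest side of the test: each GUEST_SYNC() hands control back to the
 * host, which steals CPU time from the vCPU between the sync points.
 * Each re-entry refreshes the record, so after GUEST_SYNC(1) the
 * version counter must have advanced; the observed steal values are
 * published through guest_stolen_time[] for the host to cross-check
 * against run_delay.
 */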
static void guest_code(int cpu)
{
	struct kvm_steal_time *st = st_gva[cpu];
	uint32_t version;

	GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));

	memset(st, 0, sizeof(*st));
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	version = READ_ONCE(st->version);
	check_status(st);
	GUEST_SYNC(1);

	check_status(st);
	GUEST_ASSERT(version < READ_ONCE(st->version));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}

static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has(X86_FEATURE_KVM_STEAL_TIME);
}

static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
	int ret;

	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vcpu->vm, st_gva[i]);

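	/*
	 * _vcpu_set_msr() wraps KVM_SET_MSRS, which returns the number
	 * of MSRs actually written, so 0 below means KVM rejected the
	 * write (the reserved bits make the value invalid), while the
	 * vcpu_set_msr() that follows asserts that exactly one MSR was
	 * written.
	 */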
	ret = _vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME,
			    (ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
	TEST_ASSERT(ret == 0, "Setting reserved bits in MSR_KVM_STEAL_TIME didn't fail");

	vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
}

static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
	struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

	ksft_print_msg("VCPU%d:\n", vcpu_idx);
	ksft_print_msg("    steal:     %lld\n", st->steal);
	ksft_print_msg("    version:   %d\n", st->version);
	ksft_print_msg("    flags:     %d\n", st->flags);
	ksft_print_msg("    preempted: %d\n", st->preempted);
	ksft_print_msg("    u8_pad:    %d %d %d\n",
			st->u8_pad[0], st->u8_pad[1], st->u8_pad[2]);
	ksft_print_msg("    pad:       %d %d %d %d %d %d %d %d %d %d %d\n",
			st->pad[0], st->pad[1], st->pad[2], st->pad[3],
			st->pad[4], st->pad[5], st->pad[6], st->pad[7],
			st->pad[8], st->pad[9], st->pad[10]);
}

#elif defined(__aarch64__)

/* PV_TIME_ST must have 64-byte alignment */
#define STEAL_TIME_SIZE		((sizeof(struct st_time) + 63) & ~63)

#define SMCCC_ARCH_FEATURES	0x80000001
#define PV_TIME_FEATURES	0xc5000020
#define PV_TIME_ST		0xc5000021

struct st_time {
	uint32_t rev;
	uint32_t attr;
	uint64_t st_time;
};

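/*
 * Issue an SMCCC fast call with a single argument via the selftest
 * do_smccc() helper, which uses the conduit (HVC or SMC) appropriate
 * for the VM. Per the SMCCC convention the result/error code comes
 * back in a0, with negative values such as NOT_SUPPORTED (-1)
 * denoting errors.
 */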
static int64_t smccc(uint32_t func, uint64_t arg)
{
	struct arm_smccc_res res;

	do_smccc(func, arg, 0, 0, 0, 0, 0, 0, &res);
	return res.a0;
}

static void check_status(struct st_time *st)
{
	GUEST_ASSERT_EQ(READ_ONCE(st->rev), 0);
	GUEST_ASSERT_EQ(READ_ONCE(st->attr), 0);
}

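/*
 * Discovery chain from the Arm DEN 0057 paravirtualized-time spec:
 * SMCCC_ARCH_FEATURES reports whether PV_TIME_FEATURES is implemented,
 * PV_TIME_FEATURES then reports support for itself and for PV_TIME_ST,
 * and PV_TIME_ST finally returns the 64-byte-aligned IPA of this
 * vCPU's stolen-time structure.
 */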
static void guest_code(int cpu)
{
	struct st_time *st;
	int64_t status;

	status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT_EQ(status, 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT_EQ(status, 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_ST);
	GUEST_ASSERT_EQ(status, 0);

	status = smccc(PV_TIME_ST, 0);
	GUEST_ASSERT_NE(status, -1);
	GUEST_ASSERT_EQ(status, (ulong)st_gva[cpu]);

	st = (struct st_time *)status;
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_SYNC(1);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_DONE();
}

static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
	};

	return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
}

static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
	struct kvm_vm *vm = vcpu->vm;
	uint64_t st_ipa;
	int ret;

	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
		.addr = (uint64_t)&st_ipa,
	};

	vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);

	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vm, st_gva[i]);

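	/* A misaligned IPA (bit 0 set here) must be rejected with EINVAL. */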
	st_ipa = (ulong)st_gva[i] | 1;
	ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
	TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");

	st_ipa = (ulong)st_gva[i];
	vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);

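	/*
	 * The stolen-time base can only be set once per vCPU; a second
	 * set must fail with EEXIST.
	 */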
	ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
	TEST_ASSERT(ret == -1 && errno == EEXIST, "Setting the IPA twice didn't report EEXIST");
}

static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
	struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

	ksft_print_msg("VCPU%d:\n", vcpu_idx);
	ksft_print_msg("    rev:     %d\n", st->rev);
	ksft_print_msg("    attr:    %d\n", st->attr);
	ksft_print_msg("    st_time: %ld\n", st->st_time);
}

#elif defined(__riscv)

/* SBI STA shmem must have 64-byte alignment */
#define STEAL_TIME_SIZE		((sizeof(struct sta_struct) + 63) & ~63)

static vm_paddr_t st_gpa[NR_VCPUS];

struct sta_struct {
	uint32_t sequence;
	uint32_t flags;
	uint64_t steal;
	uint8_t preempted;
	uint8_t pad[47];
} __packed;

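/*
 * Register the steal-time shared memory with the SBI STA extension
 * (function 0, steal_time_set_shmem): the 64-bit GPA is split across
 * the lo/hi arguments so that RV32 can pass it in two XLEN-sized
 * registers, and passing all-ones in both disables the shared memory.
 */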
static void sta_set_shmem(vm_paddr_t gpa, unsigned long flags)
{
	unsigned long lo = (unsigned long)gpa;
#if __riscv_xlen == 32
	unsigned long hi = (unsigned long)(gpa >> 32);
#else
	unsigned long hi = gpa == -1 ? -1 : 0;
#endif
	struct sbiret ret = sbi_ecall(SBI_EXT_STA, 0, lo, hi, flags, 0, 0, 0);

	GUEST_ASSERT(ret.value == 0 && ret.error == 0);
}

static void check_status(struct sta_struct *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->sequence) & 1));
	GUEST_ASSERT(READ_ONCE(st->flags) == 0);
	GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
}

static void guest_code(int cpu)
{
	struct sta_struct *st = st_gva[cpu];
	uint32_t sequence;
	long out_val = 0;
	bool probe;

	probe = guest_sbi_probe_extension(SBI_EXT_STA, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	sta_set_shmem(st_gpa[cpu], 0);
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	sequence = READ_ONCE(st->sequence);
	check_status(st);
	GUEST_SYNC(1);

	check_status(st);
	GUEST_ASSERT(sequence < READ_ONCE(st->sequence));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}

static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	uint64_t id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA);
	unsigned long enabled = vcpu_get_reg(vcpu, id);

	TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result");

	return enabled;
}

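/*
 * Unlike x86 and arm64 there is no host-side ioctl to program the
 * address: the guest registers the shared memory itself via
 * sta_set_shmem(), so init only computes the identity-mapped GVA/GPA
 * pair and publishes it to the guest.
 */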
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	st_gpa[i] = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)st_gva[i]);
	sync_global_to_guest(vcpu->vm, st_gva[i]);
	sync_global_to_guest(vcpu->vm, st_gpa[i]);
}

static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
	struct sta_struct *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
	int i;

	pr_info("VCPU%d:\n", vcpu_idx);
	pr_info("    sequence:  %d\n", st->sequence);
	pr_info("    flags:     %d\n", st->flags);
	pr_info("    steal:     %"PRIu64"\n", st->steal);
	pr_info("    preempted: %d\n", st->preempted);
	pr_info("    pad:      ");
	for (i = 0; i < 47; ++i)
		pr_info("%d", st->pad[i]);
	pr_info("\n");
}

#elif defined(__loongarch__)

/* steal_time must have 64-byte alignment */
#define STEAL_TIME_SIZE		((sizeof(struct kvm_steal_time) + 63) & ~63)
#define KVM_STEAL_PHYS_VALID	BIT_ULL(0)

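/*
 * LoongArch does not expose a steal-time structure through the KVM
 * UAPI headers, so the test defines the record locally; the layout is
 * assumed to mirror the kernel's paravirt steal-time definition
 * (seqcount-style version, flags, steal counter, preempted flag).
 */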
struct kvm_steal_time {
	__u64 steal;
	__u32 version;
	__u32 flags;
	__u8  preempted;
	__u8  pad[47];
};

static void check_status(struct kvm_steal_time *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
	GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0);
	GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0);
}

static void guest_code(int cpu)
{
	uint32_t version;
	struct kvm_steal_time *st = st_gva[cpu];

	memset(st, 0, sizeof(*st));
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	version = READ_ONCE(st->version);
	check_status(st);
	GUEST_SYNC(1);

	check_status(st);
	GUEST_ASSERT(version < READ_ONCE(st->version));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}

static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	int err;
	uint64_t val;
	struct kvm_device_attr attr = {
		.group = KVM_LOONGARCH_VCPU_CPUCFG,
		.attr = CPUCFG_KVM_FEATURE,
		.addr = (uint64_t)&val,
	};

	err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr);
	if (err)
		return false;

	err = __vcpu_ioctl(vcpu, KVM_GET_DEVICE_ATTR, &attr);
	if (err)
		return false;

	return val & BIT(KVM_FEATURE_STEAL_TIME);
}

static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
	int err;
	uint64_t st_gpa;
	struct kvm_vm *vm = vcpu->vm;
	struct kvm_device_attr attr = {
		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
		.attr = KVM_LOONGARCH_VCPU_PVTIME_GPA,
		.addr = (uint64_t)&st_gpa,
	};

	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vm, st_gva[i]);

	err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr);
	TEST_ASSERT(err == 0, "PV steal-time feature not supported");

	st_gpa = (unsigned long)st_gva[i] | KVM_STEAL_PHYS_VALID;
	err = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &attr);
	TEST_ASSERT(err == 0, "Failed to set PV steal-time GPA");
}

static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
	struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

	ksft_print_msg("VCPU%d:\n", vcpu_idx);
	ksft_print_msg("    steal:     %lld\n", st->steal);
	ksft_print_msg("    flags:     %d\n", st->flags);
	ksft_print_msg("    version:   %d\n", st->version);
	ksft_print_msg("    preempted: %d\n", st->preempted);
}
#endif

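/*
 * Busy-loop for at least MIN_RUN_DELAY_NS of wall time. This thread is
 * pinned to the same physical CPU as the task running the vCPU, so
 * while it spins that task sits on the runqueue accruing run delay,
 * which KVM then reports to the guest as stolen time.
 */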
static void *do_steal_time(void *arg)
{
	struct timespec ts, stop;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	stop = timespec_add_ns(ts, MIN_RUN_DELAY_NS);

	while (1) {
		clock_gettime(CLOCK_MONOTONIC, &ts);
		if (timespec_to_ns(timespec_sub(ts, stop)) >= 0)
			break;
	}

	return NULL;
}

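/*
 * Enter the guest until the next ucall: GUEST_SYNC() and GUEST_DONE()
 * surface here as UCALL_SYNC/UCALL_DONE, guest assertion failures are
 * forwarded via REPORT_GUEST_ASSERT(), and any other exit fails the
 * test.
 */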
static void run_vcpu(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	vcpu_run(vcpu);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
	case UCALL_DONE:
		break;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu->run->exit_reason));
	}
}

int main(int ac, char **av)
{
	struct kvm_vcpu *vcpus[NR_VCPUS];
	struct kvm_vm *vm;
	pthread_attr_t attr;
	pthread_t thread;
	cpu_set_t cpuset;
	unsigned int gpages;
	long stolen_time;
	long run_delay;
	bool verbose;
	int i;

	verbose = ac > 1 && (!strncmp(av[1], "-v", 3) || !strncmp(av[1], "--verbose", 10));

	/* Pin everything to CPU 0 so we can force preemption of the VCPUs */
	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	pthread_attr_init(&attr);
	pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

	/* Create a VM and an identity-mapped memslot for the steal-time structures */
	vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);

	ksft_print_header();
	TEST_REQUIRE(is_steal_time_supported(vcpus[0]));
	ksft_set_plan(NR_VCPUS);

	/* Run the test on each VCPU */
	for (i = 0; i < NR_VCPUS; ++i) {
		steal_time_init(vcpus[i], i);

		vcpu_args_set(vcpus[i], 1, i);

		/* First VCPU run initializes steal-time */
		run_vcpu(vcpus[i]);

		/* Second VCPU run; expect the guest's stolen time to be <= run_delay */
		run_vcpu(vcpus[i]);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i];
		run_delay = get_run_delay();
		TEST_ASSERT(stolen_time <= run_delay,
			    "Expected stolen time <= %ld, got %ld",
			    run_delay, stolen_time);

		/*
		 * Steal time from the VCPU. The steal-time thread has the
		 * same CPU affinity as the VCPUs, so spinning it up forces
		 * the task running the VCPU onto the runqueue, and
		 * get_run_delay() (which reads this task's schedstat run
		 * delay) measures how long it waited there.
		 */
		run_delay = get_run_delay();
		pthread_create(&thread, &attr, do_steal_time, NULL);
		do
			sched_yield();
		while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
		pthread_join(thread, NULL);
		run_delay = get_run_delay() - run_delay;
		TEST_ASSERT(run_delay >= MIN_RUN_DELAY_NS,
			    "Expected run_delay >= %ld, got %ld",
			    MIN_RUN_DELAY_NS, run_delay);

		/* Run the VCPU again to confirm the stolen time is consistent with run_delay */
		run_vcpu(vcpus[i]);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i] - stolen_time;
		TEST_ASSERT(stolen_time >= run_delay,
			    "Expected stolen time >= %ld, got %ld",
			    run_delay, stolen_time);

		if (verbose) {
			ksft_print_msg("VCPU%d: total-stolen-time=%ld test-stolen-time=%ld%s\n",
				       i, guest_stolen_time[i], stolen_time,
				       stolen_time == run_delay ?
				       " (BONUS: guest test-stolen-time even exactly matches test-run_delay)" : "");
			steal_time_dump(vm, i);
		}
		ksft_test_result_pass("vcpu%d\n", i);
	}

	/* Print the results and exit() accordingly */
	ksft_finished();
}