xref: /linux/tools/testing/selftests/kvm/steal_time.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * steal/stolen time test
4  *
5  * Copyright (C) 2020, Red Hat, Inc.
6  */
7 #include <stdio.h>
8 #include <time.h>
9 #include <sched.h>
10 #include <pthread.h>
11 #include <linux/kernel.h>
12 #include <asm/kvm.h>
13 #ifdef __riscv
14 #include "sbi.h"
15 #else
16 #include <asm/kvm_para.h>
17 #endif
18 
19 #include "test_util.h"
20 #include "kvm_util.h"
21 #include "processor.h"
22 #include "ucall_common.h"
23 
24 #define NR_VCPUS		4
25 #define ST_GPA_BASE		(1 << 30)
26 
27 static void *st_gva[NR_VCPUS];
28 static uint64_t guest_stolen_time[NR_VCPUS];
29 
30 #if defined(__x86_64__)
31 
/* steal_time must have 64-byte alignment */
#define STEAL_TIME_SIZE		((sizeof(struct kvm_steal_time) + 63) & ~63)

/*
 * Validate the steady-state contents of the shared steal_time record:
 * an even version (KVM makes it odd while an update is in flight), and
 * no flags or pending preemption reported.
 */
static void check_status(struct kvm_steal_time *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
	GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0);
	GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0);
}
41 
/*
 * Guest side of the x86 test: verify the host programmed the steal-time
 * MSR, then sample the accumulated steal time around two host syncs.
 */
static void guest_code(int cpu)
{
	struct kvm_steal_time *st = st_gva[cpu];
	uint32_t version;

	/* steal_time_init() set the MSR to this vCPU's record, enabled. */
	GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));

	memset(st, 0, sizeof(*st));
	GUEST_SYNC(0);

	/* First sample, after the host has re-entered us at least once. */
	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	version = READ_ONCE(st->version);
	check_status(st);
	GUEST_SYNC(1);

	/* Second sample: version must have advanced across the host exit. */
	check_status(st);
	GUEST_ASSERT(version < READ_ONCE(st->version));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}
64 
/* Steal time is usable iff KVM advertises the PV steal-time CPUID feature. */
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has(X86_FEATURE_KVM_STEAL_TIME);
}
69 
/*
 * Point vCPU i's steal-time record at its slot in the ST_GPA_BASE region
 * and enable reporting by writing MSR_KVM_STEAL_TIME on its behalf.
 */
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vcpu->vm, st_gva[i]);

	vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
}
78 
/* Dump the raw x86 steal_time record for one vCPU (verbose mode only). */
static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
	struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

	ksft_print_msg("VCPU%d:\n", vcpu_idx);
	ksft_print_msg("    steal:     %lld\n", st->steal);
	ksft_print_msg("    version:   %d\n", st->version);
	ksft_print_msg("    flags:     %d\n", st->flags);
	ksft_print_msg("    preempted: %d\n", st->preempted);
	ksft_print_msg("    u8_pad:    %d %d %d\n",
			st->u8_pad[0], st->u8_pad[1], st->u8_pad[2]);
	ksft_print_msg("    pad:       %d %d %d %d %d %d %d %d %d %d %d\n",
			st->pad[0], st->pad[1], st->pad[2], st->pad[3],
			st->pad[4], st->pad[5], st->pad[6], st->pad[7],
			st->pad[8], st->pad[9], st->pad[10]);
}
95 
/*
 * Negative UAPI test: writing MSR_KVM_STEAL_TIME with reserved bits set
 * must be rejected.  _vcpu_set_msr() reports the number of MSRs actually
 * written, so 0 here means KVM refused the write, as expected.
 */
static void check_steal_time_uapi(void)
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	int ret;

	vm = vm_create_with_one_vcpu(&vcpu, NULL);

	ret = _vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME,
			    (ulong)ST_GPA_BASE | KVM_STEAL_RESERVED_MASK);
	TEST_ASSERT(ret == 0, "Bad GPA didn't fail");

	kvm_vm_free(vm);
}
110 
111 #elif defined(__aarch64__)
112 
/* PV_TIME_ST must have 64-byte alignment */
#define STEAL_TIME_SIZE		((sizeof(struct st_time) + 63) & ~63)

/* SMCCC function IDs used by the Arm paravirtualized-time protocol. */
#define SMCCC_ARCH_FEATURES	0x80000001
#define PV_TIME_FEATURES	0xc5000020
#define PV_TIME_ST		0xc5000021

/* Layout of the Arm PV stolen-time record shared with the hypervisor. */
struct st_time {
	uint32_t rev;		/* revision of the record layout */
	uint32_t attr;		/* attributes; expected to be 0 */
	uint64_t st_time;	/* accumulated stolen time */
};
125 
/* Issue an SMCCC call with a single argument and return its a0 result. */
static int64_t smccc(uint32_t func, uint64_t arg)
{
	struct arm_smccc_res res;

	do_smccc(func, arg, 0, 0, 0, 0, 0, 0, &res);
	return res.a0;
}
133 
/* The stolen-time record's revision and attributes must both read as 0. */
static void check_status(struct st_time *st)
{
	GUEST_ASSERT_EQ(READ_ONCE(st->rev), 0);
	GUEST_ASSERT_EQ(READ_ONCE(st->attr), 0);
}
139 
/*
 * Guest side of the Arm test: discover the PV time services via SMCCC,
 * locate the stolen-time record, then sample it around two host syncs.
 */
static void guest_code(int cpu)
{
	struct st_time *st;
	int64_t status;

	/* All three feature-discovery queries must report "supported". */
	status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT_EQ(status, 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT_EQ(status, 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_ST);
	GUEST_ASSERT_EQ(status, 0);

	/* PV_TIME_ST returns the IPA of this vCPU's stolen-time record. */
	status = smccc(PV_TIME_ST, 0);
	GUEST_ASSERT_NE(status, -1);
	GUEST_ASSERT_EQ(status, (ulong)st_gva[cpu]);

	st = (struct st_time *)status;
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_SYNC(1);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_DONE();
}
167 
/* PV time is supported iff the vCPU exposes the PVTIME IPA device attribute. */
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
	};

	return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
}
177 
/*
 * Assign vCPU i its slot in the ST_GPA_BASE region and register that IPA
 * with KVM via the PVTIME device attribute (the IPA is passed indirectly
 * through dev.addr).
 */
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
	struct kvm_vm *vm = vcpu->vm;
	uint64_t st_ipa;

	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
		.addr = (uint64_t)&st_ipa,
	};

	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vm, st_gva[i]);

	st_ipa = (ulong)st_gva[i];
	vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
}
196 
/* Dump the raw Arm stolen-time record for one vCPU (verbose mode only). */
static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
	struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

	ksft_print_msg("VCPU%d:\n", vcpu_idx);
	ksft_print_msg("    rev:     %d\n", st->rev);
	ksft_print_msg("    attr:    %d\n", st->attr);
	ksft_print_msg("    st_time: %ld\n", st->st_time);
}
206 
/*
 * Negative UAPI tests for the PVTIME IPA attribute: a misaligned IPA must
 * fail with EINVAL, and setting the IPA twice must fail with EEXIST.
 */
static void check_steal_time_uapi(void)
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	uint64_t st_ipa;
	int ret;

	vm = vm_create_with_one_vcpu(&vcpu, NULL);

	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
		.addr = (uint64_t)&st_ipa,
	};

	vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);

	/* Bit 0 set => not 64-byte aligned, must be rejected. */
	st_ipa = (ulong)ST_GPA_BASE | 1;
	ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
	TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");

	st_ipa = (ulong)ST_GPA_BASE;
	vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);

	/* The base address may only be set once per vCPU. */
	ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
	TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");

	kvm_vm_free(vm);
}
236 
237 #elif defined(__riscv)
238 
/* SBI STA shmem must have 64-byte alignment */
#define STEAL_TIME_SIZE		((sizeof(struct sta_struct) + 63) & ~63)

/* Guest-physical address of each vCPU's STA shared-memory area. */
static vm_paddr_t st_gpa[NR_VCPUS];

/* Layout of the SBI STA (steal-time accounting) shared-memory record. */
struct sta_struct {
	uint32_t sequence;	/* update counter; odd while an update is in flight */
	uint32_t flags;		/* expected to be 0 */
	uint64_t steal;		/* accumulated stolen time */
	uint8_t preempted;	/* expected to be 0 while we're running */
	uint8_t pad[47];
} __packed;
251 
/*
 * Register (or, with gpa == -1, disable) the SBI STA shared-memory area.
 * The SBI call takes the GPA split into lo/hi XLEN-sized halves.
 */
static void sta_set_shmem(vm_paddr_t gpa, unsigned long flags)
{
	unsigned long lo = (unsigned long)gpa;
#if __riscv_xlen == 32
	unsigned long hi = (unsigned long)(gpa >> 32);
#else
	/* On RV64 lo holds the full GPA; hi is all-ones only for "disable". */
	unsigned long hi = gpa == -1 ? -1 : 0;
#endif
	struct sbiret ret = sbi_ecall(SBI_EXT_STA, 0, lo, hi, flags, 0, 0, 0);

	GUEST_ASSERT(ret.value == 0 && ret.error == 0);
}
264 
/*
 * Steady-state checks on the STA record: even sequence number (odd means
 * an update is in flight), no flags, and not marked preempted.
 */
static void check_status(struct sta_struct *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->sequence) & 1));
	GUEST_ASSERT(READ_ONCE(st->flags) == 0);
	GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
}
271 
/*
 * Guest side of the RISC-V test: probe the SBI STA extension, register the
 * shared-memory area, then sample accumulated steal time around two syncs.
 */
static void guest_code(int cpu)
{
	struct sta_struct *st = st_gva[cpu];
	uint32_t sequence;
	long out_val = 0;
	bool probe;

	/* The STA extension must be advertised before it may be used. */
	probe = guest_sbi_probe_extension(SBI_EXT_STA, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	sta_set_shmem(st_gpa[cpu], 0);
	GUEST_SYNC(0);

	/* First sample, after the host has re-entered us at least once. */
	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	sequence = READ_ONCE(st->sequence);
	check_status(st);
	GUEST_SYNC(1);

	/* The sequence counter must have advanced across the host exit. */
	check_status(st);
	GUEST_ASSERT(sequence < READ_ONCE(st->sequence));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}
297 
/*
 * Steal time is supported iff the in-kernel SBI STA extension is enabled
 * for the vCPU, exposed via ONE_REG as a boolean.
 */
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	uint64_t id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA);
	unsigned long enabled = vcpu_get_reg(vcpu, id);

	TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result");

	return enabled;
}
307 
/*
 * Record both the guest-virtual and guest-physical address of vCPU i's
 * record; the guest registers the shmem itself in guest_code().
 */
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	st_gpa[i] = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)st_gva[i]);
	sync_global_to_guest(vcpu->vm, st_gva[i]);
	sync_global_to_guest(vcpu->vm, st_gpa[i]);
}
316 
/* Dump the raw RISC-V STA record for one vCPU (verbose mode only). */
static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
	struct sta_struct *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
	int i;

	pr_info("VCPU%d:\n", vcpu_idx);
	pr_info("    sequence:  %d\n", st->sequence);
	pr_info("    flags:     %d\n", st->flags);
	pr_info("    steal:     %"PRIu64"\n", st->steal);
	pr_info("    preempted: %d\n", st->preempted);
	pr_info("    pad:      ");
	for (i = 0; i < 47; ++i)
		pr_info("%d", st->pad[i]);
	pr_info("\n");
}
332 
/*
 * UAPI tests for the STA shmem ONE_REG: a misaligned address must fail
 * with EINVAL, while an aligned address and the all-ones "disabled" value
 * must both succeed.
 */
static void check_steal_time_uapi(void)
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	struct kvm_one_reg reg;
	uint64_t shmem;
	int ret;

	vm = vm_create_with_one_vcpu(&vcpu, NULL);

	reg.id = KVM_REG_RISCV |
			 KVM_REG_SIZE_ULONG |
			 KVM_REG_RISCV_SBI_STATE |
			 KVM_REG_RISCV_SBI_STA |
			 KVM_REG_RISCV_SBI_STA_REG(shmem_lo);
	reg.addr = (uint64_t)&shmem;

	/* +1 breaks the required 64-byte alignment. */
	shmem = ST_GPA_BASE + 1;
	ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "misaligned STA shmem returns -EINVAL");

	shmem = ST_GPA_BASE;
	ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
	TEST_ASSERT(ret == 0,
		    "aligned STA shmem succeeds");

	/* All-ones disables the shared memory area. */
	shmem = INVALID_GPA;
	ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
	TEST_ASSERT(ret == 0,
		    "all-ones for STA shmem succeeds");

	kvm_vm_free(vm);
}
367 
368 #elif defined(__loongarch__)
369 
/* steal_time must have 64-byte alignment */
#define STEAL_TIME_SIZE		((sizeof(struct kvm_steal_time) + 63) & ~63)
#define KVM_STEAL_PHYS_VALID	BIT_ULL(0)

/* Layout of the LoongArch PV steal-time record shared with the hypervisor. */
struct kvm_steal_time {
	__u64 steal;		/* accumulated stolen time */
	__u32 version;		/* update counter; odd while an update is in flight */
	__u32 flags;		/* expected to be 0 */
	__u8  preempted;	/* expected to be 0 while we're running */
	__u8  pad[47];
};
381 
/*
 * Steady-state checks on the steal_time record: even version (odd means
 * an update is in flight), no flags, and not marked preempted.
 */
static void check_status(struct kvm_steal_time *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
	GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0);
	GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0);
}
388 
/*
 * Guest side of the LoongArch test: zero the record, then sample the
 * accumulated steal time around two host syncs.
 */
static void guest_code(int cpu)
{
	uint32_t version;
	struct kvm_steal_time *st = st_gva[cpu];

	memset(st, 0, sizeof(*st));
	GUEST_SYNC(0);

	/* First sample, after the host has re-entered us at least once. */
	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	version = READ_ONCE(st->version);
	check_status(st);
	GUEST_SYNC(1);

	/* The version counter must have advanced across the host exit. */
	check_status(st);
	GUEST_ASSERT(version < READ_ONCE(st->version));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}
409 
410 static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
411 {
412 	int err;
413 	uint64_t val;
414 	struct kvm_device_attr attr = {
415 		.group = KVM_LOONGARCH_VCPU_CPUCFG,
416 		.attr = CPUCFG_KVM_FEATURE,
417 		.addr = (uint64_t)&val,
418 	};
419 
420 	err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr);
421 	if (err)
422 		return false;
423 
424 	err = __vcpu_ioctl(vcpu, KVM_GET_DEVICE_ATTR, &attr);
425 	if (err)
426 		return false;
427 
428 	return val & BIT(KVM_FEATURE_STEAL_TIME);
429 }
430 
/*
 * Assign vCPU i its slot in the ST_GPA_BASE region and register the GPA
 * (with the valid bit set) via the PVTIME device attribute.
 */
static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
{
	int err;
	uint64_t st_gpa;
	struct kvm_vm *vm = vcpu->vm;
	struct kvm_device_attr attr = {
		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
		.attr = KVM_LOONGARCH_VCPU_PVTIME_GPA,
		.addr = (uint64_t)&st_gpa,
	};

	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vm, st_gva[i]);

	err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr);
	TEST_ASSERT(err == 0, "No PV stealtime Feature");

	st_gpa = (unsigned long)st_gva[i] | KVM_STEAL_PHYS_VALID;
	err = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &attr);
	TEST_ASSERT(err == 0, "Fail to set PV stealtime GPA");
}
453 
/* Dump the raw LoongArch steal_time record for one vCPU (verbose mode only). */
static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
	struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

	ksft_print_msg("VCPU%d:\n", vcpu_idx);
	ksft_print_msg("    steal:     %lld\n", st->steal);
	ksft_print_msg("    flags:     %d\n", st->flags);
	ksft_print_msg("    version:   %d\n", st->version);
	ksft_print_msg("    preempted: %d\n", st->preempted);
}
464 #endif
465 
466 static void *do_steal_time(void *arg)
467 {
468 	struct timespec ts, stop;
469 
470 	clock_gettime(CLOCK_MONOTONIC, &ts);
471 	stop = timespec_add_ns(ts, MIN_RUN_DELAY_NS);
472 
473 	while (1) {
474 		clock_gettime(CLOCK_MONOTONIC, &ts);
475 		if (timespec_to_ns(timespec_sub(ts, stop)) >= 0)
476 			break;
477 	}
478 
479 	return NULL;
480 }
481 
482 static void run_vcpu(struct kvm_vcpu *vcpu)
483 {
484 	struct ucall uc;
485 
486 	vcpu_run(vcpu);
487 
488 	switch (get_ucall(vcpu, &uc)) {
489 	case UCALL_SYNC:
490 	case UCALL_DONE:
491 		break;
492 	case UCALL_ABORT:
493 		REPORT_GUEST_ASSERT(uc);
494 	default:
495 		TEST_ASSERT(false, "Unexpected exit: %s",
496 			    exit_reason_str(vcpu->run->exit_reason));
497 	}
498 }
499 
/*
 * Per-vCPU flow: configure steal time, run the guest twice to take a
 * baseline sample, deliberately steal CPU time with a pinned worker
 * thread, then run again and verify the guest-observed stolen time is
 * consistent with the task's scheduler run_delay.
 */
int main(int ac, char **av)
{
	struct kvm_vcpu *vcpus[NR_VCPUS];
	struct kvm_vm *vm;
	pthread_attr_t attr;
	pthread_t thread;
	cpu_set_t cpuset;
	unsigned int gpages;
	long stolen_time;
	long run_delay;
	bool verbose;
	int i;

	/* "-v" / "--verbose" enables per-vCPU record dumps. */
	verbose = ac > 1 && (!strncmp(av[1], "-v", 3) || !strncmp(av[1], "--verbose", 10));

	/* Set CPU affinity so we can force preemption of the VCPU */
	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	pthread_attr_init(&attr);
	pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

	/* Create a VM and an identity mapped memslot for the steal time structure */
	vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);

	ksft_print_header();
	TEST_REQUIRE(is_steal_time_supported(vcpus[0]));
	ksft_set_plan(NR_VCPUS);

	/* Exercise the arch-specific negative/UAPI paths first. */
	check_steal_time_uapi();

	/* Run test on each VCPU */
	for (i = 0; i < NR_VCPUS; ++i) {
		steal_time_init(vcpus[i], i);

		vcpu_args_set(vcpus[i], 1, i);

		/* First VCPU run initializes steal-time */
		run_vcpu(vcpus[i]);

		/* Second VCPU run, expect guest stolen time to be <= run_delay */
		run_vcpu(vcpus[i]);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i];
		run_delay = get_run_delay();
		TEST_ASSERT(stolen_time <= run_delay,
			    "Expected stolen time <= %ld, got %ld",
			    run_delay, stolen_time);

		/* Steal time from the VCPU. The steal time thread has the same CPU affinity as the VCPUs. */
		run_delay = get_run_delay();
		pthread_create(&thread, &attr, do_steal_time, NULL);
		/* Yield until the worker has accrued enough run_delay for us. */
		do
			sched_yield();
		while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
		pthread_join(thread, NULL);
		run_delay = get_run_delay() - run_delay;
		TEST_ASSERT(run_delay >= MIN_RUN_DELAY_NS,
			    "Expected run_delay >= %ld, got %ld",
			    MIN_RUN_DELAY_NS, run_delay);

		/* Run VCPU again to confirm stolen time is consistent with run_delay */
		run_vcpu(vcpus[i]);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i] - stolen_time;
		TEST_ASSERT(stolen_time >= run_delay,
			    "Expected stolen time >= %ld, got %ld",
			    run_delay, stolen_time);

		if (verbose) {
			ksft_print_msg("VCPU%d: total-stolen-time=%ld test-stolen-time=%ld%s\n",
				       i, guest_stolen_time[i], stolen_time,
				       stolen_time == run_delay ?
				       " (BONUS: guest test-stolen-time even exactly matches test-run_delay)" : "");
			steal_time_dump(vm, i);
		}
		ksft_test_result_pass("vcpu%d\n", i);
	}

	/* Print results and exit() accordingly */
	ksft_finished();
}
585