1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * steal/stolen time test
4 *
5 * Copyright (C) 2020, Red Hat, Inc.
6 */
7 #include <stdio.h>
8 #include <time.h>
9 #include <sched.h>
10 #include <pthread.h>
11 #include <linux/kernel.h>
12 #include <asm/kvm.h>
13 #ifdef __riscv
14 #include "sbi.h"
15 #else
16 #include <asm/kvm_para.h>
17 #endif
18
19 #include "test_util.h"
20 #include "kvm_util.h"
21 #include "processor.h"
22 #include "ucall_common.h"
23
24 #define NR_VCPUS 4
25 #define ST_GPA_BASE (1 << 30)
26
27 static void *st_gva[NR_VCPUS];
28 static u64 guest_stolen_time[NR_VCPUS];
29
30 #if defined(__x86_64__)
31
32 /* steal_time must have 64-byte alignment */
33 #define STEAL_TIME_SIZE ((sizeof(struct kvm_steal_time) + 63) & ~63)
34
/*
 * Assert the x86 steal_time record is quiescent and sane: the version
 * must be even (an odd version means a host update is in flight), and
 * no flags or preempted state may be set.
 */
static void check_status(struct kvm_steal_time *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
	GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0);
	GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0);
}
41
/*
 * Guest-side test body (x86): verify MSR_KVM_STEAL_TIME points at this
 * vCPU's steal_time area, then sample stolen time around two host syncs
 * and check the record stays consistent while the version advances.
 */
static void guest_code(int cpu)
{
	struct kvm_steal_time *st = st_gva[cpu];
	u32 version;

	/* steal_time_init() programmed the MSR with this (identity mapped) GVA. */
	GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((u64)st_gva[cpu] | KVM_MSR_ENABLED));

	memset(st, 0, sizeof(*st));
	GUEST_SYNC(0);

	check_status(st);
	/* Publish the first sample for the host to validate against run_delay. */
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	version = READ_ONCE(st->version);
	check_status(st);
	GUEST_SYNC(1);

	check_status(st);
	/* The host must have updated the record while the vCPU was out. */
	GUEST_ASSERT(version < READ_ONCE(st->version));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}
64
/* x86: steal time is advertised via the KVM paravirt CPUID feature leaf. */
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has(X86_FEATURE_KVM_STEAL_TIME);
}
69
/*
 * Assign vCPU i its slice of the steal-time memslot and enable steal
 * time accounting by writing the address (with the enable bit) to
 * MSR_KVM_STEAL_TIME.
 */
static void steal_time_init(struct kvm_vcpu *vcpu, u32 i)
{
	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vcpu->vm, st_gva[i]);

	vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
}
78
/* Dump the raw steal_time record of one vCPU (verbose mode only). */
static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx)
{
	struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

	ksft_print_msg("VCPU%d:\n", vcpu_idx);
	ksft_print_msg("    steal:     %lld\n", st->steal);
	ksft_print_msg("    version:   %d\n", st->version);
	ksft_print_msg("    flags:     %d\n", st->flags);
	ksft_print_msg("    preempted: %d\n", st->preempted);
	ksft_print_msg("    u8_pad:    %d %d %d\n",
		       st->u8_pad[0], st->u8_pad[1], st->u8_pad[2]);
	ksft_print_msg("    pad:       %d %d %d %d %d %d %d %d %d %d %d\n",
		       st->pad[0], st->pad[1], st->pad[2], st->pad[3],
		       st->pad[4], st->pad[5], st->pad[6], st->pad[7],
		       st->pad[8], st->pad[9], st->pad[10]);
}
95
/*
 * UAPI negative test (x86): writing MSR_KVM_STEAL_TIME with reserved
 * bits set must be rejected (_vcpu_set_msr() reports 0 MSRs written).
 */
static void check_steal_time_uapi(void)
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	int ret;

	vm = vm_create_with_one_vcpu(&vcpu, NULL);

	ret = _vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME,
			    (ulong)ST_GPA_BASE | KVM_STEAL_RESERVED_MASK);
	TEST_ASSERT(ret == 0, "Bad GPA didn't fail");

	kvm_vm_free(vm);
}
110
111 #elif defined(__aarch64__)
112
113 /* PV_TIME_ST must have 64-byte alignment */
114 #define STEAL_TIME_SIZE ((sizeof(struct st_time) + 63) & ~63)
115
116 #define SMCCC_ARCH_FEATURES 0x80000001
117 #define PV_TIME_FEATURES 0xc5000020
118 #define PV_TIME_ST 0xc5000021
119
/* Layout of the ARM paravirtualized stolen-time shared page. */
struct st_time {
	u32 rev;	/* structure revision */
	u32 attr;	/* attributes, currently expected to be 0 */
	u64 st_time;	/* accumulated stolen time */
};
125
/* Issue a single-argument SMCCC call and return its primary result (a0). */
static s64 smccc(u32 func, u64 arg)
{
	struct arm_smccc_res res;

	do_smccc(func, arg, 0, 0, 0, 0, 0, 0, &res);
	return res.a0;
}
133
/* Assert the PV time page advertises revision 0 and no attributes. */
static void check_status(struct st_time *st)
{
	GUEST_ASSERT_EQ(READ_ONCE(st->rev), 0);
	GUEST_ASSERT_EQ(READ_ONCE(st->attr), 0);
}
139
/*
 * Guest-side test body (aarch64): discover the PV time services via
 * SMCCC, confirm PV_TIME_ST returns the IPA the host configured, then
 * sample stolen time around two host syncs.
 */
static void guest_code(int cpu)
{
	struct st_time *st;
	s64 status;

	/* Probe: SMCCC arch features -> PV_TIME_FEATURES -> PV_TIME_ST. */
	status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT_EQ(status, 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT_EQ(status, 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_ST);
	GUEST_ASSERT_EQ(status, 0);

	/* PV_TIME_ST returns the stolen-time page address for this vCPU. */
	status = smccc(PV_TIME_ST, 0);
	GUEST_ASSERT_NE(status, -1);
	GUEST_ASSERT_EQ(status, (ulong)st_gva[cpu]);

	st = (struct st_time *)status;
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_SYNC(1);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_DONE();
}
167
/* aarch64: supported iff the vCPU exposes the PVTIME device attribute. */
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
	};

	return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
}
177
/*
 * Assign vCPU i its slice of the steal-time memslot and configure the
 * stolen-time IPA through the PVTIME device attribute.
 */
static void steal_time_init(struct kvm_vcpu *vcpu, u32 i)
{
	struct kvm_vm *vm = vcpu->vm;
	u64 st_ipa;

	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
		.addr = (u64)&st_ipa,
	};

	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vm, st_gva[i]);

	st_ipa = (ulong)st_gva[i];
	vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
}
196
/* Dump the raw st_time record of one vCPU (verbose mode only). */
static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx)
{
	struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

	ksft_print_msg("VCPU%d:\n", vcpu_idx);
	ksft_print_msg("    rev:     %d\n", st->rev);
	ksft_print_msg("    attr:    %d\n", st->attr);
	ksft_print_msg("    st_time: %ld\n", st->st_time);
}
206
/*
 * UAPI testing (aarch64): a misaligned PVTIME IPA must fail with
 * EINVAL, a valid IPA must succeed once, and setting it a second time
 * must fail with EEXIST.
 */
static void check_steal_time_uapi(void)
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	u64 st_ipa;
	int ret;

	vm = vm_create_with_one_vcpu(&vcpu, NULL);

	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
		.addr = (u64)&st_ipa,
	};

	vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
	/* Back the IPA with real, identity-mapped guest memory. */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, 1, 0);
	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, 1);

	/* Odd address: must be rejected as misaligned. */
	st_ipa = (ulong)ST_GPA_BASE | 1;
	ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
	TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");

	st_ipa = (ulong)ST_GPA_BASE;
	vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);

	/* The IPA may only be configured once per vCPU. */
	ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
	TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");

	kvm_vm_free(vm);
}
238
239 #elif defined(__riscv)
240
241 /* SBI STA shmem must have 64-byte alignment */
242 #define STEAL_TIME_SIZE ((sizeof(struct sta_struct) + 63) & ~63)
243
244 static gpa_t st_gpa[NR_VCPUS];
245
/* Layout of the RISC-V SBI STA (steal-time accounting) shared memory. */
struct sta_struct {
	u32 sequence;	/* odd while an update is in progress */
	u32 flags;
	u64 steal;	/* accumulated stolen time */
	u8 preempted;
	u8 pad[47];	/* pad the record to 64 bytes */
} __packed;
253
/*
 * Register (or, with an all-ones GPA, disable) the STA shared memory
 * via the SBI STA extension's set-shmem call, splitting the GPA into
 * lo/hi halves as the SBI ABI requires.
 */
static void sta_set_shmem(gpa_t gpa, unsigned long flags)
{
	unsigned long lo = (unsigned long)gpa;
#if __riscv_xlen == 32
	unsigned long hi = (unsigned long)(gpa >> 32);
#else
	/* On rv64 the hi word is only meaningful for the all-ones "disable" GPA. */
	unsigned long hi = gpa == -1 ? -1 : 0;
#endif
	struct sbiret ret = sbi_ecall(SBI_EXT_STA, 0, lo, hi, flags, 0, 0, 0);

	GUEST_ASSERT(ret.value == 0 && ret.error == 0);
}
266
/*
 * Assert the STA record is quiescent and sane: sequence even (odd
 * means an update is in flight), no flags, not preempted.
 */
static void check_status(struct sta_struct *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->sequence) & 1));
	GUEST_ASSERT(READ_ONCE(st->flags) == 0);
	GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
}
273
/*
 * Guest-side test body (riscv): probe the SBI STA extension, register
 * this vCPU's shmem, then sample stolen time around two host syncs and
 * check the sequence counter advances.
 */
static void guest_code(int cpu)
{
	struct sta_struct *st = st_gva[cpu];
	u32 sequence;
	long out_val = 0;
	bool probe;

	probe = guest_sbi_probe_extension(SBI_EXT_STA, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	sta_set_shmem(st_gpa[cpu], 0);
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	sequence = READ_ONCE(st->sequence);
	check_status(st);
	GUEST_SYNC(1);

	check_status(st);
	/* The host must have updated the record while the vCPU was out. */
	GUEST_ASSERT(sequence < READ_ONCE(st->sequence));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}
299
/* riscv: supported iff the STA SBI extension register reads as enabled. */
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	u64 id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA);
	unsigned long enabled = vcpu_get_reg(vcpu, id);

	TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result");

	return enabled;
}
309
/*
 * Assign vCPU i its slice of the steal-time memslot; the guest itself
 * registers the shmem (see guest_code()), so just publish both the GVA
 * and the translated GPA to the guest.
 */
static void steal_time_init(struct kvm_vcpu *vcpu, u32 i)
{
	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	st_gpa[i] = addr_gva2gpa(vcpu->vm, (gva_t)st_gva[i]);
	sync_global_to_guest(vcpu->vm, st_gva[i]);
	sync_global_to_guest(vcpu->vm, st_gpa[i]);
}
318
/* Dump the raw sta_struct record of one vCPU (verbose mode only). */
static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx)
{
	struct sta_struct *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
	int i;

	pr_info("VCPU%d:\n", vcpu_idx);
	pr_info("    sequence:  %d\n", st->sequence);
	pr_info("    flags:     %d\n", st->flags);
	pr_info("    steal:     %"PRIu64"\n", st->steal);
	pr_info("    preempted: %d\n", st->preempted);
	pr_info("    pad:      ");
	for (i = 0; i < 47; ++i)
		pr_info("%d", st->pad[i]);
	pr_info("\n");
}
334
check_steal_time_uapi(void)335 static void check_steal_time_uapi(void)
336 {
337 struct kvm_vm *vm;
338 struct kvm_vcpu *vcpu;
339 struct kvm_one_reg reg;
340 u64 shmem;
341 int ret;
342
343 vm = vm_create_with_one_vcpu(&vcpu, NULL);
344
345 reg.id = KVM_REG_RISCV |
346 KVM_REG_SIZE_ULONG |
347 KVM_REG_RISCV_SBI_STATE |
348 KVM_REG_RISCV_SBI_STA |
349 KVM_REG_RISCV_SBI_STA_REG(shmem_lo);
350 reg.addr = (u64)&shmem;
351
352 shmem = ST_GPA_BASE + 1;
353 ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®);
354 TEST_ASSERT(ret == -1 && errno == EINVAL,
355 "misaligned STA shmem returns -EINVAL");
356
357 shmem = ST_GPA_BASE;
358 ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®);
359 TEST_ASSERT(ret == 0,
360 "aligned STA shmem succeeds");
361
362 shmem = INVALID_GPA;
363 ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, ®);
364 TEST_ASSERT(ret == 0,
365 "all-ones for STA shmem succeeds");
366
367 kvm_vm_free(vm);
368 }
369
370 #elif defined(__loongarch__)
371
372 /* steal_time must have 64-byte alignment */
373 #define STEAL_TIME_SIZE ((sizeof(struct kvm_steal_time) + 63) & ~63)
374 #define KVM_STEAL_PHYS_VALID BIT_ULL(0)
375
/* Layout of the LoongArch paravirt steal-time record (64 bytes total). */
struct kvm_steal_time {
	__u64 steal;	/* accumulated stolen time */
	__u32 version;	/* odd while an update is in progress */
	__u32 flags;
	__u8 preempted;
	__u8 pad[47];	/* pad the record to 64 bytes */
};
383
/*
 * Assert the steal_time record is quiescent and sane: version even
 * (odd means an update is in flight), no flags, not preempted.
 */
static void check_status(struct kvm_steal_time *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
	GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0);
	GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0);
}
390
/*
 * Guest-side test body (loongarch): sample stolen time around two host
 * syncs, checking the record stays consistent and the version advances.
 */
static void guest_code(int cpu)
{
	u32 version;
	struct kvm_steal_time *st = st_gva[cpu];

	memset(st, 0, sizeof(*st));
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	version = READ_ONCE(st->version);
	check_status(st);
	GUEST_SYNC(1);

	check_status(st);
	/* The host must have updated the record while the vCPU was out. */
	GUEST_ASSERT(version < READ_ONCE(st->version));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}
411
/*
 * loongarch: supported iff the CPUCFG KVM feature word can be read and
 * has the steal-time feature bit set.
 */
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
	int err;
	u64 val;
	struct kvm_device_attr attr = {
		.group = KVM_LOONGARCH_VCPU_CPUCFG,
		.attr = CPUCFG_KVM_FEATURE,
		.addr = (u64)&val,
	};

	err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr);
	if (err)
		return false;

	err = __vcpu_ioctl(vcpu, KVM_GET_DEVICE_ATTR, &attr);
	if (err)
		return false;

	return val & BIT(KVM_FEATURE_STEAL_TIME);
}
432
/*
 * Assign vCPU i its slice of the steal-time memslot and program the
 * GPA (with the valid bit) through the PVTIME device attribute.
 */
static void steal_time_init(struct kvm_vcpu *vcpu, u32 i)
{
	int err;
	u64 st_gpa;
	struct kvm_vm *vm = vcpu->vm;
	struct kvm_device_attr attr = {
		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
		.attr = KVM_LOONGARCH_VCPU_PVTIME_GPA,
		.addr = (u64)&st_gpa,
	};

	/* ST_GPA_BASE is identity mapped */
	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
	sync_global_to_guest(vm, st_gva[i]);

	err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr);
	TEST_ASSERT(err == 0, "No PV stealtime Feature");

	st_gpa = (unsigned long)st_gva[i] | KVM_STEAL_PHYS_VALID;
	err = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &attr);
	TEST_ASSERT(err == 0, "Fail to set PV stealtime GPA");
}
455
/* Dump the raw steal_time record of one vCPU (verbose mode only). */
static void steal_time_dump(struct kvm_vm *vm, u32 vcpu_idx)
{
	struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);

	ksft_print_msg("VCPU%d:\n", vcpu_idx);
	ksft_print_msg("    steal:     %lld\n", st->steal);
	ksft_print_msg("    flags:     %d\n", st->flags);
	ksft_print_msg("    version:   %d\n", st->version);
	ksft_print_msg("    preempted: %d\n", st->preempted);
}
466
/* loongarch has no extra UAPI corner cases to exercise (intentional no-op). */
static void check_steal_time_uapi(void)
{

}
471 #endif
472
do_steal_time(void * arg)473 static void *do_steal_time(void *arg)
474 {
475 struct timespec ts, stop;
476
477 clock_gettime(CLOCK_MONOTONIC, &ts);
478 stop = timespec_add_ns(ts, MIN_RUN_DELAY_NS);
479
480 while (1) {
481 clock_gettime(CLOCK_MONOTONIC, &ts);
482 if (timespec_to_ns(timespec_sub(ts, stop)) >= 0)
483 break;
484 }
485
486 return NULL;
487 }
488
/*
 * Run the vCPU until its next ucall; SYNC and DONE are expected, an
 * ABORT re-raises the guest assertion, anything else fails the test.
 */
static void run_vcpu(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	vcpu_run(vcpu);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
	case UCALL_DONE:
		break;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		/* REPORT_GUEST_ASSERT() presumably fails the test and does not
		 * return, so falling through here should be unreachable. */
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu->run->exit_reason));
	}
}
506
/*
 * Test flow, per vCPU:
 *   1. enable steal time and let the guest sample it (must be <= the
 *      task's run_delay from the scheduler),
 *   2. steal CPU time with a thread pinned to the same physical CPU,
 *   3. re-run the guest and require its stolen-time delta to be >= the
 *      observed run_delay delta.
 */
int main(int ac, char **av)
{
	struct kvm_vcpu *vcpus[NR_VCPUS];
	struct kvm_vm *vm;
	pthread_attr_t attr;
	pthread_t thread;
	cpu_set_t cpuset;
	unsigned int gpages;
	long stolen_time;
	long run_delay;
	bool verbose;
	int i;

	verbose = ac > 1 && (!strncmp(av[1], "-v", 3) || !strncmp(av[1], "--verbose", 10));

	/* Set CPU affinity so we can force preemption of the VCPU */
	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	pthread_attr_init(&attr);
	pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

	/* Create a VM and an identity mapped memslot for the steal time structure */
	vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);

	ksft_print_header();
	TEST_REQUIRE(is_steal_time_supported(vcpus[0]));
	ksft_set_plan(NR_VCPUS);

	check_steal_time_uapi();

	/* Run test on each VCPU */
	for (i = 0; i < NR_VCPUS; ++i) {
		steal_time_init(vcpus[i], i);

		vcpu_args_set(vcpus[i], 1, i);

		/* First VCPU run initializes steal-time */
		run_vcpu(vcpus[i]);

		/* Second VCPU run, expect guest stolen time to be <= run_delay */
		run_vcpu(vcpus[i]);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i];
		run_delay = get_run_delay();
		TEST_ASSERT(stolen_time <= run_delay,
			    "Expected stolen time <= %ld, got %ld",
			    run_delay, stolen_time);

		/* Steal time from the VCPU. The steal time thread has the same CPU affinity as the VCPUs. */
		run_delay = get_run_delay();
		pthread_create(&thread, &attr, do_steal_time, NULL);
		/* Yield until the stealing thread has provably delayed this task. */
		do
			sched_yield();
		while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
		pthread_join(thread, NULL);
		run_delay = get_run_delay() - run_delay;
		TEST_ASSERT(run_delay >= MIN_RUN_DELAY_NS,
			    "Expected run_delay >= %ld, got %ld",
			    MIN_RUN_DELAY_NS, run_delay);

		/* Run VCPU again to confirm stolen time is consistent with run_delay */
		run_vcpu(vcpus[i]);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i] - stolen_time;
		TEST_ASSERT(stolen_time >= run_delay,
			    "Expected stolen time >= %ld, got %ld",
			    run_delay, stolen_time);

		if (verbose) {
			ksft_print_msg("VCPU%d: total-stolen-time=%ld test-stolen-time=%ld%s\n",
				       i, guest_stolen_time[i], stolen_time,
				       stolen_time == run_delay ?
				       " (BONUS: guest test-stolen-time even exactly matches test-run_delay)" : "");
			steal_time_dump(vm, i);
		}
		ksft_test_result_pass("vcpu%d\n", i);
	}

	/* Print results and exit() accordingly */
	ksft_finished();
}
592