Lines Matching +full:p +full:-states (arch/x86/kernel/kvmclock.c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
36 early_param("no-kvmclock", parse_no_kvmclock);
43 early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
71 return -ENODEV; in kvm_set_wallclock()
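Line 71 is the entire body of the set_wallclock hook: a KVM guest cannot push a wall-clock time back to the host, so the op simply fails with -ENODEV. A sketch of the wall-clock pair, assuming the mainline shape (the read side asks the hypervisor to fill wall_clock by writing its physical address to the wall-clock MSR):

static void kvm_get_wallclock(struct timespec64 *now)
{
	wrmsrl(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock));
	preempt_disable();
	pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now);
	preempt_enable();
}

static int kvm_set_wallclock(const struct timespec64 *now)
{
	return -ENODEV;	/* the guest cannot set the host's wall clock */
}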
91 return pvclock_clocksource_read_nowd(this_cpu_pvti()) - kvm_sched_clock_offset; in kvm_sched_clock_read()
101 pr_info("kvm-clock: using sched offset of %llu cycles", in kvm_sched_clock_init()
105 sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time)); in kvm_sched_clock_init()
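Lines 101-105 are from kvm_sched_clock_init(). It snapshots the current kvmclock reading into kvm_sched_clock_offset (the value subtracted on line 91 so that sched_clock() counts from boot) and installs kvm_sched_clock_read() as the paravirt sched clock. A sketch; the installation hook has been spelled differently across versions (direct pv_ops assignment vs. paravirt_set_sched_clock()):

static void __init kvm_sched_clock_init(bool stable)
{
	if (!stable)
		clear_sched_clock_stable();

	kvm_sched_clock_offset = kvm_clock_read();
	paravirt_set_sched_clock(kvm_sched_clock_read);

	pr_info("kvm-clock: using sched offset of %llu cycles",
		kvm_sched_clock_offset);

	/* The offset must fit the pvti system_time field it derives from. */
	BUILD_BUG_ON(sizeof(kvm_sched_clock_offset) >
		     sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
}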
110 * will calibrate under heavy load - thus, getting a lower lpj -
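Line 110 belongs to the comment above kvm_get_preset_lpj(): presetting loops-per-jiffy from the hypervisor-reported TSC frequency avoids calibrating while the host is loaded, which would yield a too-low lpj and hence delay loops that are too short once the load goes away. The computation is just a kHz to cycles-per-jiffy conversion, roughly per mainline:

static void __init kvm_get_preset_lpj(void)
{
	unsigned long khz;
	u64 lpj;

	khz = kvm_get_tsc_khz();

	/* lpj = (khz * 1000) / HZ, i.e. TSC cycles per jiffy */
	lpj = ((u64)khz * 1000);
	do_div(lpj, HZ);
	preset_lpj = lpj;
}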
143 if ((src->pvti.flags & PVCLOCK_GUEST_STOPPED) != 0) { in kvm_check_and_clear_guest_paused()
144 src->pvti.flags &= ~PVCLOCK_GUEST_STOPPED; in kvm_check_and_clear_guest_paused()
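Lines 143-144 are the heart of kvm_check_and_clear_guest_paused(): the hypervisor sets PVCLOCK_GUEST_STOPPED in the per-CPU pvti flags after the VM was paused, and the watchdogs call this helper to consume the flag instead of reporting a soft lockup for time the guest never ran. Sketch of the full function, per mainline:

bool kvm_check_and_clear_guest_paused(void)
{
	struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
	bool ret = false;

	if (!src)
		return ret;

	if ((src->pvti.flags & PVCLOCK_GUEST_STOPPED) != 0) {
		src->pvti.flags &= ~PVCLOCK_GUEST_STOPPED;
		pvclock_touch_watchdogs();
		ret = true;
	}
	return ret;
}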
158 .name = "kvm-clock",
175 pa = slow_virt_to_phys(&src->pvti) | 0x01ULL; in kvm_register_clock()
177 pr_debug("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt); in kvm_register_clock()
206 struct page *p; in kvmclock_init_mem() local
212 ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE; in kvmclock_init_mem()
215 p = alloc_pages(GFP_KERNEL, order); in kvmclock_init_mem()
216 if (!p) { in kvmclock_init_mem()
221 hvclock_mem = page_address(p); in kvmclock_init_mem()
231 __free_pages(p, order); in kvmclock_init_mem()
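Lines 206-231 are from kvmclock_init_mem(), which allocates pvti pages for CPUs beyond the static hv_clock_boot[] array; the __free_pages() on line 231 is the unwind path when the pages cannot be shared with the host. A sketch, assuming the current cc_platform_has() spelling of the memory-encryption check (older trees used sev_active()):

static void __init kvmclock_init_mem(void)
{
	unsigned long ncpus;
	unsigned int order;
	struct page *p;
	int r;

	if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus())
		return;

	ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
	order = get_order(ncpus * sizeof(*hvclock_mem));

	p = alloc_pages(GFP_KERNEL, order);
	if (!p) {
		pr_warn("%s: failed to alloc %d pages", __func__, (1U << order));
		return;
	}

	hvclock_mem = page_address(p);

	/* The hypervisor writes these pages: map them decrypted under SEV. */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		r = set_memory_decrypted((unsigned long)hvclock_mem,
					 1UL << order);
		if (r) {
			__free_pages(p, order);
			hvclock_mem = NULL;
			pr_warn("kvmclock: Failed to decrypt memory.");
			return;
		}
	}

	memset(hvclock_mem, 0, PAGE_SIZE << order);
}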
266 struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu); in kvmclock_setup_percpu() local
273 if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0))) in kvmclock_setup_percpu()
278 p = &hv_clock_boot[cpu]; in kvmclock_setup_percpu()
280 p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE; in kvmclock_setup_percpu()
282 return -ENOMEM; in kvmclock_setup_percpu()
284 per_cpu(hv_clock_per_cpu, cpu) = p; in kvmclock_setup_percpu()
285 return p ? 0 : -ENOMEM; in kvmclock_setup_percpu()
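Lines 266-285 form kvmclock_setup_percpu(), the CPU-hotplug prepare callback that picks each CPU's pvti slot: CPU 0 (set up in init) and CPUs whose pointer is already fixed are skipped, the first HVC_BOOT_ARRAY_SIZE CPUs use the static boot array, and the rest index into the hvclock_mem pool allocated above. Sketch, per mainline:

static int kvmclock_setup_percpu(unsigned int cpu)
{
	struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);

	/*
	 * The per-cpu area setup replicates CPU0's pointer to all CPUs,
	 * so a non-boot CPU whose pointer already differs is done.
	 */
	if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
		return 0;

	/* Use the static page for the first CPUs, allocate otherwise */
	if (cpu < HVC_BOOT_ARRAY_SIZE)
		p = &hv_clock_boot[cpu];
	else if (hvclock_mem)
		p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE;
	else
		return -ENOMEM;

	per_cpu(hv_clock_per_cpu, cpu) = p;
	return p ? 0 : -ENOMEM;
}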
310 pr_info("kvm-clock: Using msrs %x and %x", in kvmclock_init()
336 * with P/T states and does not stop in deep C-states. in kvmclock_init()
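Lines 310 and 336 are both from kvmclock_init(): the pr_info reports which MSR pair is in use (the KVM_FEATURE_CLOCKSOURCE2 pair when available, the legacy pair otherwise), and the P/T-state comment sits above the final rating tweak: a constant, non-stop TSC means the guest can use the TSC clocksource directly, so kvm-clock is demoted just below it. Roughly, per mainline (the 299/300 rating values are as I recall them):

	/*
	 * X86_FEATURE_NONSTOP_TSC is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * An invariant TSC exposed by the host means kvmclock is not
	 * strictly necessary: the TSC itself can be the clocksource.
	 */
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
	    boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
	    !check_tsc_unstable())
		kvm_clock.rating = 299;	/* below the TSC clocksource's 300 */

	clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);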