Lines Matching +full:clock +full:- +full:skip

49 #define STEALCLOCK_NOT_AVAILABLE (-1)
55 u64 clock; /* stolen time counter in units of vtsc */
57 /* only for little-endian */
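
Lines 55 and 57 belong to the per-cpu steal-time record that the guest shares with the hypervisor. A minimal sketch of how such a record can expose the counter both as one 64-bit field and as a low/high pair for 32-bit readers, inferred from the clock_low/clock_high reads at lines 238-244 below (the union layout and the reserved padding are assumptions):

struct vmware_steal_time {
        union {
                u64 clock;      /* stolen time counter in units of vtsc */
                struct {
                        /* only for little-endian */
                        u32 clock_low;
                        u32 clock_high;
                };
        };
        u64 reserved[7];        /* assumed padding of the shared record */
};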
155 early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
162 early_param("no-steal-acc", parse_no_stealacc);
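
Both parameters are boot-time kill switches. A plausible shape for the two handlers, assuming each one merely clears a flag that the setup code consults later (the vmw_sched_clock and steal_acc names are assumptions):

static bool vmw_sched_clock __initdata = true;  /* assumed flag */
static bool steal_acc __initdata = true;        /* assumed flag */

static int __init setup_vmw_sched_clock(char *s)
{
        vmw_sched_clock = false;
        return 0;
}

static int __init parse_no_stealacc(char *arg)
{
        steal_acc = false;
        return 0;
}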
170 ns -= vmware_cyc2ns.cyc2ns_offset; in vmware_sched_clock()
179 clocks_calc_mult_shift(&d->cyc2ns_mul, &d->cyc2ns_shift, in vmware_cyc2ns_setup()
181 d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul, in vmware_cyc2ns_setup()
182 d->cyc2ns_shift); in vmware_cyc2ns_setup()
184 pr_info("using clock offset of %llu ns\n", d->cyc2ns_offset); in vmware_cyc2ns_setup()
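
clocks_calc_mult_shift() picks a (mul, shift) pair such that ns ≈ cycles * mul >> shift for the given TSC frequency, and the offset recorded here anchors the scheduler clock near zero at setup time. A sketch of the reader that consumes these values, consistent with line 170 above (the exact body is an assumption):

static u64 notrace vmware_sched_clock(void)
{
        u64 ns;

        /* TSC ticks to nanoseconds: cycles * cyc2ns_mul >> cyc2ns_shift */
        ns = mul_u64_u32_shr(rdtsc(), vmware_cyc2ns.cyc2ns_mul,
                             vmware_cyc2ns.cyc2ns_shift);
        /* subtract the setup-time reading so the clock starts near zero */
        ns -= vmware_cyc2ns.cyc2ns_offset;
        return ns;
}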
217 * vmware_steal_clock() - read the per-cpu steal clock
218 * @cpu: the cpu number whose steal clock we want to read
220 The function reads the steal clock if we are on a 64-bit system, otherwise reads it in parts, checking that the high part didn't change in the meantime.
225 * The steal clock reading in ns.
230 u64 clock; in vmware_steal_clock()
233 clock = READ_ONCE(steal->clock); in vmware_steal_clock()
238 initial_high = READ_ONCE(steal->clock_high); in vmware_steal_clock()
241 low = READ_ONCE(steal->clock_low); in vmware_steal_clock()
244 high = READ_ONCE(steal->clock_high); in vmware_steal_clock()
247 clock = ((u64)high << 32) | low; in vmware_steal_clock()
250 return mul_u64_u32_shr(clock, vmware_cyc2ns.cyc2ns_mul, in vmware_steal_clock()
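
The fragments at lines 238-247 are the body of a retry loop: a 32-bit kernel cannot read the 64-bit counter atomically, so the high half is sampled before and after the low half and the read repeats until both samples agree. A sketch of the complete read under that assumption (the barrier placement is an assumption as well):

        if (IS_ENABLED(CONFIG_64BIT))
                clock = READ_ONCE(steal->clock);
        else {
                u32 initial_high, low, high;

                do {
                        initial_high = READ_ONCE(steal->clock_high);
                        /* do not reorder the high read below the low read */
                        virt_rmb();
                        low = READ_ONCE(steal->clock_low);
                        /* keep the second high read after the low read */
                        virt_rmb();
                        high = READ_ONCE(steal->clock_high);
                } while (initial_high != high);

                clock = ((u64)high << 32) | low;
        }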
267 pr_info("vmware-stealtime: cpu %d, pa %llx\n", in vmware_register_steal_time()
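
The pr_info() at line 267 is the tail of the registration path, which hands the physical address of the per-cpu record to the hypervisor so it can account stolen time there. A sketch of that path, assuming a stealclock_enable() hypercall wrapper, a has_steal_clock flag, and a vmw_steal_time per-cpu variable (all three names are assumptions):

static DEFINE_PER_CPU_DECRYPTED(struct vmware_steal_time, vmw_steal_time) __aligned(64);

static void vmware_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct vmware_steal_time *st = &per_cpu(vmw_steal_time, cpu);

        if (!has_steal_clock)
                return;

        /* tell the hypervisor where this cpu's stolen time lives */
        if (!stealclock_enable(slow_virt_to_phys(st))) {
                has_steal_clock = false;
                return;
        }

        pr_info("vmware-stealtime: cpu %d, pa %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}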
355 /* We use reboot notifier only to disable steal clock */ in vmware_paravirt_ops_setup()
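
A reboot notifier is a lightweight way to shut the steal clock off before restart, so the hypervisor stops writing into memory the next kernel will reuse. A sketch of such a notifier (the callback and disable-helper names are assumptions):

static int vmware_pv_reboot_notify(struct notifier_block *nb,
                                   unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                vmware_disable_steal_time();    /* assumed helper */
        return NOTIFY_DONE;
}

static struct notifier_block vmware_pv_reboot_nb = {
        .notifier_call = vmware_pv_reboot_notify,
};

/* in vmware_paravirt_ops_setup(): */
        register_reboot_notifier(&vmware_pv_reboot_nb);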
383 * long time. To skip these checks at runtime we set these capability bits,
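
The truncated comment at line 383 explains why the kernel's TSC sync and watchdog checks are skipped on this platform; the "capability bits" it refers to are plausibly the constant/reliable-TSC feature flags. A sketch under that assumption:

static void __init vmware_set_capabilities(void)
{
        /* trust the hypervisor's virtual TSC; skip runtime sanity checks */
        setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC);
        setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
}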
424 /* Skip lapic calibration since we know the bus frequency. */ in vmware_platform_setup()
426 pr_info("Host bus clock speed read from hypervisor : %u Hz\n", in vmware_platform_setup()
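
Because the hypervisor reports the bus frequency directly, the local APIC timer period can be computed rather than measured at boot. A sketch of the surrounding block, assuming ecx holds the hypervisor-reported bus frequency in Hz:

#ifdef CONFIG_X86_LOCAL_APIC
        /* Skip lapic calibration since we know the bus frequency. */
        lapic_timer_period = ecx / HZ;
        pr_info("Host bus clock speed read from hypervisor : %u Hz\n",
                ecx);
#endif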
486 /* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */
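
The check behind this comment asks the hypervisor whether this vCPU may use x2APIC mode even though no interrupt remapping is present. A plausible shape, assuming a VMWARE_CMD-style hypercall that returns per-vCPU feature bits in eax (the command name and bit names are assumptions):

static bool __init vmware_legacy_x2apic_available(void)
{
        u32 eax, ebx, ecx, edx;

        VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);   /* assumed macro */
        return !(eax & BIT(VCPU_INFO_RESERVED)) &&      /* reply is valid */
                (eax & BIT(VCPU_INFO_LEGACY_X2APIC));   /* x2apic allowed */
}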
553 ghcb_set_rip(ghcb, regs->ip); in vmware_sev_es_hcall_prepare()
554 ghcb_set_rbx(ghcb, regs->bx); in vmware_sev_es_hcall_prepare()
555 ghcb_set_rcx(ghcb, regs->cx); in vmware_sev_es_hcall_prepare()
556 ghcb_set_rdx(ghcb, regs->dx); in vmware_sev_es_hcall_prepare()
557 ghcb_set_rsi(ghcb, regs->si); in vmware_sev_es_hcall_prepare()
558 ghcb_set_rdi(ghcb, regs->di); in vmware_sev_es_hcall_prepare()
559 ghcb_set_rbp(ghcb, regs->bp); in vmware_sev_es_hcall_prepare()
572 regs->bx = ghcb_get_rbx(ghcb); in vmware_sev_es_hcall_finish()
573 regs->cx = ghcb_get_rcx(ghcb); in vmware_sev_es_hcall_finish()
574 regs->dx = ghcb_get_rdx(ghcb); in vmware_sev_es_hcall_finish()
575 regs->si = ghcb_get_rsi(ghcb); in vmware_sev_es_hcall_finish()
576 regs->di = ghcb_get_rdi(ghcb); in vmware_sev_es_hcall_finish()
577 regs->bp = ghcb_get_rbp(ghcb); in vmware_sev_es_hcall_finish()
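
Under SEV-ES the hypervisor cannot read encrypted guest registers, so the prepare hook above copies the hypercall arguments into the shared GHCB and the finish hook copies the results back. Before trusting those results, the finish hook should verify that the hypervisor actually wrote each field; a sketch of that guard around the copies at lines 572-577, assuming the ghcb_*_is_valid() accessors that pair with the getters above:

static bool vmware_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
        /* refuse results the hypervisor did not mark as written */
        if (!(ghcb_rbx_is_valid(ghcb) &&
              ghcb_rcx_is_valid(ghcb) &&
              ghcb_rdx_is_valid(ghcb) &&
              ghcb_rsi_is_valid(ghcb) &&
              ghcb_rdi_is_valid(ghcb) &&
              ghcb_rbp_is_valid(ghcb)))
                return false;

        regs->bx = ghcb_get_rbx(ghcb);
        regs->cx = ghcb_get_rcx(ghcb);
        regs->dx = ghcb_get_rdx(ghcb);
        regs->si = ghcb_get_rsi(ghcb);
        regs->di = ghcb_get_rdi(ghcb);
        regs->bp = ghcb_get_rbp(ghcb);

        return true;
}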