Lines Matching +full:100 +full:ka
2356 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_write_system_time() local
2359 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) in kvm_write_system_time()
2362 ka->boot_vcpu_runs_old_kvmclock = old_msr; in kvm_write_system_time()
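The hits at 2356-2362 are kvm_write_system_time() latching whether the boot vCPU wrote the legacy kvmclock MSR, and requesting a masterclock re-evaluation only when that flag flips. A minimal user-space sketch of the latch-and-kick pattern (fake_arch and request_masterclock_update() are illustrative stand-ins, not kernel API):

#include <stdbool.h>
#include <stdio.h>

struct fake_arch {
	bool boot_vcpu_runs_old_kvmclock;
};

static void request_masterclock_update(void)
{
	puts("masterclock update requested");
}

static void write_system_time(struct fake_arch *ka, bool old_msr)
{
	/* Only re-evaluate the masterclock when the flag changes. */
	if (ka->boot_vcpu_runs_old_kvmclock != old_msr)
		request_masterclock_update();

	ka->boot_vcpu_runs_old_kvmclock = old_msr;
}

int main(void)
{
	struct fake_arch ka = { .boot_vcpu_runs_old_kvmclock = false };

	write_system_time(&ka, true);	/* flag flips: update requested */
	write_system_time(&ka, true);	/* no change: no request */
	return 0;
}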
2517 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching() local
2525 bool use_master_clock = (ka->nr_vcpus_matched_tsc + 1 == in kvm_track_tsc_matching()
2535 if ((ka->use_master_clock && new_generation) || in kvm_track_tsc_matching()
2536 (ka->use_master_clock != use_master_clock)) in kvm_track_tsc_matching()
2539 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
2541 ka->use_master_clock, gtod->clock.vclock_mode); in kvm_track_tsc_matching()
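Lines 2525-2541 show kvm_track_tsc_matching() asking two questions: counting the vCPU whose TSC is being written right now, do all online vCPUs have matching TSCs, and does that warrant a masterclock resync? Both predicates restated as a standalone sketch (signatures are illustrative):

#include <stdbool.h>

/* Masterclock is viable once every online vCPU's TSC matches; the +1
 * accounts for the vCPU whose TSC write triggered this check. */
static bool tsc_all_matched(int nr_matched, int online_vcpus)
{
	return nr_matched + 1 == online_vcpus;
}

/* Resync when a new TSC generation arrives while the masterclock is
 * in use, or when the use/don't-use decision changes. */
static bool need_masterclock_update(bool cur, bool next, bool new_generation)
{
	return (cur && new_generation) || (cur != next);
}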
3017 struct kvm_arch *ka = &kvm->arch; in pvclock_update_vm_gtod_copy() local
3022 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == in pvclock_update_vm_gtod_copy()
3030 &ka->master_kernel_ns, in pvclock_update_vm_gtod_copy()
3031 &ka->master_cycle_now); in pvclock_update_vm_gtod_copy()
3033 ka->use_master_clock = host_tsc_clocksource && vcpus_matched in pvclock_update_vm_gtod_copy()
3034 && !ka->backwards_tsc_observed in pvclock_update_vm_gtod_copy()
3035 && !ka->boot_vcpu_runs_old_kvmclock; in pvclock_update_vm_gtod_copy()
3037 if (ka->use_master_clock) in pvclock_update_vm_gtod_copy()
3041 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, in pvclock_update_vm_gtod_copy()
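Lines 3033-3035 are the heart of pvclock_update_vm_gtod_copy(): the masterclock is enabled only when the host runs on the TSC clocksource, all vCPU TSCs match, and neither a backwards TSC nor the legacy kvmclock MSR has been observed. The enable predicate in isolation:

#include <stdbool.h>

/* Sketch of the masterclock enable decision quoted above; all four
 * inputs correspond to the kvm_arch fields of the same names. */
static bool can_use_master_clock(bool host_tsc_clocksource,
				 bool vcpus_matched,
				 bool backwards_tsc_observed,
				 bool boot_vcpu_runs_old_kvmclock)
{
	return host_tsc_clocksource && vcpus_matched &&
	       !backwards_tsc_observed && !boot_vcpu_runs_old_kvmclock;
}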
3067 struct kvm_arch *ka = &kvm->arch; in kvm_end_pvclock_update() local
3071 write_seqcount_end(&ka->pvclock_sc); in kvm_end_pvclock_update()
3072 raw_spin_unlock_irq(&ka->tsc_write_lock); in kvm_end_pvclock_update()
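kvm_end_pvclock_update() at 3067-3072 closes the write side: the seqcount is ended before the IRQ-disabling tsc_write_lock is dropped, so lock-free readers can detect an in-flight update and retry. A simplified user-space analog of that bracketing (a pthread mutex stands in for raw_spin_lock_irq(), C11 atomics for the seqcount; a real seqlock also handles data-race formalities on the protected fields):

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t tsc_write_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_uint pvclock_seq;	/* even: idle, odd: write in progress */
static unsigned long long master_kernel_ns, master_cycle_now;

static void pvclock_update(unsigned long long ns, unsigned long long cyc)
{
	pthread_mutex_lock(&tsc_write_lock);
	/* write_seqcount_begin(): bump to odd before touching the data */
	atomic_fetch_add_explicit(&pvclock_seq, 1, memory_order_acquire);

	master_kernel_ns = ns;		/* the protected pvclock fields */
	master_cycle_now = cyc;

	/* write_seqcount_end(): back to even once the data is consistent */
	atomic_fetch_add_explicit(&pvclock_seq, 1, memory_order_release);
	pthread_mutex_unlock(&tsc_write_lock);
}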
3108 struct kvm_arch *ka = &kvm->arch; in __get_kvmclock() local
3115 if (ka->use_master_clock && in __get_kvmclock()
3128 hv_clock.tsc_timestamp = ka->master_cycle_now; in __get_kvmclock()
3129 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; in __get_kvmclock()
3135 data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset; in __get_kvmclock()
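__get_kvmclock() (3108-3135) either derives the clock from the masterclock snapshot plus a scaled TSC delta, or falls back to the host boot clock plus kvmclock_offset (line 3135). The scaled-delta arithmetic follows the pvclock ABI; a standalone sketch, with parameter names mirroring struct pvclock_vcpu_time_info (unsigned __int128 is a GCC/Clang extension):

#include <stdint.h>

/* ns = system_time + (((tsc - tsc_timestamp) << shift) * mul) >> 32 */
static uint64_t pvclock_read(uint64_t tsc, uint64_t tsc_timestamp,
			     uint64_t system_time,
			     uint32_t tsc_to_system_mul, int8_t tsc_shift)
{
	uint64_t delta = tsc - tsc_timestamp;

	if (tsc_shift >= 0)
		delta <<= tsc_shift;
	else
		delta >>= -tsc_shift;

	/* 128-bit intermediate avoids overflow of delta * mul */
	return system_time +
	       (uint64_t)(((unsigned __int128)delta * tsc_to_system_mul) >> 32);
}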
3143 struct kvm_arch *ka = &kvm->arch; in get_kvmclock() local
3147 seq = read_seqcount_begin(&ka->pvclock_sc); in get_kvmclock()
3149 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); in get_kvmclock()
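get_kvmclock() at 3143-3149 wraps __get_kvmclock() in the seqcount read loop, pairing with the writer shown earlier. A simplified user-space analog of read_seqcount_begin()/read_seqcount_retry() (names illustrative; the plain read of the snapshot is formally racy and kept only for brevity):

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint pvclock_seq;
static uint64_t clock_snapshot;

static uint64_t read_clock(void)
{
	unsigned int s1, s2;
	uint64_t val;

	do {
		s1 = atomic_load_explicit(&pvclock_seq, memory_order_acquire);
		val = clock_snapshot;	/* the __get_kvmclock() body goes here */
		atomic_thread_fence(memory_order_acquire);
		s2 = atomic_load_explicit(&pvclock_seq, memory_order_relaxed);
	} while ((s1 & 1) || s1 != s2);	/* retry if a writer was active */

	return val;
}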
3219 struct kvm_arch *ka = &v->kvm->arch; in kvm_guest_time_update() local
3232 ka->xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE; in kvm_guest_time_update()
3243 seq = read_seqcount_begin(&ka->pvclock_sc); in kvm_guest_time_update()
3244 use_master_clock = ka->use_master_clock; in kvm_guest_time_update()
3246 host_tsc = ka->master_cycle_now; in kvm_guest_time_update()
3247 kernel_ns = ka->master_kernel_ns; in kvm_guest_time_update()
3249 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); in kvm_guest_time_update()
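In kvm_guest_time_update() (3219-3249) each vCPU snapshots (master_cycle_now, master_kernel_ns) under the same seqcount, so every vCPU's pvclock page is generated from one (host TSC, ns) pair and the guest sees a single coherent timeline. The guest-visible TSC is then the host TSC run through per-vCPU scaling and offsetting; a purely illustrative fixed-point sketch (the kernel's actual ratio width and helpers differ by CPU vendor):

#include <stdint.h>

/* Illustrative 48.16 fixed-point scaling: guest_tsc = host_tsc * ratio
 * + offset. Not the kernel's exact kvm_scale_tsc()/kvm_read_l1_tsc(). */
static uint64_t guest_tsc_from_host(uint64_t host_tsc, uint64_t ratio_frac,
				    int64_t tsc_offset)
{
	uint64_t scaled = (uint64_t)((unsigned __int128)host_tsc * ratio_frac >> 16);

	return scaled + (uint64_t)tsc_offset;
}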
3349 struct kvm_arch *ka = &kvm->arch; in kvm_get_wall_clock_epoch() local
3355 seq = read_seqcount_begin(&ka->pvclock_sc); in kvm_get_wall_clock_epoch()
3358 if (!ka->use_master_clock) in kvm_get_wall_clock_epoch()
3380 hv_clock.tsc_timestamp = ka->master_cycle_now; in kvm_get_wall_clock_epoch()
3381 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; in kvm_get_wall_clock_epoch()
3383 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); in kvm_get_wall_clock_epoch()
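kvm_get_wall_clock_epoch() (3349-3383) answers "at what wall-clock time did kvmclock read zero?". Under the masterclock it rebuilds that from the snapshot shown above; otherwise it reduces to wall time minus the current kvmclock reading, as in this sketch (inputs illustrative):

#include <stdint.h>

/* epoch = wall clock now - kvmclock now: the wall-clock instant at
 * which the guest's kvmclock was zero. */
static uint64_t wall_clock_epoch(uint64_t wall_ns_now, uint64_t kvmclock_ns_now)
{
	return wall_ns_now - kvmclock_ns_now;
}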
3416 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
3422 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, in kvmclock_update_fn() local
3424 struct kvm *kvm = container_of(ka, struct kvm, arch); in kvmclock_update_fn()
3447 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, in kvmclock_sync_fn() local
3449 struct kvm *kvm = container_of(ka, struct kvm, arch); in kvmclock_sync_fn()
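Both work functions (3422-3449) recover their struct kvm with two container_of() hops: delayed work -> kvm_arch -> kvm, the latter because kvm_arch is embedded in struct kvm. A self-contained user-space demonstration of that pattern (fake_arch/fake_kvm are stand-ins for the kernel structs):

#include <stddef.h>
#include <stdio.h>

/* Recover the address of the enclosing object from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_arch { int work; };			/* stands in for kvm_arch */
struct fake_kvm { struct fake_arch arch; };	/* stands in for kvm */

int main(void)
{
	struct fake_kvm kvm;
	int *ptr = &kvm.arch.work;		/* the "work item" pointer */

	struct fake_arch *ka = container_of(ptr, struct fake_arch, work);
	struct fake_kvm *outer = container_of(ka, struct fake_kvm, arch);

	printf("recovered kvm: %p (expected %p)\n",
	       (void *)outer, (void *)&kvm);
	return 0;
}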
4306 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, in kvm_get_msr_common()
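The lone hit at 4306 is mid-comment in kvm_get_msr_common(), describing a model-specific bus-frequency encoding in which the value 000 in bits 18:16 denotes 100MHz. Extracting that field is a plain shift-and-mask, e.g. (illustrative helper, not kernel code):

#include <stdint.h>

/* Pull bits 18:16 out of an MSR value; per the comment above, 0 here
 * means 100MHz on the CPU models it names. */
static unsigned int msr_bus_freq_field(uint64_t msr_val)
{
	return (unsigned int)((msr_val >> 16) & 0x7);
}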
6983 struct kvm_arch *ka = &kvm->arch; in kvm_vm_ioctl_set_clock() local
7018 if (ka->use_master_clock) in kvm_vm_ioctl_set_clock()
7019 now_raw_ns = ka->master_kernel_ns; in kvm_vm_ioctl_set_clock()
7022 ka->kvmclock_offset = data.clock - now_raw_ns; in kvm_vm_ioctl_set_clock()
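Finally, kvm_vm_ioctl_set_clock() (6983-7022) implements KVM_SET_CLOCK by storing the delta between the requested clock and "now" (master_kernel_ns under the masterclock, the raw host clock otherwise), so that subsequent reads return raw time plus that offset. The math in isolation:

#include <stdint.h>

/* kvmclock_offset is chosen so the guest clock reads requested_clock
 * right now; later reads become now_raw_ns + kvmclock_offset. */
static int64_t set_clock_offset(uint64_t requested_clock, uint64_t now_raw_ns)
{
	return (int64_t)(requested_clock - now_raw_ns);
}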