Lines Matching +full:protected +full:- +full:clocks
1 // SPDX-License-Identifier: GPL-2.0
40 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_shared_info_init()
46 int idx = srcu_read_lock(&kvm->srcu); in kvm_xen_shared_info_init()
48 read_lock_irq(&gpc->lock); in kvm_xen_shared_info_init()
50 read_unlock_irq(&gpc->lock); in kvm_xen_shared_info_init()
56 read_lock_irq(&gpc->lock); in kvm_xen_shared_info_init()
65 /* Paranoia checks on the 32-bit struct layout */ in kvm_xen_shared_info_init()
71 /* Paranoia checks on the 64-bit struct layout */ in kvm_xen_shared_info_init()
75 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { in kvm_xen_shared_info_init()
76 struct shared_info *shinfo = gpc->khva; in kvm_xen_shared_info_init()
78 wc_sec_hi = &shinfo->wc_sec_hi; in kvm_xen_shared_info_init()
79 wc = &shinfo->wc; in kvm_xen_shared_info_init()
83 struct compat_shared_info *shinfo = gpc->khva; in kvm_xen_shared_info_init()
85 wc_sec_hi = &shinfo->arch.wc_sec_hi; in kvm_xen_shared_info_init()
86 wc = &shinfo->wc; in kvm_xen_shared_info_init()
90 wc_version = wc->version = (wc->version + 1) | 1; in kvm_xen_shared_info_init()
93 wc->nsec = do_div(wall_nsec, NSEC_PER_SEC); in kvm_xen_shared_info_init()
94 wc->sec = (u32)wall_nsec; in kvm_xen_shared_info_init()
98 wc->version = wc_version + 1; in kvm_xen_shared_info_init()
99 read_unlock_irq(&gpc->lock); in kvm_xen_shared_info_init()
104 srcu_read_unlock(&kvm->srcu, idx); in kvm_xen_shared_info_init()
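
The wallclock write above follows the pvclock version protocol: bump wc->version to an odd value before touching sec/nsec, then bump it back to even afterwards. A minimal guest-side reader sketch pairing with that writer (userspace-style C, with GCC builtins standing in for the guest's barriers; read_wallclock() is a hypothetical helper and the 64-bit-only wc_sec_hi field is omitted for brevity):

#include <stdint.h>

struct pvclock_wall_clock {		/* matches the layout used above */
	uint32_t version;
	uint32_t sec;
	uint32_t nsec;
};

static void read_wallclock(const volatile struct pvclock_wall_clock *wc,
			   uint32_t *sec, uint32_t *nsec)
{
	uint32_t ver;

	do {
		/* An odd version means a write is in flight; spin until even. */
		while ((ver = wc->version) & 1)
			;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* version before data */
		*sec  = wc->sec;
		*nsec = wc->nsec;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* data before re-check */
	} while (ver != wc->version);	/* retry if the writer raced us */
}
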
110 if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) { in kvm_xen_inject_timer_irqs()
113 e.vcpu_id = vcpu->vcpu_id; in kvm_xen_inject_timer_irqs()
114 e.vcpu_idx = vcpu->vcpu_idx; in kvm_xen_inject_timer_irqs()
115 e.port = vcpu->arch.xen.timer_virq; in kvm_xen_inject_timer_irqs()
118 kvm_xen_set_evtchn(&e, vcpu->kvm); in kvm_xen_inject_timer_irqs()
120 vcpu->arch.xen.timer_expires = 0; in kvm_xen_inject_timer_irqs()
121 atomic_set(&vcpu->arch.xen.timer_pending, 0); in kvm_xen_inject_timer_irqs()
132 if (atomic_read(&vcpu->arch.xen.timer_pending)) in xen_timer_callback()
135 e.vcpu_id = vcpu->vcpu_id; in xen_timer_callback()
136 e.vcpu_idx = vcpu->vcpu_idx; in xen_timer_callback()
137 e.port = vcpu->arch.xen.timer_virq; in xen_timer_callback()
140 rc = kvm_xen_set_evtchn_fast(&e, vcpu->kvm); in xen_timer_callback()
141 if (rc != -EWOULDBLOCK) { in xen_timer_callback()
142 vcpu->arch.xen.timer_expires = 0; in xen_timer_callback()
146 atomic_inc(&vcpu->arch.xen.timer_pending); in xen_timer_callback()
161 read_lock_irqsave(&gpc->lock, flags); in xen_get_guest_pvclock()
163 read_unlock_irqrestore(&gpc->lock, flags); in xen_get_guest_pvclock()
169 read_lock_irqsave(&gpc->lock, flags); in xen_get_guest_pvclock()
172 memcpy(hv_clock, gpc->khva + offset, sizeof(*hv_clock)); in xen_get_guest_pvclock()
173 read_unlock_irqrestore(&gpc->lock, flags); in xen_get_guest_pvclock()
179 if (hv_clock->tsc_shift != vcpu->arch.pvclock_tsc_shift || in xen_get_guest_pvclock()
180 hv_clock->tsc_to_system_mul != vcpu->arch.pvclock_tsc_mul) in xen_get_guest_pvclock()
181 return -EINVAL; in xen_get_guest_pvclock()
189 struct kvm_vcpu_xen *xen = &vcpu->arch.xen; in kvm_xen_start_timer()
192 int r = -EOPNOTSUPP; in kvm_xen_start_timer()
216 !vcpu->kvm->arch.use_master_clock) in kvm_xen_start_timer()
220 * If both Xen PV clocks are active, arbitrarily try to use the in kvm_xen_start_timer()
221 * compat clock first, but also try to use the non-compat clock in kvm_xen_start_timer()
222 * if the compat clock is unusable. The two PV clocks hold the same information. in kvm_xen_start_timer()
226 if (xen->vcpu_info_cache.active) in kvm_xen_start_timer()
227 r = xen_get_guest_pvclock(vcpu, &hv_clock, &xen->vcpu_info_cache, in kvm_xen_start_timer()
229 if (r && xen->vcpu_time_info_cache.active) in kvm_xen_start_timer()
230 r = xen_get_guest_pvclock(vcpu, &hv_clock, &xen->vcpu_time_info_cache, 0); in kvm_xen_start_timer()
272 guest_now = get_kvmclock_ns(vcpu->kvm); in kvm_xen_start_timer()
276 delta = guest_abs - guest_now; in kvm_xen_start_timer()
300 if (vcpu->arch.xen.timer_expires) in kvm_xen_start_timer()
301 hrtimer_cancel(&vcpu->arch.xen.timer); in kvm_xen_start_timer()
303 atomic_set(&vcpu->arch.xen.timer_pending, 0); in kvm_xen_start_timer()
304 vcpu->arch.xen.timer_expires = guest_abs; in kvm_xen_start_timer()
307 xen_timer_callback(&vcpu->arch.xen.timer); in kvm_xen_start_timer()
309 hrtimer_start(&vcpu->arch.xen.timer, in kvm_xen_start_timer()
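
xen_get_guest_pvclock() rejects a clock whose tsc_shift/tsc_to_system_mul disagree with the vCPU's values because the absolute timeout has to be converted with exactly the scale the guest is using. A sketch of that pvclock scaling rule (mirroring the kernel's pvclock_scale_delta(), with unsigned __int128 standing in for the 96-bit intermediate product):

#include <stdint.h>

/* ns = (tsc_delta << tsc_shift) * tsc_to_system_mul >> 32 */
static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int8_t shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	/* 64x32 multiply, keeping the upper 64 bits of the 96-bit product */
	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}
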
316 hrtimer_cancel(&vcpu->arch.xen.timer); in kvm_xen_stop_timer()
317 vcpu->arch.xen.timer_expires = 0; in kvm_xen_stop_timer()
318 atomic_set(&vcpu->arch.xen.timer_pending, 0); in kvm_xen_stop_timer()
323 struct kvm_vcpu_xen *vx = &v->arch.xen; in kvm_xen_update_runstate_guest()
324 struct gfn_to_pfn_cache *gpc1 = &vx->runstate_cache; in kvm_xen_update_runstate_guest()
325 struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache; in kvm_xen_update_runstate_guest()
336 * The only difference between 32-bit and 64-bit versions of the in kvm_xen_update_runstate_guest()
337 * runstate struct is the alignment of uint64_t in 32-bit, which in kvm_xen_update_runstate_guest()
338 * means that the 64-bit version has an additional 4 bytes of in kvm_xen_update_runstate_guest()
348 * The 64-bit structure has 4 bytes of padding before 'state_entry_time' in kvm_xen_update_runstate_guest()
359 * and is the same size (int) as vx->current_runstate. in kvm_xen_update_runstate_guest()
364 sizeof(vx->current_runstate)); in kvm_xen_update_runstate_guest()
366 sizeof(vx->current_runstate)); in kvm_xen_update_runstate_guest()
371 * is little-endian means that it's in the last *byte* of the word. in kvm_xen_update_runstate_guest()
381 * The time array is four 64-bit quantities in both versions, matching in kvm_xen_update_runstate_guest()
382 * the vx->runstate_times and immediately following state_entry_time. in kvm_xen_update_runstate_guest()
385 offsetof(struct vcpu_runstate_info, time) - sizeof(uint64_t)); in kvm_xen_update_runstate_guest()
387 offsetof(struct compat_vcpu_runstate_info, time) - sizeof(uint64_t)); in kvm_xen_update_runstate_guest()
391 sizeof(vx->runstate_times)); in kvm_xen_update_runstate_guest()
393 if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) { in kvm_xen_update_runstate_guest()
406 * alignment (and the 32-bit ABI doesn't align the 64-bit integers in kvm_xen_update_runstate_guest()
407 * anyway, even if the overall struct had been 64-bit aligned). in kvm_xen_update_runstate_guest()
409 if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) { in kvm_xen_update_runstate_guest()
410 user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK); in kvm_xen_update_runstate_guest()
411 user_len2 = user_len - user_len1; in kvm_xen_update_runstate_guest()
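
A self-contained sketch of the split computed above (page constants redefined locally for illustration): an area at offset 0xff8 within its page with user_len 0x30 yields user_len1 = 8 and user_len2 = 0x28.

#include <stddef.h>
#include <stdint.h>

#define EX_PAGE_SIZE	4096UL
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))

static void split_user_len(uint64_t gpa, size_t len, size_t *len1, size_t *len2)
{
	size_t off = gpa & ~EX_PAGE_MASK;	/* offset within the first page */

	if (off + len >= EX_PAGE_SIZE) {	/* crosses (or exactly fills) the page */
		*len1 = EX_PAGE_SIZE - off;
		*len2 = len - *len1;
	} else {
		*len1 = len;
		*len2 = 0;
	}
}
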
425 if (!read_trylock(&gpc1->lock)) { in kvm_xen_update_runstate_guest()
430 read_lock_irqsave(&gpc1->lock, flags); in kvm_xen_update_runstate_guest()
433 read_unlock_irqrestore(&gpc1->lock, flags); in kvm_xen_update_runstate_guest()
442 read_lock_irqsave(&gpc1->lock, flags); in kvm_xen_update_runstate_guest()
455 rs_state = gpc1->khva; in kvm_xen_update_runstate_guest()
456 rs_times = gpc1->khva + times_ofs; in kvm_xen_update_runstate_guest()
457 if (v->kvm->arch.xen.runstate_update_flag) in kvm_xen_update_runstate_guest()
458 update_bit = ((void *)(&rs_times[1])) - 1; in kvm_xen_update_runstate_guest()
467 lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_); in kvm_xen_update_runstate_guest()
469 if (!read_trylock(&gpc2->lock)) { in kvm_xen_update_runstate_guest()
470 read_unlock_irqrestore(&gpc1->lock, flags); in kvm_xen_update_runstate_guest()
474 read_lock(&gpc2->lock); in kvm_xen_update_runstate_guest()
478 read_unlock(&gpc2->lock); in kvm_xen_update_runstate_guest()
479 read_unlock_irqrestore(&gpc1->lock, flags); in kvm_xen_update_runstate_guest()
487 * area was configured in 32-bit mode and only extends in kvm_xen_update_runstate_guest()
489 * 64-bit mode, the second GPC won't have been set up. in kvm_xen_update_runstate_guest()
491 if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1, in kvm_xen_update_runstate_guest()
513 * is the 32-bit field that the compiler thinks is padding. in kvm_xen_update_runstate_guest()
515 rs_state = ((void *)rs_times) - times_ofs; in kvm_xen_update_runstate_guest()
521 if (v->kvm->arch.xen.runstate_update_flag) { in kvm_xen_update_runstate_guest()
523 update_bit = gpc1->khva + times_ofs + in kvm_xen_update_runstate_guest()
524 sizeof(uint64_t) - 1; in kvm_xen_update_runstate_guest()
526 update_bit = gpc2->khva + times_ofs + in kvm_xen_update_runstate_guest()
527 sizeof(uint64_t) - 1 - user_len1; in kvm_xen_update_runstate_guest()
532 * Don't leak kernel memory through the padding in the 64-bit version of the struct. in kvm_xen_update_runstate_guest()
542 * that (and write-barrier) before writing to the rest of the structure. in kvm_xen_update_runstate_guest()
545 * different cache line to the rest of the 64-bit word, due to the (lack of) alignment. in kvm_xen_update_runstate_guest()
548 entry_time = vx->runstate_entry_time; in kvm_xen_update_runstate_guest()
551 *update_bit = (vx->runstate_entry_time | XEN_RUNSTATE_UPDATE) >> 56; in kvm_xen_update_runstate_guest()
560 *rs_state = vx->current_runstate; in kvm_xen_update_runstate_guest()
562 memcpy(rs_times + 1, vx->runstate_times, sizeof(vx->runstate_times)); in kvm_xen_update_runstate_guest()
566 memcpy(gpc1->khva, rs_state, user_len1); in kvm_xen_update_runstate_guest()
567 memcpy(gpc2->khva, ((void *)rs_state) + user_len1, user_len2); in kvm_xen_update_runstate_guest()
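
The `>> 56` store above works because update_bit points at the most significant byte of the little-endian state_entry_time word, so a single byte write toggles XEN_RUNSTATE_UPDATE without disturbing the timestamp. A hypothetical guest-side consumer pairing with that writer (GCC builtins as stand-ins for the guest's barriers):

#include <stdint.h>

#define XEN_RUNSTATE_UPDATE	(1ULL << 63)

static uint64_t read_runstate_time(const volatile uint64_t *state_entry_time,
				   const volatile uint64_t time[4], int state)
{
	uint64_t entry, t;

	do {
		/* Wait until no update is in flight... */
		while ((entry = *state_entry_time) & XEN_RUNSTATE_UPDATE)
			;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		t = time[state];
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		/* ...and retry if one started meanwhile. */
	} while (*state_entry_time != entry);

	return t;
}
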
580 read_unlock(&gpc2->lock); in kvm_xen_update_runstate_guest()
584 read_unlock_irqrestore(&gpc1->lock, flags); in kvm_xen_update_runstate_guest()
589 struct kvm_vcpu_xen *vx = &v->arch.xen; in kvm_xen_update_runstate()
590 u64 now = get_kvmclock_ns(v->kvm); in kvm_xen_update_runstate()
591 u64 delta_ns = now - vx->runstate_entry_time; in kvm_xen_update_runstate()
592 u64 run_delay = current->sched_info.run_delay; in kvm_xen_update_runstate()
594 if (unlikely(!vx->runstate_entry_time)) in kvm_xen_update_runstate()
595 vx->current_runstate = RUNSTATE_offline; in kvm_xen_update_runstate()
601 if (vx->current_runstate == RUNSTATE_running) { in kvm_xen_update_runstate()
602 u64 steal_ns = run_delay - vx->last_steal; in kvm_xen_update_runstate()
604 delta_ns -= steal_ns; in kvm_xen_update_runstate()
606 vx->runstate_times[RUNSTATE_runnable] += steal_ns; in kvm_xen_update_runstate()
608 vx->last_steal = run_delay; in kvm_xen_update_runstate()
610 vx->runstate_times[vx->current_runstate] += delta_ns; in kvm_xen_update_runstate()
611 vx->current_runstate = state; in kvm_xen_update_runstate()
612 vx->runstate_entry_time = now; in kvm_xen_update_runstate()
614 if (vx->runstate_cache.active) in kvm_xen_update_runstate()
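
A worked example of the reclassification above, with illustrative numbers:

/*
 * Suppose 10,000,000 ns elapsed since runstate_entry_time, and the host
 * runqueue delay (run_delay) grew by 3,000,000 ns in that window:
 *
 *   steal_ns = run_delay - last_steal        =  3,000,000
 *   delta_ns = 10,000,000 - 3,000,000        =  7,000,000
 *
 *   runstate_times[RUNSTATE_runnable] += 3,000,000   (stolen time)
 *   runstate_times[RUNSTATE_running]  += 7,000,000   (time truly running)
 */
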
622 irq.dest_id = v->vcpu_id; in kvm_xen_inject_vcpu_vector()
623 irq.vector = v->arch.xen.upcall_vector; in kvm_xen_inject_vcpu_vector()
629 kvm_irq_delivery_to_apic(v->kvm, NULL, &irq, NULL); in kvm_xen_inject_vcpu_vector()
634 * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
641 unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel); in kvm_xen_inject_pending_events()
642 struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache; in kvm_xen_inject_pending_events()
649 * Yes, this is an open-coded loop. But that's just what put_user() does anyway. in kvm_xen_inject_pending_events()
653 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_inject_pending_events()
655 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_inject_pending_events()
660 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_inject_pending_events()
663 /* Now gpc->khva is a valid kernel address for the vcpu_info */ in kvm_xen_inject_pending_events()
664 if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) { in kvm_xen_inject_pending_events()
665 struct vcpu_info *vi = gpc->khva; in kvm_xen_inject_pending_events()
671 "+m" (vi->evtchn_pending_sel), in kvm_xen_inject_pending_events()
672 "+m" (v->arch.xen.evtchn_pending_sel) in kvm_xen_inject_pending_events()
674 WRITE_ONCE(vi->evtchn_upcall_pending, 1); in kvm_xen_inject_pending_events()
677 struct compat_vcpu_info *vi = gpc->khva; in kvm_xen_inject_pending_events()
683 "+m" (vi->evtchn_pending_sel), in kvm_xen_inject_pending_events()
684 "+m" (v->arch.xen.evtchn_pending_sel) in kvm_xen_inject_pending_events()
686 WRITE_ONCE(vi->evtchn_upcall_pending, 1); in kvm_xen_inject_pending_events()
690 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_inject_pending_events()
692 /* For the per-vCPU lapic vector, deliver it as MSI. */ in kvm_xen_inject_pending_events()
693 if (v->arch.xen.upcall_vector) in kvm_xen_inject_pending_events()
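
The two inline asm blocks above atomically OR the kernel's shadow evtchn_pending_sel into the guest-visible word, then clear exactly the transferred bits from the shadow (so bits set concurrently are preserved). A plain-C sketch of the same semantics using GCC atomic builtins; the real code fuses this into locked or/and instructions:

static void transfer_pending_sel(unsigned long *guest_sel,
				 unsigned long *shadow_sel)
{
	unsigned long pending = *shadow_sel;

	/* Publish the selector bits to the guest first... */
	__atomic_fetch_or(guest_sel, pending, __ATOMIC_ACQ_REL);
	/* ...then retire exactly those bits from the kernel's shadow. */
	__atomic_fetch_and(shadow_sel, ~pending, __ATOMIC_ACQ_REL);
}
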
699 struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache; in __kvm_xen_has_interrupt()
716 read_lock_irqsave(&gpc->lock, flags); in __kvm_xen_has_interrupt()
718 read_unlock_irqrestore(&gpc->lock, flags); in __kvm_xen_has_interrupt()
738 read_lock_irqsave(&gpc->lock, flags); in __kvm_xen_has_interrupt()
741 rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending; in __kvm_xen_has_interrupt()
742 read_unlock_irqrestore(&gpc->lock, flags); in __kvm_xen_has_interrupt()
748 int r = -ENOENT; in kvm_xen_hvm_set_attr()
751 switch (data->type) { in kvm_xen_hvm_set_attr()
753 if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) { in kvm_xen_hvm_set_attr()
754 r = -EINVAL; in kvm_xen_hvm_set_attr()
756 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
757 kvm->arch.xen.long_mode = !!data->u.long_mode; in kvm_xen_hvm_set_attr()
760 * Re-initialize shared_info to put the wallclock in the correct place. in kvm_xen_hvm_set_attr()
765 r = kvm->arch.xen.shinfo_cache.active ? in kvm_xen_hvm_set_attr()
767 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
775 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
777 idx = srcu_read_lock(&kvm->srcu); in kvm_xen_hvm_set_attr()
779 if (data->type == KVM_XEN_ATTR_TYPE_SHARED_INFO) { in kvm_xen_hvm_set_attr()
780 gfn_t gfn = data->u.shared_info.gfn; in kvm_xen_hvm_set_attr()
783 kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache); in kvm_xen_hvm_set_attr()
786 r = kvm_gpc_activate(&kvm->arch.xen.shinfo_cache, in kvm_xen_hvm_set_attr()
790 void __user * hva = u64_to_user_ptr(data->u.shared_info.hva); in kvm_xen_hvm_set_attr()
793 r = -EINVAL; in kvm_xen_hvm_set_attr()
795 kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache); in kvm_xen_hvm_set_attr()
798 r = kvm_gpc_activate_hva(&kvm->arch.xen.shinfo_cache, in kvm_xen_hvm_set_attr()
803 srcu_read_unlock(&kvm->srcu, idx); in kvm_xen_hvm_set_attr()
805 if (!r && kvm->arch.xen.shinfo_cache.active) in kvm_xen_hvm_set_attr()
808 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
812 if (data->u.vector && data->u.vector < 0x10) in kvm_xen_hvm_set_attr()
813 r = -EINVAL; in kvm_xen_hvm_set_attr()
815 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
816 kvm->arch.xen.upcall_vector = data->u.vector; in kvm_xen_hvm_set_attr()
817 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
827 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
828 kvm->arch.xen.xen_version = data->u.xen_version; in kvm_xen_hvm_set_attr()
829 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
835 r = -EOPNOTSUPP; in kvm_xen_hvm_set_attr()
838 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
839 kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag; in kvm_xen_hvm_set_attr()
840 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
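
From userspace, these attributes are driven through the KVM_XEN_HVM_SET_ATTR ioctl. A sketch (vm_fd is assumed to be an open KVM VM file descriptor):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_xen_long_mode(int vm_fd, int on)
{
	struct kvm_xen_hvm_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = KVM_XEN_ATTR_TYPE_LONG_MODE;
	attr.u.long_mode = on;

	return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
}
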
853 int r = -ENOENT; in kvm_xen_hvm_get_attr()
855 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_get_attr()
857 switch (data->type) { in kvm_xen_hvm_get_attr()
859 data->u.long_mode = kvm->arch.xen.long_mode; in kvm_xen_hvm_get_attr()
864 if (kvm_gpc_is_gpa_active(&kvm->arch.xen.shinfo_cache)) in kvm_xen_hvm_get_attr()
865 data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa); in kvm_xen_hvm_get_attr()
867 data->u.shared_info.gfn = KVM_XEN_INVALID_GFN; in kvm_xen_hvm_get_attr()
872 if (kvm_gpc_is_hva_active(&kvm->arch.xen.shinfo_cache)) in kvm_xen_hvm_get_attr()
873 data->u.shared_info.hva = kvm->arch.xen.shinfo_cache.uhva; in kvm_xen_hvm_get_attr()
875 data->u.shared_info.hva = 0; in kvm_xen_hvm_get_attr()
880 data->u.vector = kvm->arch.xen.upcall_vector; in kvm_xen_hvm_get_attr()
885 data->u.xen_version = kvm->arch.xen.xen_version; in kvm_xen_hvm_get_attr()
891 r = -EOPNOTSUPP; in kvm_xen_hvm_get_attr()
894 data->u.runstate_update_flag = kvm->arch.xen.runstate_update_flag; in kvm_xen_hvm_get_attr()
902 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_get_attr()
908 int idx, r = -ENOENT; in kvm_xen_vcpu_set_attr()
910 mutex_lock(&vcpu->kvm->arch.xen.xen_lock); in kvm_xen_vcpu_set_attr()
911 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_xen_vcpu_set_attr()
913 switch (data->type) { in kvm_xen_vcpu_set_attr()
922 if (data->type == KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO) { in kvm_xen_vcpu_set_attr()
923 if (data->u.gpa == KVM_XEN_INVALID_GPA) { in kvm_xen_vcpu_set_attr()
924 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache); in kvm_xen_vcpu_set_attr()
929 r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache, in kvm_xen_vcpu_set_attr()
930 data->u.gpa, sizeof(struct vcpu_info)); in kvm_xen_vcpu_set_attr()
932 if (data->u.hva == 0) { in kvm_xen_vcpu_set_attr()
933 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache); in kvm_xen_vcpu_set_attr()
938 r = kvm_gpc_activate_hva(&vcpu->arch.xen.vcpu_info_cache, in kvm_xen_vcpu_set_attr()
939 data->u.hva, sizeof(struct vcpu_info)); in kvm_xen_vcpu_set_attr()
948 if (data->u.gpa == KVM_XEN_INVALID_GPA) { in kvm_xen_vcpu_set_attr()
949 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache); in kvm_xen_vcpu_set_attr()
954 r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache, in kvm_xen_vcpu_set_attr()
955 data->u.gpa, in kvm_xen_vcpu_set_attr()
965 r = -EOPNOTSUPP; in kvm_xen_vcpu_set_attr()
968 if (data->u.gpa == KVM_XEN_INVALID_GPA) { in kvm_xen_vcpu_set_attr()
971 kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache); in kvm_xen_vcpu_set_attr()
972 kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache); in kvm_xen_vcpu_set_attr()
977 * If the guest switches to 64-bit mode after setting the runstate in kvm_xen_vcpu_set_attr()
981 if (IS_ENABLED(CONFIG_64BIT) && vcpu->kvm->arch.xen.long_mode) in kvm_xen_vcpu_set_attr()
987 sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK); in kvm_xen_vcpu_set_attr()
988 r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache, in kvm_xen_vcpu_set_attr()
989 data->u.gpa, sz1); in kvm_xen_vcpu_set_attr()
995 kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache); in kvm_xen_vcpu_set_attr()
997 sz2 = sz - sz1; in kvm_xen_vcpu_set_attr()
998 BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK); in kvm_xen_vcpu_set_attr()
999 r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache, in kvm_xen_vcpu_set_attr()
1000 data->u.gpa + sz1, sz2); in kvm_xen_vcpu_set_attr()
1010 r = -EOPNOTSUPP; in kvm_xen_vcpu_set_attr()
1013 if (data->u.runstate.state > RUNSTATE_offline) { in kvm_xen_vcpu_set_attr()
1014 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1018 kvm_xen_update_runstate(vcpu, data->u.runstate.state); in kvm_xen_vcpu_set_attr()
1024 r = -EOPNOTSUPP; in kvm_xen_vcpu_set_attr()
1027 if (data->u.runstate.state > RUNSTATE_offline) { in kvm_xen_vcpu_set_attr()
1028 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1031 if (data->u.runstate.state_entry_time != in kvm_xen_vcpu_set_attr()
1032 (data->u.runstate.time_running + in kvm_xen_vcpu_set_attr()
1033 data->u.runstate.time_runnable + in kvm_xen_vcpu_set_attr()
1034 data->u.runstate.time_blocked + in kvm_xen_vcpu_set_attr()
1035 data->u.runstate.time_offline)) { in kvm_xen_vcpu_set_attr()
1036 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1039 if (get_kvmclock_ns(vcpu->kvm) < in kvm_xen_vcpu_set_attr()
1040 data->u.runstate.state_entry_time) { in kvm_xen_vcpu_set_attr()
1041 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1045 vcpu->arch.xen.current_runstate = data->u.runstate.state; in kvm_xen_vcpu_set_attr()
1046 vcpu->arch.xen.runstate_entry_time = in kvm_xen_vcpu_set_attr()
1047 data->u.runstate.state_entry_time; in kvm_xen_vcpu_set_attr()
1048 vcpu->arch.xen.runstate_times[RUNSTATE_running] = in kvm_xen_vcpu_set_attr()
1049 data->u.runstate.time_running; in kvm_xen_vcpu_set_attr()
1050 vcpu->arch.xen.runstate_times[RUNSTATE_runnable] = in kvm_xen_vcpu_set_attr()
1051 data->u.runstate.time_runnable; in kvm_xen_vcpu_set_attr()
1052 vcpu->arch.xen.runstate_times[RUNSTATE_blocked] = in kvm_xen_vcpu_set_attr()
1053 data->u.runstate.time_blocked; in kvm_xen_vcpu_set_attr()
1054 vcpu->arch.xen.runstate_times[RUNSTATE_offline] = in kvm_xen_vcpu_set_attr()
1055 data->u.runstate.time_offline; in kvm_xen_vcpu_set_attr()
1056 vcpu->arch.xen.last_steal = current->sched_info.run_delay; in kvm_xen_vcpu_set_attr()
1062 r = -EOPNOTSUPP; in kvm_xen_vcpu_set_attr()
1065 if (data->u.runstate.state > RUNSTATE_offline && in kvm_xen_vcpu_set_attr()
1066 data->u.runstate.state != (u64)-1) { in kvm_xen_vcpu_set_attr()
1067 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1071 if (data->u.runstate.state_entry_time != in kvm_xen_vcpu_set_attr()
1072 (data->u.runstate.time_running + in kvm_xen_vcpu_set_attr()
1073 data->u.runstate.time_runnable + in kvm_xen_vcpu_set_attr()
1074 data->u.runstate.time_blocked + in kvm_xen_vcpu_set_attr()
1075 data->u.runstate.time_offline)) { in kvm_xen_vcpu_set_attr()
1076 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1080 if (get_kvmclock_ns(vcpu->kvm) < in kvm_xen_vcpu_set_attr()
1081 (vcpu->arch.xen.runstate_entry_time + in kvm_xen_vcpu_set_attr()
1082 data->u.runstate.state_entry_time)) { in kvm_xen_vcpu_set_attr()
1083 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1087 vcpu->arch.xen.runstate_entry_time += in kvm_xen_vcpu_set_attr()
1088 data->u.runstate.state_entry_time; in kvm_xen_vcpu_set_attr()
1089 vcpu->arch.xen.runstate_times[RUNSTATE_running] += in kvm_xen_vcpu_set_attr()
1090 data->u.runstate.time_running; in kvm_xen_vcpu_set_attr()
1091 vcpu->arch.xen.runstate_times[RUNSTATE_runnable] += in kvm_xen_vcpu_set_attr()
1092 data->u.runstate.time_runnable; in kvm_xen_vcpu_set_attr()
1093 vcpu->arch.xen.runstate_times[RUNSTATE_blocked] += in kvm_xen_vcpu_set_attr()
1094 data->u.runstate.time_blocked; in kvm_xen_vcpu_set_attr()
1095 vcpu->arch.xen.runstate_times[RUNSTATE_offline] += in kvm_xen_vcpu_set_attr()
1096 data->u.runstate.time_offline; in kvm_xen_vcpu_set_attr()
1098 if (data->u.runstate.state <= RUNSTATE_offline) in kvm_xen_vcpu_set_attr()
1099 kvm_xen_update_runstate(vcpu, data->u.runstate.state); in kvm_xen_vcpu_set_attr()
1100 else if (vcpu->arch.xen.runstate_cache.active) in kvm_xen_vcpu_set_attr()
1106 if (data->u.vcpu_id >= KVM_MAX_VCPUS) in kvm_xen_vcpu_set_attr()
1107 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1109 vcpu->arch.xen.vcpu_id = data->u.vcpu_id; in kvm_xen_vcpu_set_attr()
1115 if (data->u.timer.port && in kvm_xen_vcpu_set_attr()
1116 data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) { in kvm_xen_vcpu_set_attr()
1117 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1123 vcpu->arch.xen.timer_virq = data->u.timer.port; in kvm_xen_vcpu_set_attr()
1126 if (data->u.timer.port && data->u.timer.expires_ns) in kvm_xen_vcpu_set_attr()
1127 kvm_xen_start_timer(vcpu, data->u.timer.expires_ns, false); in kvm_xen_vcpu_set_attr()
1133 if (data->u.vector && data->u.vector < 0x10) in kvm_xen_vcpu_set_attr()
1134 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1136 vcpu->arch.xen.upcall_vector = data->u.vector; in kvm_xen_vcpu_set_attr()
1145 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_xen_vcpu_set_attr()
1146 mutex_unlock(&vcpu->kvm->arch.xen.xen_lock); in kvm_xen_vcpu_set_attr()
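
The per-vCPU attributes follow the same pattern via KVM_XEN_VCPU_SET_ATTR. A sketch activating the vcpu_info cache handled above (vcpu_fd is assumed to be an open KVM vCPU descriptor):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_vcpu_info_gpa(int vcpu_fd, __u64 gpa)
{
	struct kvm_xen_vcpu_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO;
	attr.u.gpa = gpa;	/* KVM_XEN_INVALID_GPA deactivates it */

	return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr);
}
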
1152 int r = -ENOENT; in kvm_xen_vcpu_get_attr()
1154 mutex_lock(&vcpu->kvm->arch.xen.xen_lock); in kvm_xen_vcpu_get_attr()
1156 switch (data->type) { in kvm_xen_vcpu_get_attr()
1158 if (kvm_gpc_is_gpa_active(&vcpu->arch.xen.vcpu_info_cache)) in kvm_xen_vcpu_get_attr()
1159 data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa; in kvm_xen_vcpu_get_attr()
1161 data->u.gpa = KVM_XEN_INVALID_GPA; in kvm_xen_vcpu_get_attr()
1166 if (kvm_gpc_is_hva_active(&vcpu->arch.xen.vcpu_info_cache)) in kvm_xen_vcpu_get_attr()
1167 data->u.hva = vcpu->arch.xen.vcpu_info_cache.uhva; in kvm_xen_vcpu_get_attr()
1169 data->u.hva = 0; in kvm_xen_vcpu_get_attr()
1174 if (vcpu->arch.xen.vcpu_time_info_cache.active) in kvm_xen_vcpu_get_attr()
1175 data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa; in kvm_xen_vcpu_get_attr()
1177 data->u.gpa = KVM_XEN_INVALID_GPA; in kvm_xen_vcpu_get_attr()
1183 r = -EOPNOTSUPP; in kvm_xen_vcpu_get_attr()
1186 if (vcpu->arch.xen.runstate_cache.active) { in kvm_xen_vcpu_get_attr()
1187 data->u.gpa = vcpu->arch.xen.runstate_cache.gpa; in kvm_xen_vcpu_get_attr()
1194 r = -EOPNOTSUPP; in kvm_xen_vcpu_get_attr()
1197 data->u.runstate.state = vcpu->arch.xen.current_runstate; in kvm_xen_vcpu_get_attr()
1203 r = -EOPNOTSUPP; in kvm_xen_vcpu_get_attr()
1206 data->u.runstate.state = vcpu->arch.xen.current_runstate; in kvm_xen_vcpu_get_attr()
1207 data->u.runstate.state_entry_time = in kvm_xen_vcpu_get_attr()
1208 vcpu->arch.xen.runstate_entry_time; in kvm_xen_vcpu_get_attr()
1209 data->u.runstate.time_running = in kvm_xen_vcpu_get_attr()
1210 vcpu->arch.xen.runstate_times[RUNSTATE_running]; in kvm_xen_vcpu_get_attr()
1211 data->u.runstate.time_runnable = in kvm_xen_vcpu_get_attr()
1212 vcpu->arch.xen.runstate_times[RUNSTATE_runnable]; in kvm_xen_vcpu_get_attr()
1213 data->u.runstate.time_blocked = in kvm_xen_vcpu_get_attr()
1214 vcpu->arch.xen.runstate_times[RUNSTATE_blocked]; in kvm_xen_vcpu_get_attr()
1215 data->u.runstate.time_offline = in kvm_xen_vcpu_get_attr()
1216 vcpu->arch.xen.runstate_times[RUNSTATE_offline]; in kvm_xen_vcpu_get_attr()
1221 r = -EINVAL; in kvm_xen_vcpu_get_attr()
1225 data->u.vcpu_id = vcpu->arch.xen.vcpu_id; in kvm_xen_vcpu_get_attr()
1239 if (vcpu->arch.xen.timer_expires) { in kvm_xen_vcpu_get_attr()
1240 hrtimer_cancel(&vcpu->arch.xen.timer); in kvm_xen_vcpu_get_attr()
1244 data->u.timer.port = vcpu->arch.xen.timer_virq; in kvm_xen_vcpu_get_attr()
1245 data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL; in kvm_xen_vcpu_get_attr()
1246 data->u.timer.expires_ns = vcpu->arch.xen.timer_expires; in kvm_xen_vcpu_get_attr()
1256 if (vcpu->arch.xen.timer_expires) in kvm_xen_vcpu_get_attr()
1257 hrtimer_start_expires(&vcpu->arch.xen.timer, in kvm_xen_vcpu_get_attr()
1264 data->u.vector = vcpu->arch.xen.upcall_vector; in kvm_xen_vcpu_get_attr()
1272 mutex_unlock(&vcpu->kvm->arch.xen.xen_lock); in kvm_xen_vcpu_get_attr()
1278 struct kvm *kvm = vcpu->kvm; in kvm_xen_write_hypercall_page()
1284 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_write_hypercall_page()
1285 if (kvm->arch.xen.long_mode != lm) { in kvm_xen_write_hypercall_page()
1286 kvm->arch.xen.long_mode = lm; in kvm_xen_write_hypercall_page()
1289 * Re-initialize shared_info to put the wallclock in the correct place. in kvm_xen_write_hypercall_page()
1292 if (kvm->arch.xen.shinfo_cache.active && in kvm_xen_write_hypercall_page()
1296 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_write_hypercall_page()
1324 memset(instructions + 9, 0xcc, sizeof(instructions) - 9); in kvm_xen_write_hypercall_page()
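
The 9 bytes that memset() preserves are one hypercall stub; a plausible reconstruction of each 32-byte slot (an assumption consistent with the offsets, not quoted from the surrounding code):

/*
 *   b8 NN NN NN NN	mov  $slot, %eax	; bytes 0-4, hypercall number
 *   0f 01 c1|d9	vmcall / vmmcall	; bytes 5-7, Intel / AMD
 *   c3			ret			; byte  8
 *   cc cc ... cc	int3 padding		; bytes 9-31, trap stray jumps
 */
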
1335 * Note, truncation is a non-issue as 'lm' is guaranteed to be in kvm_xen_write_hypercall_page()
1336 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes. in kvm_xen_write_hypercall_page()
1338 hva_t blob_addr = lm ? kvm->arch.xen.hvm_config.blob_addr_64 in kvm_xen_write_hypercall_page()
1339 : kvm->arch.xen.hvm_config.blob_addr_32; in kvm_xen_write_hypercall_page()
1340 u8 blob_size = lm ? kvm->arch.xen.hvm_config.blob_size_64 in kvm_xen_write_hypercall_page()
1341 : kvm->arch.xen.hvm_config.blob_size_32; in kvm_xen_write_hypercall_page()
1370 if (xhc->flags & ~permitted_flags) in kvm_xen_hvm_config()
1371 return -EINVAL; in kvm_xen_hvm_config()
1377 if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) && in kvm_xen_hvm_config()
1378 (xhc->blob_addr_32 || xhc->blob_addr_64 || in kvm_xen_hvm_config()
1379 xhc->blob_size_32 || xhc->blob_size_64)) in kvm_xen_hvm_config()
1380 return -EINVAL; in kvm_xen_hvm_config()
1384 * synthetic, virtualization-defined MSRs, e.g. to prevent confusing in kvm_xen_hvm_config()
1387 if (xhc->msr && in kvm_xen_hvm_config()
1388 (xhc->msr < KVM_XEN_MSR_MIN_INDEX || xhc->msr > KVM_XEN_MSR_MAX_INDEX)) in kvm_xen_hvm_config()
1389 return -EINVAL; in kvm_xen_hvm_config()
1391 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_config()
1393 if (xhc->msr && !kvm->arch.xen.hvm_config.msr) in kvm_xen_hvm_config()
1395 else if (!xhc->msr && kvm->arch.xen.hvm_config.msr) in kvm_xen_hvm_config()
1398 old_flags = kvm->arch.xen.hvm_config.flags; in kvm_xen_hvm_config()
1399 memcpy(&kvm->arch.xen.hvm_config, xhc, sizeof(*xhc)); in kvm_xen_hvm_config()
1401 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_config()
1403 if ((old_flags ^ xhc->flags) & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE) in kvm_xen_hvm_config()
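
A userspace sketch of the corresponding KVM_XEN_HVM_CONFIG call; the MSR index 0x40000200 is illustrative (it must fall in the synthetic MSR range checked above), and INTERCEPT_HCALL requires the blob fields to stay zero:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int configure_xen(int vm_fd)
{
	struct kvm_xen_hvm_config cfg;

	memset(&cfg, 0, sizeof(cfg));		/* blob fields must be zero */
	cfg.msr = 0x40000200;			/* hypercall page MSR index */
	cfg.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
		    KVM_XEN_HVM_CONFIG_EVTCHN_SEND;

	return ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &cfg);
}
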
1417 struct kvm_run *run = vcpu->run; in kvm_xen_hypercall_complete_userspace()
1419 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip))) in kvm_xen_hypercall_complete_userspace()
1422 return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result); in kvm_xen_hypercall_complete_userspace()
1427 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) in max_evtchn_port()
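
The two limits behind this check come straight from the 2-level event channel ABI: the shared_info pending bitmap holds sizeof(xen_ulong_t) * 8 words of that many bits each. A sketch of the arithmetic, with constant names as in the Xen headers:

/* 64-bit guests: 64 words x 64 bits */
#define EVTCHN_2L_NR_CHANNELS		(64 * 64)	/* 4096 ports */
/* 32-bit guests: 32 words x 32 bits */
#define COMPAT_EVTCHN_2L_NR_CHANNELS	(32 * 32)	/* 1024 ports */
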
1436 struct kvm *kvm = vcpu->kvm; in wait_pending_event()
1437 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in wait_pending_event()
1443 idx = srcu_read_lock(&kvm->srcu); in wait_pending_event()
1444 read_lock_irqsave(&gpc->lock, flags); in wait_pending_event()
1449 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { in wait_pending_event()
1450 struct shared_info *shinfo = gpc->khva; in wait_pending_event()
1451 pending_bits = (unsigned long *)&shinfo->evtchn_pending; in wait_pending_event()
1453 struct compat_shared_info *shinfo = gpc->khva; in wait_pending_event()
1454 pending_bits = (unsigned long *)&shinfo->evtchn_pending; in wait_pending_event()
1465 read_unlock_irqrestore(&gpc->lock, flags); in wait_pending_event()
1466 srcu_read_unlock(&kvm->srcu, idx); in wait_pending_event()
1480 !(vcpu->kvm->arch.xen.hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND)) in kvm_xen_schedop_poll()
1490 *r = -EFAULT; in kvm_xen_schedop_poll()
1495 * This is a 32-bit pointer to an array of evtchn_port_t which are uint32_t, so no further compat handling is needed once the pointer is converted. in kvm_xen_schedop_poll()
1505 *r = -EFAULT; in kvm_xen_schedop_poll()
1513 *r = -EINVAL; in kvm_xen_schedop_poll()
1520 *r = -ENOMEM; in kvm_xen_schedop_poll()
1528 *r = -EFAULT; in kvm_xen_schedop_poll()
1533 if (ports[i] >= max_evtchn_port(vcpu->kvm)) { in kvm_xen_schedop_poll()
1534 *r = -EINVAL; in kvm_xen_schedop_poll()
1540 vcpu->arch.xen.poll_evtchn = port; in kvm_xen_schedop_poll()
1542 vcpu->arch.xen.poll_evtchn = -1; in kvm_xen_schedop_poll()
1544 set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask); in kvm_xen_schedop_poll()
1550 mod_timer(&vcpu->arch.xen.poll_timer, in kvm_xen_schedop_poll()
1556 timer_delete(&vcpu->arch.xen.poll_timer); in kvm_xen_schedop_poll()
1561 vcpu->arch.xen.poll_evtchn = 0; in kvm_xen_schedop_poll()
1565 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask); in kvm_xen_schedop_poll()
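
For reference, the guest-side argument being parsed here is Xen's struct sched_poll; a sketch of its shape (the real definition in the Xen public headers uses a GUEST_HANDLE for the ports pointer):

#include <stdint.h>

typedef uint32_t evtchn_port_t;

struct sched_poll {
	evtchn_port_t *ports;		/* array of ports to poll */
	unsigned int nr_ports;
	uint64_t timeout;		/* absolute system time, in ns */
};
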
1616 if (vcpu->arch.xen.vcpu_id != vcpu_id) { in kvm_xen_hcall_vcpu_op()
1617 *r = -EINVAL; in kvm_xen_hcall_vcpu_op()
1622 * The only difference for 32-bit compat is the 4 bytes of in kvm_xen_hcall_vcpu_op()
1625 * the padding and return -EFAULT if we can't. Otherwise we in kvm_xen_hcall_vcpu_op()
1626 * might as well just have copied the 12-byte 32-bit struct. in kvm_xen_hcall_vcpu_op()
1639 *r = -EFAULT; in kvm_xen_hcall_vcpu_op()
1648 if (vcpu->arch.xen.vcpu_id != vcpu_id) { in kvm_xen_hcall_vcpu_op()
1649 *r = -EINVAL; in kvm_xen_hcall_vcpu_op()
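
The layout difference the comment alludes to, sketched out: the 64-bit ABI pads the struct to 16 bytes, while the 32-bit ABI packs the same two fields into 12 bytes (uint64_t is only 4-byte aligned on i386).

#include <stdint.h>

struct vcpu_set_singleshot_timer {		/* 64-bit ABI: 16 bytes */
	uint64_t timeout_abs_ns;
	uint32_t flags;
	/* + 4 bytes of tail padding */
};

struct compat_vcpu_set_singleshot_timer {	/* 32-bit ABI: 12 bytes */
	uint64_t timeout_abs_ns;
	uint32_t flags;
} __attribute__((packed));
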
1678 u64 input, params[6], r = -ENOSYS; in kvm_xen_hypercall()
1684 /* Hyper-V hypercalls get bit 31 set in EAX */ in kvm_xen_hypercall()
1721 if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) { in kvm_xen_hypercall()
1722 r = vcpu->kvm->arch.xen.xen_version; in kvm_xen_hypercall()
1740 /* In 32-bit mode, the 64-bit timeout is in two 32-bit params. */ in kvm_xen_hypercall()
1754 vcpu->run->exit_reason = KVM_EXIT_XEN; in kvm_xen_hypercall()
1755 vcpu->run->xen.type = KVM_EXIT_XEN_HCALL; in kvm_xen_hypercall()
1756 vcpu->run->xen.u.hcall.longmode = longmode; in kvm_xen_hypercall()
1757 vcpu->run->xen.u.hcall.cpl = cpl; in kvm_xen_hypercall()
1758 vcpu->run->xen.u.hcall.input = input; in kvm_xen_hypercall()
1759 vcpu->run->xen.u.hcall.params[0] = params[0]; in kvm_xen_hypercall()
1760 vcpu->run->xen.u.hcall.params[1] = params[1]; in kvm_xen_hypercall()
1761 vcpu->run->xen.u.hcall.params[2] = params[2]; in kvm_xen_hypercall()
1762 vcpu->run->xen.u.hcall.params[3] = params[3]; in kvm_xen_hypercall()
1763 vcpu->run->xen.u.hcall.params[4] = params[4]; in kvm_xen_hypercall()
1764 vcpu->run->xen.u.hcall.params[5] = params[5]; in kvm_xen_hypercall()
1765 vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu); in kvm_xen_hypercall()
1766 vcpu->arch.complete_userspace_io = in kvm_xen_hypercall()
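
On the userspace side, the exit filled in above surfaces from KVM_RUN as KVM_EXIT_XEN. A sketch of the VMM's handling; handle_xen_hcall() is a hypothetical helper, and the result written back is what kvm_xen_hypercall_complete_userspace() feeds into the guest's return register:

#include <linux/kvm.h>

__u64 handle_xen_hcall(__u64 input, __u64 *params);	/* hypothetical */

static void handle_exit_xen(struct kvm_run *run)
{
	if (run->exit_reason == KVM_EXIT_XEN &&
	    run->xen.type == KVM_EXIT_XEN_HCALL)
		run->xen.u.hcall.result =
			handle_xen_hcall(run->xen.u.hcall.input,
					 run->xen.u.hcall.params);
}
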
1774 int poll_evtchn = vcpu->arch.xen.poll_evtchn; in kvm_xen_check_poller()
1776 if ((poll_evtchn == port || poll_evtchn == -1) && in kvm_xen_check_poller()
1777 test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) { in kvm_xen_check_poller()
1791 * only check on its return value is a comparison with -EWOULDBLOCK.
1795 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_set_evtchn_fast()
1803 vcpu_idx = READ_ONCE(xe->vcpu_idx); in kvm_xen_set_evtchn_fast()
1807 vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id); in kvm_xen_set_evtchn_fast()
1809 return -EINVAL; in kvm_xen_set_evtchn_fast()
1810 WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx); in kvm_xen_set_evtchn_fast()
1813 if (xe->port >= max_evtchn_port(kvm)) in kvm_xen_set_evtchn_fast()
1814 return -EINVAL; in kvm_xen_set_evtchn_fast()
1816 rc = -EWOULDBLOCK; in kvm_xen_set_evtchn_fast()
1818 idx = srcu_read_lock(&kvm->srcu); in kvm_xen_set_evtchn_fast()
1820 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1824 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { in kvm_xen_set_evtchn_fast()
1825 struct shared_info *shinfo = gpc->khva; in kvm_xen_set_evtchn_fast()
1826 pending_bits = (unsigned long *)&shinfo->evtchn_pending; in kvm_xen_set_evtchn_fast()
1827 mask_bits = (unsigned long *)&shinfo->evtchn_mask; in kvm_xen_set_evtchn_fast()
1828 port_word_bit = xe->port / 64; in kvm_xen_set_evtchn_fast()
1830 struct compat_shared_info *shinfo = gpc->khva; in kvm_xen_set_evtchn_fast()
1831 pending_bits = (unsigned long *)&shinfo->evtchn_pending; in kvm_xen_set_evtchn_fast()
1832 mask_bits = (unsigned long *)&shinfo->evtchn_mask; in kvm_xen_set_evtchn_fast()
1833 port_word_bit = xe->port / 32; in kvm_xen_set_evtchn_fast()
1838 * we try to set the corresponding bit in the in-kernel shadow of evtchn_pending_sel for the target vCPU. in kvm_xen_set_evtchn_fast()
1843 if (test_and_set_bit(xe->port, pending_bits)) { in kvm_xen_set_evtchn_fast()
1845 } else if (test_bit(xe->port, mask_bits)) { in kvm_xen_set_evtchn_fast()
1846 rc = -ENOTCONN; /* Masked */ in kvm_xen_set_evtchn_fast()
1847 kvm_xen_check_poller(vcpu, xe->port); in kvm_xen_set_evtchn_fast()
1851 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1852 gpc = &vcpu->arch.xen.vcpu_info_cache; in kvm_xen_set_evtchn_fast()
1854 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1857 * Could not access the vcpu_info. Set the bit in-kernel and prod the vCPU to deliver it for itself. in kvm_xen_set_evtchn_fast()
1860 if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel)) in kvm_xen_set_evtchn_fast()
1865 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { in kvm_xen_set_evtchn_fast()
1866 struct vcpu_info *vcpu_info = gpc->khva; in kvm_xen_set_evtchn_fast()
1867 if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) { in kvm_xen_set_evtchn_fast()
1868 WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1); in kvm_xen_set_evtchn_fast()
1872 struct compat_vcpu_info *vcpu_info = gpc->khva; in kvm_xen_set_evtchn_fast()
1874 (unsigned long *)&vcpu_info->evtchn_pending_sel)) { in kvm_xen_set_evtchn_fast()
1875 WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1); in kvm_xen_set_evtchn_fast()
1880 /* For the per-vCPU lapic vector, deliver it as MSI. */ in kvm_xen_set_evtchn_fast()
1881 if (kick_vcpu && vcpu->arch.xen.upcall_vector) { in kvm_xen_set_evtchn_fast()
1888 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1889 srcu_read_unlock(&kvm->srcu, idx); in kvm_xen_set_evtchn_fast()
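
The indexing above is the whole of the 2-level scheme: bit (port % word-size) of evtchn_pending[port / word-size] in shared_info, plus selector bit (port / word-size) in the vCPU's evtchn_pending_sel. A tiny worked example:

#include <stdint.h>

/* For port 1234 on a 64-bit guest:
 *   word = 1234 / 64 = 19  ->  selector bit 19 in evtchn_pending_sel
 *   bit  = 1234 % 64 = 18  ->  bit 18 of shinfo->evtchn_pending[19]
 */
static void evtchn_2l_coords(uint32_t port, uint32_t bits_per_word,
			     uint32_t *word, uint32_t *bit)
{
	*word = port / bits_per_word;
	*bit  = port % bits_per_word;
}
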
1905 if (rc != -EWOULDBLOCK) in kvm_xen_set_evtchn()
1908 if (current->mm != kvm->mm) { in kvm_xen_set_evtchn()
1913 if (WARN_ON_ONCE(current->mm)) in kvm_xen_set_evtchn()
1914 return -EINVAL; in kvm_xen_set_evtchn()
1916 kthread_use_mm(kvm->mm); in kvm_xen_set_evtchn()
1935 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_set_evtchn()
1939 if (rc != -EWOULDBLOCK) in kvm_xen_set_evtchn()
1942 idx = srcu_read_lock(&kvm->srcu); in kvm_xen_set_evtchn()
1944 srcu_read_unlock(&kvm->srcu, idx); in kvm_xen_set_evtchn()
1948 kthread_unuse_mm(kvm->mm); in kvm_xen_set_evtchn()
1958 return -EINVAL; in evtchn_set_fn()
1960 return kvm_xen_set_evtchn(&e->xen_evtchn, kvm); in evtchn_set_fn()
1978 * configure MSIs which target non-existent APICs. in kvm_xen_setup_evtchn()
1989 if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) in kvm_xen_setup_evtchn()
1990 return -EINVAL; in kvm_xen_setup_evtchn()
2000 vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu); in kvm_xen_setup_evtchn()
2002 e->xen_evtchn.vcpu_idx = vcpu->vcpu_idx; in kvm_xen_setup_evtchn()
2004 e->xen_evtchn.vcpu_idx = -1; in kvm_xen_setup_evtchn()
2006 e->xen_evtchn.port = ue->u.xen_evtchn.port; in kvm_xen_setup_evtchn()
2007 e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu; in kvm_xen_setup_evtchn()
2008 e->xen_evtchn.priority = ue->u.xen_evtchn.priority; in kvm_xen_setup_evtchn()
2009 e->set = evtchn_set_fn; in kvm_xen_setup_evtchn()
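
Userspace creates these routes with KVM_SET_GSI_ROUTING. A sketch of filling one entry (the gsi value is illustrative):

#include <string.h>
#include <linux/kvm.h>

static void fill_xen_evtchn_route(struct kvm_irq_routing_entry *e,
				  __u32 gsi, __u32 port, __u32 vcpu)
{
	memset(e, 0, sizeof(*e));
	e->gsi = gsi;
	e->type = KVM_IRQ_ROUTING_XEN_EVTCHN;
	e->u.xen_evtchn.port = port;
	e->u.xen_evtchn.vcpu = vcpu;
	e->u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
}
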
2022 if (!uxe->port || uxe->port >= max_evtchn_port(kvm)) in kvm_xen_hvm_evtchn_send()
2023 return -EINVAL; in kvm_xen_hvm_evtchn_send()
2026 if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) in kvm_xen_hvm_evtchn_send()
2027 return -EINVAL; in kvm_xen_hvm_evtchn_send()
2029 e.port = uxe->port; in kvm_xen_hvm_evtchn_send()
2030 e.vcpu_id = uxe->vcpu; in kvm_xen_hvm_evtchn_send()
2031 e.vcpu_idx = -1; in kvm_xen_hvm_evtchn_send()
2032 e.priority = uxe->priority; in kvm_xen_hvm_evtchn_send()
2038 * We don't care if it was masked (-ENOTCONN) either. in kvm_xen_hvm_evtchn_send()
2040 if (ret > 0 || ret == -ENOTCONN) in kvm_xen_hvm_evtchn_send()
2067 u32 port = data->u.evtchn.send_port; in kvm_xen_eventfd_update()
2072 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_update()
2073 evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port); in kvm_xen_eventfd_update()
2075 ret = -ENOENT; in kvm_xen_eventfd_update()
2080 ret = -EINVAL; in kvm_xen_eventfd_update()
2081 if (evtchnfd->type != data->u.evtchn.type) in kvm_xen_eventfd_update()
2088 if (!evtchnfd->deliver.port.port || in kvm_xen_eventfd_update()
2089 evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port) in kvm_xen_eventfd_update()
2093 if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) in kvm_xen_eventfd_update()
2096 evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority; in kvm_xen_eventfd_update()
2097 if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) { in kvm_xen_eventfd_update()
2098 evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu; in kvm_xen_eventfd_update()
2099 evtchnfd->deliver.port.vcpu_idx = -1; in kvm_xen_eventfd_update()
2103 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_update()
2114 u32 port = data->u.evtchn.send_port; in kvm_xen_eventfd_assign()
2117 int ret = -EINVAL; in kvm_xen_eventfd_assign()
2121 return -ENOMEM; in kvm_xen_eventfd_assign()
2123 switch (data->u.evtchn.type) { in kvm_xen_eventfd_assign()
2126 if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port) in kvm_xen_eventfd_assign()
2127 goto out_noeventfd; /* -EINVAL */ in kvm_xen_eventfd_assign()
2131 if (data->u.evtchn.deliver.port.port) { in kvm_xen_eventfd_assign()
2132 if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm)) in kvm_xen_eventfd_assign()
2133 goto out_noeventfd; /* -EINVAL */ in kvm_xen_eventfd_assign()
2135 eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd); in kvm_xen_eventfd_assign()
2148 goto out; /* -EINVAL */ in kvm_xen_eventfd_assign()
2151 evtchnfd->send_port = data->u.evtchn.send_port; in kvm_xen_eventfd_assign()
2152 evtchnfd->type = data->u.evtchn.type; in kvm_xen_eventfd_assign()
2154 evtchnfd->deliver.eventfd.ctx = eventfd; in kvm_xen_eventfd_assign()
2157 if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) in kvm_xen_eventfd_assign()
2158 goto out; /* -EINVAL */ in kvm_xen_eventfd_assign()
2160 evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port; in kvm_xen_eventfd_assign()
2161 evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu; in kvm_xen_eventfd_assign()
2162 evtchnfd->deliver.port.vcpu_idx = -1; in kvm_xen_eventfd_assign()
2163 evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority; in kvm_xen_eventfd_assign()
2166 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_assign()
2167 ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1, in kvm_xen_eventfd_assign()
2169 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_assign()
2173 if (ret == -ENOSPC) in kvm_xen_eventfd_assign()
2174 ret = -EEXIST; in kvm_xen_eventfd_assign()
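
A userspace sketch of the assign path: binding an outbound port to an eventfd so the VMM can service it. EVTCHNSTAT_interdomain comes from the Xen public headers, and leaving deliver.port.port zero selects eventfd delivery, as the code above shows:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int assign_evtchn_eventfd(int vm_fd, __u32 port, int efd)
{
	struct kvm_xen_hvm_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = KVM_XEN_ATTR_TYPE_EVTCHN;
	attr.u.evtchn.send_port = port;
	attr.u.evtchn.type = EVTCHNSTAT_interdomain;	/* from xen/event_channel.h */
	attr.u.evtchn.deliver.eventfd.port = 0;		/* 0 => deliver via eventfd */
	attr.u.evtchn.deliver.eventfd.fd = efd;

	return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
}
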
2187 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_deassign()
2188 evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port); in kvm_xen_eventfd_deassign()
2189 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_deassign()
2192 return -ENOENT; in kvm_xen_eventfd_deassign()
2194 synchronize_srcu(&kvm->srcu); in kvm_xen_eventfd_deassign()
2195 if (!evtchnfd->deliver.port.port) in kvm_xen_eventfd_deassign()
2196 eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx); in kvm_xen_eventfd_deassign()
2207 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_reset()
2214 idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) in kvm_xen_eventfd_reset()
2219 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_reset()
2220 return -ENOMEM; in kvm_xen_eventfd_reset()
2224 idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) { in kvm_xen_eventfd_reset()
2226 idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port); in kvm_xen_eventfd_reset()
2228 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_reset()
2230 synchronize_srcu(&kvm->srcu); in kvm_xen_eventfd_reset()
2232 while (n--) { in kvm_xen_eventfd_reset()
2234 if (!evtchnfd->deliver.port.port) in kvm_xen_eventfd_reset()
2235 eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx); in kvm_xen_eventfd_reset()
2245 u32 port = data->u.evtchn.send_port; in kvm_xen_setattr_evtchn()
2247 if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET) in kvm_xen_setattr_evtchn()
2251 return -EINVAL; in kvm_xen_setattr_evtchn()
2253 if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN) in kvm_xen_setattr_evtchn()
2255 if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE) in kvm_xen_setattr_evtchn()
2257 if (data->u.evtchn.flags) in kvm_xen_setattr_evtchn()
2258 return -EINVAL; in kvm_xen_setattr_evtchn()
2269 /* Sanity check: this structure is the same for 32-bit and 64-bit */ in kvm_xen_hcall_evtchn_send()
2272 *r = -EFAULT; in kvm_xen_hcall_evtchn_send()
2277 * evtchnfd is protected by kvm->srcu; the idr lookup instead in kvm_xen_hcall_evtchn_send()
2278 * is protected by RCU. in kvm_xen_hcall_evtchn_send()
2281 evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port); in kvm_xen_hcall_evtchn_send()
2286 if (evtchnfd->deliver.port.port) { in kvm_xen_hcall_evtchn_send()
2287 int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm); in kvm_xen_hcall_evtchn_send()
2288 if (ret < 0 && ret != -ENOTCONN) in kvm_xen_hcall_evtchn_send()
2291 eventfd_signal(evtchnfd->deliver.eventfd.ctx); in kvm_xen_hcall_evtchn_send()
2300 vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx; in kvm_xen_init_vcpu()
2301 vcpu->arch.xen.poll_evtchn = 0; in kvm_xen_init_vcpu()
2303 timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0); in kvm_xen_init_vcpu()
2304 hrtimer_setup(&vcpu->arch.xen.timer, xen_timer_callback, CLOCK_MONOTONIC, in kvm_xen_init_vcpu()
2307 kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm); in kvm_xen_init_vcpu()
2308 kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm); in kvm_xen_init_vcpu()
2309 kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm); in kvm_xen_init_vcpu()
2310 kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm); in kvm_xen_init_vcpu()
2318 kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache); in kvm_xen_destroy_vcpu()
2319 kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache); in kvm_xen_destroy_vcpu()
2320 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache); in kvm_xen_destroy_vcpu()
2321 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache); in kvm_xen_destroy_vcpu()
2323 timer_delete_sync(&vcpu->arch.xen.poll_timer); in kvm_xen_destroy_vcpu()
2328 mutex_init(&kvm->arch.xen.xen_lock); in kvm_xen_init_vm()
2329 idr_init(&kvm->arch.xen.evtchn_ports); in kvm_xen_init_vm()
2330 kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm); in kvm_xen_init_vm()
2338 kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache); in kvm_xen_destroy_vm()
2340 idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) { in kvm_xen_destroy_vm()
2341 if (!evtchnfd->deliver.port.port) in kvm_xen_destroy_vm()
2342 eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx); in kvm_xen_destroy_vm()
2345 idr_destroy(&kvm->arch.xen.evtchn_ports); in kvm_xen_destroy_vm()
2347 if (kvm->arch.xen.hvm_config.msr) in kvm_xen_destroy_vm()