// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#include "x86.h"
#include "xen.h"
#include "hyperv.h"
#include "lapic.h"

#include <linux/eventfd.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/version.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/sched.h>

#include "trace.h"

static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm);
static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);

DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);

static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct pvclock_wall_clock *wc;
	gpa_t gpa = gfn_to_gpa(gfn);
	u32 *wc_sec_hi;
	u32 wc_version;
	u64 wall_nsec;
	int ret = 0;
	int idx = srcu_read_lock(&kvm->srcu);

	if (gfn == GPA_INVALID) {
		kvm_gpc_deactivate(kvm, gpc);
		goto out;
	}

	do {
		ret = kvm_gpc_activate(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa,
				       PAGE_SIZE);
		if (ret)
			goto out;

		/*
		 * This code mirrors kvm_write_wall_clock() except that it writes
		 * directly through the pfn cache and doesn't mark the page dirty.
		 */
		wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);

		/* It could be invalid again already, so we need to check */
		read_lock_irq(&gpc->lock);

		if (gpc->valid)
			break;

		read_unlock_irq(&gpc->lock);
	} while (1);

	/* Paranoia checks on the 32-bit struct layout */
	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

#ifdef CONFIG_X86_64
	/* Paranoia checks on the 64-bit struct layout */
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->wc_sec_hi;
		wc = &shinfo->wc;
	} else
#endif
	{
		struct compat_shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->arch.wc_sec_hi;
		wc = &shinfo->wc;
	}

	/* Increment and ensure an odd value */
	wc_version = wc->version = (wc->version + 1) | 1;
	smp_wmb();

	wc->nsec = do_div(wall_nsec, 1000000000);
	wc->sec = (u32)wall_nsec;
	*wc_sec_hi = wall_nsec >> 32;
	smp_wmb();

	wc->version = wc_version + 1;
	read_unlock_irq(&gpc->lock);

	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}

void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
	if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) {
		struct kvm_xen_evtchn e;

		e.vcpu_id = vcpu->vcpu_id;
		e.vcpu_idx = vcpu->vcpu_idx;
		e.port = vcpu->arch.xen.timer_virq;
		e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

		kvm_xen_set_evtchn(&e, vcpu->kvm);

		vcpu->arch.xen.timer_expires = 0;
		atomic_set(&vcpu->arch.xen.timer_pending, 0);
	}
}

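/*
 * hrtimer callback for the Xen singleshot timer: mark a timer event as
 * pending and kick the vCPU so the timer VIRQ is injected from vCPU context.
 */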
static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
					     arch.xen.timer);
	if (atomic_read(&vcpu->arch.xen.timer_pending))
		return HRTIMER_NORESTART;

	atomic_inc(&vcpu->arch.xen.timer_pending);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}

static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
{
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
	vcpu->arch.xen.timer_expires = guest_abs;

	if (delta_ns <= 0) {
		xen_timer_callback(&vcpu->arch.xen.timer);
	} else {
		ktime_t ktime_now = ktime_get();
		hrtimer_start(&vcpu->arch.xen.timer,
			      ktime_add_ns(ktime_now, delta_ns),
			      HRTIMER_MODE_ABS_HARD);
	}
}

static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.xen.timer);
	vcpu->arch.xen.timer_expires = 0;
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
}

static void kvm_xen_init_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	vcpu->arch.xen.timer.function = xen_timer_callback;
}

static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	u64 now = get_kvmclock_ns(v->kvm);
	u64 delta_ns = now - vx->runstate_entry_time;
	u64 run_delay = current->sched_info.run_delay;

	if (unlikely(!vx->runstate_entry_time))
		vx->current_runstate = RUNSTATE_offline;

	/*
	 * Time waiting for the scheduler isn't "stolen" if the
	 * vCPU wasn't running anyway.
	 */
	if (vx->current_runstate == RUNSTATE_running) {
		u64 steal_ns = run_delay - vx->last_steal;

		delta_ns -= steal_ns;

		vx->runstate_times[RUNSTATE_runnable] += steal_ns;
	}
	vx->last_steal = run_delay;

	vx->runstate_times[vx->current_runstate] += delta_ns;
	vx->current_runstate = state;
	vx->runstate_entry_time = now;
}

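/*
 * Update the in-kernel runstate accounting and, if the guest has registered
 * a runstate area, copy the new state and cumulative times into it using
 * whichever of the 32-bit or 64-bit layouts the guest expects.
 */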
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	struct gfn_to_pfn_cache *gpc = &vx->runstate_cache;
	uint64_t *user_times;
	unsigned long flags;
	size_t user_len;
	int *user_state;

	kvm_xen_update_runstate(v, state);

	if (!vx->runstate_cache.active)
		return;

	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
		user_len = sizeof(struct vcpu_runstate_info);
	else
		user_len = sizeof(struct compat_vcpu_runstate_info);

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
					   user_len)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* When invoked from kvm_sched_out() we cannot sleep */
		if (state == RUNSTATE_runnable)
			return;

		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, user_len))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/*
	 * The only difference between 32-bit and 64-bit versions of the
	 * runstate struct is the alignment of uint64_t in 32-bit, which
	 * means that the 64-bit version has an additional 4 bytes of
	 * padding after the first field 'state'.
	 *
	 * So we use 'int __user *user_state' to point to the state field,
	 * and 'uint64_t __user *user_times' for runstate_entry_time. So
	 * the actual array of time[] in each state starts at user_times[1].
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
#ifdef CONFIG_X86_64
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) + 4);
#endif

	user_state = gpc->khva;

	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
		user_times = gpc->khva + offsetof(struct vcpu_runstate_info,
						  state_entry_time);
	else
		user_times = gpc->khva + offsetof(struct compat_vcpu_runstate_info,
						  state_entry_time);

	/*
	 * First write the updated state_entry_time at the appropriate
	 * location determined by 'offset'.
	 */
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
		     sizeof(user_times[0]));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
		     sizeof(user_times[0]));

	user_times[0] = vx->runstate_entry_time | XEN_RUNSTATE_UPDATE;
	smp_wmb();

	/*
	 * Next, write the new runstate. This is in the *same* place
	 * for 32-bit and 64-bit guests, asserted here for paranoia.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
		     offsetof(struct compat_vcpu_runstate_info, state));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));

	*user_state = vx->current_runstate;

	/*
	 * Write the actual runstate times immediately after the
	 * runstate_entry_time.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct vcpu_runstate_info, time) - sizeof(u64));
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof_field(struct compat_vcpu_runstate_info, time));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof(vx->runstate_times));

	memcpy(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times));
	smp_wmb();

	/*
	 * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
	 * runstate_entry_time field.
	 */
	user_times[0] &= ~XEN_RUNSTATE_UPDATE;
	smp_wmb();

	read_unlock_irqrestore(&gpc->lock, flags);

	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}

static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
{
	struct kvm_lapic_irq irq = { };
	int r;

	irq.dest_id = v->vcpu_id;
	irq.vector = v->arch.xen.upcall_vector;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.shorthand = APIC_DEST_NOSHORT;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.level = 1;

	/* The fast version will always work for physical unicast */
	WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
}

/*
 * On event channel delivery, the vcpu_info may not have been accessible.
 * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
 * need to be marked into the vcpu_info (and evtchn_upcall_pending set).
 * Do so now that we can sleep in the context of the vCPU to bring the
 * page in, and refresh the pfn cache for it.
 */
void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
{
	unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;

	if (!evtchn_pending_sel)
		return;

	/*
	 * Yes, this is an open-coded loop. But that's just what put_user()
	 * does anyway. Page it in and retry the instruction. We're just a
	 * little more honest about it.
	 */
	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
					   sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
						 sizeof(struct vcpu_info)))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* Now gpc->khva is a valid kernel address for the vcpu_info */
	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		struct vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orq %0, %1\n"
			     "notq %0\n"
			     LOCK_PREFIX "andq %0, %2\n"
			     : "=r" (evtchn_pending_sel),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	} else {
		u32 evtchn_pending_sel32 = evtchn_pending_sel;
		struct compat_vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orl %0, %1\n"
			     "notl %0\n"
			     LOCK_PREFIX "andl %0, %2\n"
			     : "=r" (evtchn_pending_sel32),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel32));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	}
	read_unlock_irqrestore(&gpc->lock, flags);

	/* For the per-vCPU lapic vector, deliver it as MSI. */
	if (v->arch.xen.upcall_vector)
		kvm_xen_inject_vcpu_vector(v);

	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}

int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;
	u8 rc = 0;

	/*
	 * If the global upcall vector (HVMIRQ_callback_vector) is set and
	 * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
	 */

	/* No need for compat handling here */
	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
					   sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/*
		 * This function gets called from kvm_vcpu_block() after setting the
		 * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
		 * from a HLT. So we really mustn't sleep. If the page ended up absent
		 * at that point, just return 1 in order to trigger an immediate wake,
		 * and we'll end up getting called again from a context where we *can*
		 * fault in the page and wait for it.
		 */
		if (in_atomic() || !task_is_running(current))
			return 1;

		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
						 sizeof(struct vcpu_info))) {
			/*
			 * If this failed, userspace has screwed up the
			 * vcpu_info mapping. No interrupts for you.
			 */
			return 0;
		}
		read_lock_irqsave(&gpc->lock, flags);
	}

	rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
	read_unlock_irqrestore(&gpc->lock, flags);
	return rc;
}

int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
			r = -EINVAL;
		} else {
			mutex_lock(&kvm->lock);
			kvm->arch.xen.long_mode = !!data->u.long_mode;
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		mutex_lock(&kvm->lock);
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		mutex_unlock(&kvm->lock);
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			mutex_lock(&kvm->lock);
			kvm->arch.xen.upcall_vector = data->u.vector;
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_EVTCHN:
		r = kvm_xen_setattr_evtchn(kvm, data);
		break;

	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		mutex_lock(&kvm->lock);
		kvm->arch.xen.xen_version = data->u.xen_version;
		mutex_unlock(&kvm->lock);
		r = 0;
		break;

	default:
		break;
	}

	return r;
}

int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (kvm->arch.xen.shinfo_cache.active)
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
		else
			data->u.shared_info.gfn = GPA_INVALID;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = kvm->arch.xen.upcall_vector;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		data->u.xen_version = kvm->arch.xen.xen_version;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int idx, r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);
	idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		/* No compat necessary here. */
		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
			     sizeof(struct compat_vcpu_info));
		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
			     offsetof(struct compat_vcpu_info, time));

		if (data->u.gpa == GPA_INVALID) {
			kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
			r = 0;
			break;
		}

		r = kvm_gpc_activate(vcpu->kvm,
				     &vcpu->arch.xen.vcpu_info_cache, NULL,
				     KVM_HOST_USES_PFN, data->u.gpa,
				     sizeof(struct vcpu_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == GPA_INVALID) {
			kvm_gpc_deactivate(vcpu->kvm,
					   &vcpu->arch.xen.vcpu_time_info_cache);
			r = 0;
			break;
		}

		r = kvm_gpc_activate(vcpu->kvm,
				     &vcpu->arch.xen.vcpu_time_info_cache,
				     NULL, KVM_HOST_USES_PFN, data->u.gpa,
				     sizeof(struct pvclock_vcpu_time_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.gpa == GPA_INVALID) {
			kvm_gpc_deactivate(vcpu->kvm,
					   &vcpu->arch.xen.runstate_cache);
			r = 0;
			break;
		}

		r = kvm_gpc_activate(vcpu->kvm, &vcpu->arch.xen.runstate_cache,
				     NULL, KVM_HOST_USES_PFN, data->u.gpa,
				     sizeof(struct vcpu_runstate_info));
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}

		kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}
		if (get_kvmclock_ns(vcpu->kvm) <
		    data->u.runstate.state_entry_time) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.current_runstate = data->u.runstate.state;
		vcpu->arch.xen.runstate_entry_time =
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] =
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
			data->u.runstate.time_offline;
		vcpu->arch.xen.last_steal = current->sched_info.run_delay;
		r = 0;
		break;

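	/*
	 * RUNSTATE_ADJUST adds the given deltas to the accumulated
	 * runstate times instead of replacing them outright.
	 */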
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline &&
		    data->u.runstate.state != (u64)-1) {
			r = -EINVAL;
			break;
		}
		/* The adjustment must add up */
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}

		if (get_kvmclock_ns(vcpu->kvm) <
		    (vcpu->arch.xen.runstate_entry_time +
		     data->u.runstate.state_entry_time)) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.runstate_entry_time +=
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
			data->u.runstate.time_offline;

		if (data->u.runstate.state <= RUNSTATE_offline)
			kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		if (data->u.vcpu_id >= KVM_MAX_VCPUS)
			r = -EINVAL;
		else {
			vcpu->arch.xen.vcpu_id = data->u.vcpu_id;
			r = 0;
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		if (data->u.timer.port &&
		    data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
			r = -EINVAL;
			break;
		}

		if (!vcpu->arch.xen.timer.function)
			kvm_xen_init_timer(vcpu);

		/* Stop the timer (if it's running) before changing the vector */
		kvm_xen_stop_timer(vcpu);
		vcpu->arch.xen.timer_virq = data->u.timer.port;

		/* Start the timer if the new value has a valid vector+expiry. */
		if (data->u.timer.port && data->u.timer.expires_ns)
			kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
					    data->u.timer.expires_ns -
					    get_kvmclock_ns(vcpu->kvm));

		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			vcpu->arch.xen.upcall_vector = data->u.vector;
			r = 0;
		}
		break;

	default:
		break;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		if (vcpu->arch.xen.vcpu_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (vcpu->arch.xen.vcpu_time_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (vcpu->arch.xen.runstate_cache.active) {
			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
			r = 0;
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		data->u.runstate.state_entry_time =
			vcpu->arch.xen.runstate_entry_time;
		data->u.runstate.time_running =
			vcpu->arch.xen.runstate_times[RUNSTATE_running];
		data->u.runstate.time_runnable =
			vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
		data->u.runstate.time_blocked =
			vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
		data->u.runstate.time_offline =
			vcpu->arch.xen.runstate_times[RUNSTATE_offline];
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		r = -EINVAL;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		data->u.vcpu_id = vcpu->arch.xen.vcpu_id;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		data->u.timer.port = vcpu->arch.xen.timer_virq;
		data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
		data->u.timer.expires_ns = vcpu->arch.xen.timer_expires;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = vcpu->arch.xen.upcall_vector;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

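/*
 * Called when the guest writes the hypercall-page MSR: populate the page
 * either with KVM's own VMCALL/VMMCALL stubs (when hypercall interception
 * is enabled) or from the blob supplied by the VMM.
 */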
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);

	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;

	/*
	 * If Xen hypercall intercept is enabled, fill the hypercall
	 * page with VMCALL/VMMCALL instructions since that's what
	 * we catch. Else the VMM has provided the hypercall pages
	 * with instructions of its own choosing, so use those.
	 */
	if (kvm_xen_hypercall_enabled(kvm)) {
		u8 instructions[32];
		int i;

		if (page_num)
			return 1;

		/* mov imm32, %eax */
		instructions[0] = 0xb8;

		/* vmcall / vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + 5);

		/* ret */
		instructions[8] = 0xc3;

		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
	} else {
		/*
		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
		 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
		 */
		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				     : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;

		if (page_num >= blob_size)
			return 1;

		blob_addr += page_num * PAGE_SIZE;

		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
			kfree(page);
			return 1;
		}
	}
	return 0;
}

int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
	/* Only some feature flags need to be *enabled* by userspace */
	u32 permitted_flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
		KVM_XEN_HVM_CONFIG_EVTCHN_SEND;

	if (xhc->flags & ~permitted_flags)
		return -EINVAL;

	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);

	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_rax_write(vcpu, result);
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
		return 1;

	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}

static inline int max_evtchn_port(struct kvm *kvm)
{
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
		return EVTCHN_2L_NR_CHANNELS;
	else
		return COMPAT_EVTCHN_2L_NR_CHANNELS;
}

static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
			       evtchn_port_t *ports)
{
	struct kvm *kvm = vcpu->kvm;
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	unsigned long *pending_bits;
	unsigned long flags;
	bool ret = true;
	int idx, i;

	read_lock_irqsave(&gpc->lock, flags);
	idx = srcu_read_lock(&kvm->srcu);
	if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
		goto out_rcu;

	ret = false;
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	}

	for (i = 0; i < nr_ports; i++) {
		if (test_bit(ports[i], pending_bits)) {
			ret = true;
			break;
		}
	}

out_rcu:
	srcu_read_unlock(&kvm->srcu, idx);
	read_unlock_irqrestore(&gpc->lock, flags);

	return ret;
}

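/*
 * Handle the SCHEDOP_poll hypercall: if none of the requested ports is
 * already pending, halt the vCPU (with an optional timeout) until one of
 * them is signalled or the poll is cancelled.
 */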
static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
				 u64 param, u64 *r)
{
	int idx, i;
	struct sched_poll sched_poll;
	evtchn_port_t port, *ports;
	gpa_t gpa;

	if (!longmode || !lapic_in_kernel(vcpu) ||
	    !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
		return false;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
					sizeof(sched_poll))) {
		*r = -EFAULT;
		return true;
	}

	if (unlikely(sched_poll.nr_ports > 1)) {
		/* Xen (unofficially) limits number of pollers to 128 */
		if (sched_poll.nr_ports > 128) {
			*r = -EINVAL;
			return true;
		}

		ports = kmalloc_array(sched_poll.nr_ports,
				      sizeof(*ports), GFP_KERNEL);
		if (!ports) {
			*r = -ENOMEM;
			return true;
		}
	} else
		ports = &port;

	for (i = 0; i < sched_poll.nr_ports; i++) {
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		gpa = kvm_mmu_gva_to_gpa_system(vcpu,
						(gva_t)(sched_poll.ports + i),
						NULL);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		if (!gpa || kvm_vcpu_read_guest(vcpu, gpa,
						&ports[i], sizeof(port))) {
			*r = -EFAULT;
			goto out;
		}
		if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
			*r = -EINVAL;
			goto out;
		}
	}

	if (sched_poll.nr_ports == 1)
		vcpu->arch.xen.poll_evtchn = port;
	else
		vcpu->arch.xen.poll_evtchn = -1;

	set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);

	if (!wait_pending_event(vcpu, sched_poll.nr_ports, ports)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;

		if (sched_poll.timeout)
			mod_timer(&vcpu->arch.xen.poll_timer,
				  jiffies + nsecs_to_jiffies(sched_poll.timeout));

		kvm_vcpu_halt(vcpu);

		if (sched_poll.timeout)
			del_timer(&vcpu->arch.xen.poll_timer);

		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}

	vcpu->arch.xen.poll_evtchn = 0;
	*r = 0;
out:
	/* Really, this is only needed in case of timeout */
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);

	if (unlikely(sched_poll.nr_ports > 1))
		kfree(ports);
	return true;
}

static void cancel_evtchn_poll(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.xen.poll_timer);

	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);
}

static bool kvm_xen_hcall_sched_op(struct kvm_vcpu *vcpu, bool longmode,
				   int cmd, u64 param, u64 *r)
{
	switch (cmd) {
	case SCHEDOP_poll:
		if (kvm_xen_schedop_poll(vcpu, longmode, param, r))
			return true;
		fallthrough;
	case SCHEDOP_yield:
		kvm_vcpu_on_spin(vcpu, true);
		*r = 0;
		return true;
	default:
		break;
	}

	return false;
}

struct compat_vcpu_set_singleshot_timer {
	uint64_t timeout_abs_ns;
	uint32_t flags;
} __attribute__((packed));

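/*
 * Handle VCPUOP_set_singleshot_timer and VCPUOP_stop_singleshot_timer,
 * which are only accepted for the calling vCPU's own Xen vCPU ID.
 */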
static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
				  int vcpu_id, u64 param, u64 *r)
{
	struct vcpu_set_singleshot_timer oneshot;
	s64 delta;
	gpa_t gpa;
	int idx;

	if (!kvm_xen_timer_enabled(vcpu))
		return false;

	switch (cmd) {
	case VCPUOP_set_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		/*
		 * The only difference for 32-bit compat is the 4 bytes of
		 * padding after the interesting part of the structure. So
		 * for a faithful emulation of Xen we have to *try* to copy
		 * the padding and return -EFAULT if we can't. Otherwise we
		 * might as well just have copied the 12-byte 32-bit struct.
		 */
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     offsetof(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, flags) !=
			     offsetof(struct vcpu_set_singleshot_timer, flags));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, flags));

		if (!gpa ||
		    kvm_vcpu_read_guest(vcpu, gpa, &oneshot, longmode ? sizeof(oneshot) :
					sizeof(struct compat_vcpu_set_singleshot_timer))) {
			*r = -EFAULT;
			return true;
		}

		delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm);
		if ((oneshot.flags & VCPU_SSHOTTMR_future) && delta < 0) {
			*r = -ETIME;
			return true;
		}

		kvm_xen_start_timer(vcpu, oneshot.timeout_abs_ns, delta);
		*r = 0;
		return true;

	case VCPUOP_stop_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}
		kvm_xen_stop_timer(vcpu);
		*r = 0;
		return true;
	}

	return false;
}

static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout,
				       u64 *r)
{
	if (!kvm_xen_timer_enabled(vcpu))
		return false;

	if (timeout) {
		uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
		int64_t delta = timeout - guest_now;

		/* Xen has a 'Linux workaround' in do_set_timer_op() which
		 * checks for negative absolute timeout values (caused by
		 * integer overflow), and for values about 13 days in the
		 * future (2^50ns) which would be caused by jiffies
		 * overflow. For those cases, it sets the timeout 100ms in
		 * the future (not *too* soon, since if a guest really did
		 * set a long timeout on purpose we don't want to keep
		 * churning CPU time by waking it up).
		 */
		if (unlikely((int64_t)timeout < 0 ||
			     (delta > 0 && (uint32_t)(delta >> 50) != 0))) {
			delta = 100 * NSEC_PER_MSEC;
			timeout = guest_now + delta;
		}

		kvm_xen_start_timer(vcpu, timeout, delta);
	} else {
		kvm_xen_stop_timer(vcpu);
	}

	*r = 0;
	return true;
}

int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
	bool longmode;
	u64 input, params[6], r = -ENOSYS;
	bool handled = false;
	u8 cpl;

	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	longmode = is_64_bit_hypercall(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
	cpl = static_call(kvm_x86_get_cpl)(vcpu);
	trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
				params[3], params[4], params[5]);

	/*
	 * Only allow hypercall acceleration for CPL0. The rare hypercalls that
	 * are permitted in guest userspace can be handled by the VMM.
	 */
	if (unlikely(cpl > 0))
		goto handle_in_userspace;

	switch (input) {
	case __HYPERVISOR_xen_version:
		if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) {
			r = vcpu->kvm->arch.xen.xen_version;
			handled = true;
		}
		break;
	case __HYPERVISOR_event_channel_op:
		if (params[0] == EVTCHNOP_send)
			handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r);
		break;
	case __HYPERVISOR_sched_op:
		handled = kvm_xen_hcall_sched_op(vcpu, longmode, params[0],
						 params[1], &r);
		break;
	case __HYPERVISOR_vcpu_op:
		handled = kvm_xen_hcall_vcpu_op(vcpu, longmode, params[0], params[1],
						params[2], &r);
		break;
	case __HYPERVISOR_set_timer_op: {
		u64 timeout = params[0];
		/* In 32-bit mode, the 64-bit timeout is in two 32-bit params. */
		if (!longmode)
			timeout |= params[1] << 32;
		handled = kvm_xen_hcall_set_timer_op(vcpu, timeout, &r);
		break;
	}
	default:
		break;
	}

	if (handled)
		return kvm_xen_hypercall_set_result(vcpu, r);

handle_in_userspace:
	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = cpl;
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;

	return 0;
}

static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
{
	int poll_evtchn = vcpu->arch.xen.poll_evtchn;

	if ((poll_evtchn == port || poll_evtchn == -1) &&
	    test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}

/*
 * The return value from this function is propagated to kvm_set_irq() API,
 * so it returns:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 *
 * It is also called directly from kvm_arch_set_irq_inatomic(), where the
 * only check on its return value is a comparison with -EWOULDBLOCK.
 */
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct kvm_vcpu *vcpu;
	unsigned long *pending_bits, *mask_bits;
	unsigned long flags;
	int port_word_bit;
	bool kick_vcpu = false;
	int vcpu_idx, idx, rc;

	vcpu_idx = READ_ONCE(xe->vcpu_idx);
	if (vcpu_idx >= 0)
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	else {
		vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
		if (!vcpu)
			return -EINVAL;
		WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
	}

	if (!vcpu->arch.xen.vcpu_info_cache.active)
		return -EINVAL;

	if (xe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	rc = -EWOULDBLOCK;

	idx = srcu_read_lock(&kvm->srcu);

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
		goto out_rcu;

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 64;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 32;
	}

	/*
	 * If this port wasn't already set, and if it isn't masked, then
	 * we try to set the corresponding bit in the in-kernel shadow of
	 * evtchn_pending_sel for the target vCPU. And if *that* wasn't
	 * already set, then we kick the vCPU in question to write to the
	 * *real* evtchn_pending_sel in its own guest vcpu_info struct.
	 */
	if (test_and_set_bit(xe->port, pending_bits)) {
		rc = 0; /* It was already raised */
	} else if (test_bit(xe->port, mask_bits)) {
		rc = -ENOTCONN; /* Masked */
		kvm_xen_check_poller(vcpu, xe->port);
	} else {
		rc = 1; /* Delivered to the bitmap in shared_info. */
		/* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
		read_unlock_irqrestore(&gpc->lock, flags);
		gpc = &vcpu->arch.xen.vcpu_info_cache;

		read_lock_irqsave(&gpc->lock, flags);
		if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
			/*
			 * Could not access the vcpu_info. Set the bit in-kernel
			 * and prod the vCPU to deliver it for itself.
			 */
			if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
				kick_vcpu = true;
			goto out_rcu;
		}

		if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
			struct vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		} else {
			struct compat_vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit,
					      (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		}

		/* For the per-vCPU lapic vector, deliver it as MSI. */
		if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
			kvm_xen_inject_vcpu_vector(vcpu);
			kick_vcpu = false;
		}
	}

out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kick_vcpu) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	return rc;
}

static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	bool mm_borrowed = false;
	int rc;

	rc = kvm_xen_set_evtchn_fast(xe, kvm);
	if (rc != -EWOULDBLOCK)
		return rc;

	if (current->mm != kvm->mm) {
		/*
		 * If not on a thread which already belongs to this KVM,
		 * we'd better be in the irqfd workqueue.
		 */
		if (WARN_ON_ONCE(current->mm))
			return -EINVAL;

		kthread_use_mm(kvm->mm);
		mm_borrowed = true;
	}

	/*
	 * For the irqfd workqueue, using the main kvm->lock mutex is
	 * fine since this function is invoked from kvm_set_irq() with
	 * no other lock held, no srcu. In future if it will be called
	 * directly from a vCPU thread (e.g. on hypercall for an IPI)
	 * then it may need to switch to using a leaf-node mutex for
	 * serializing the shared_info mapping.
	 */
	mutex_lock(&kvm->lock);

	/*
	 * It is theoretically possible for the page to be unmapped
	 * and the MMU notifier to invalidate the shared_info before
	 * we even get to use it. In that case, this looks like an
	 * infinite loop. It was tempting to do it via the userspace
	 * HVA instead... but that just *hides* the fact that it's
	 * an infinite loop, because if a fault occurs and it waits
	 * for the page to come back, it can *still* immediately
	 * fault and have to wait again, repeatedly.
	 *
	 * Conversely, the page could also have been reinstated by
	 * another thread before we even obtain the mutex above, so
	 * check again *first* before remapping it.
	 */
	do {
		struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
		int idx;

		rc = kvm_xen_set_evtchn_fast(xe, kvm);
		if (rc != -EWOULDBLOCK)
			break;

		idx = srcu_read_lock(&kvm->srcu);
		rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
		srcu_read_unlock(&kvm->srcu, idx);
	} while (!rc);

	mutex_unlock(&kvm->lock);

	if (mm_borrowed)
		kthread_unuse_mm(kvm->mm);

	return rc;
}

/* This is the version called from kvm_set_irq() as the .set function */
static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
			 int irq_source_id, int level, bool line_status)
{
	if (!level)
		return -EINVAL;

	return kvm_xen_set_evtchn(&e->xen_evtchn, kvm);
}

/*
 * Set up an event channel interrupt from the KVM IRQ routing table.
 * Used for e.g. PIRQ from passed through physical devices.
 */
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue)
{
	struct kvm_vcpu *vcpu;

	if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	/*
	 * Xen gives us interesting mappings from vCPU index to APIC ID,
	 * which means kvm_get_vcpu_by_id() has to iterate over all vCPUs
	 * to find it. Do that once at setup time, instead of every time.
	 * But beware that on live update / live migration, the routing
	 * table might be reinstated before the vCPU threads have finished
	 * recreating their vCPUs.
	 */
	vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
	if (vcpu)
		e->xen_evtchn.vcpu_idx = vcpu->vcpu_idx;
	else
		e->xen_evtchn.vcpu_idx = -1;

	e->xen_evtchn.port = ue->u.xen_evtchn.port;
	e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu;
	e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
	e->set = evtchn_set_fn;

	return 0;
}

/*
 * Explicit event sending from userspace with KVM_XEN_HVM_EVTCHN_SEND ioctl.
 */
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
{
	struct kvm_xen_evtchn e;
	int ret;

	if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	e.port = uxe->port;
	e.vcpu_id = uxe->vcpu;
	e.vcpu_idx = -1;
	e.priority = uxe->priority;

	ret = kvm_xen_set_evtchn(&e, kvm);

	/*
	 * None of that 'return 1 if it actually got delivered' nonsense.
	 * We don't care if it was masked (-ENOTCONN) either.
	 */
	if (ret > 0 || ret == -ENOTCONN)
		ret = 0;

	return ret;
}

/*
 * Support for *outbound* event channel events via the EVTCHNOP_send hypercall.
 */
struct evtchnfd {
	u32 send_port;
	u32 type;
	union {
		struct kvm_xen_evtchn port;
		struct {
			u32 port; /* zero */
			struct eventfd_ctx *ctx;
		} eventfd;
	} deliver;
};

/*
 * Update target vCPU or priority for a registered sending channel.
 */
static int kvm_xen_eventfd_update(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct evtchnfd *evtchnfd;

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
	mutex_unlock(&kvm->lock);

	if (!evtchnfd)
		return -ENOENT;

	/* For an UPDATE, nothing may change except the priority/vcpu */
	if (evtchnfd->type != data->u.evtchn.type)
		return -EINVAL;

	/*
	 * Port cannot change, and if it's zero that was an eventfd
	 * which can't be changed either.
	 */
	if (!evtchnfd->deliver.port.port ||
	    evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

/*
 * Configure the target (eventfd or local port delivery) for sending on
 * a given event channel.
 */
static int kvm_xen_eventfd_assign(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct eventfd_ctx *eventfd = NULL;
	struct evtchnfd *evtchnfd = NULL;
	int ret = -EINVAL;

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
	if (!evtchnfd)
		return -ENOMEM;

	switch (data->u.evtchn.type) {
	case EVTCHNSTAT_ipi:
		/* IPI must map back to the same port# */
		if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
			goto out_noeventfd; /* -EINVAL */
		break;

	case EVTCHNSTAT_interdomain:
		if (data->u.evtchn.deliver.port.port) {
			if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
				goto out_noeventfd; /* -EINVAL */
		} else {
			eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
			if (IS_ERR(eventfd)) {
				ret = PTR_ERR(eventfd);
				goto out_noeventfd;
			}
		}
		break;

	case EVTCHNSTAT_virq:
	case EVTCHNSTAT_closed:
	case EVTCHNSTAT_unbound:
	case EVTCHNSTAT_pirq:
	default: /* Unknown event channel type */
		goto out; /* -EINVAL */
	}

	evtchnfd->send_port = data->u.evtchn.send_port;
	evtchnfd->type = data->u.evtchn.type;
	if (eventfd) {
		evtchnfd->deliver.eventfd.ctx = eventfd;
	} else {
		/* We only support 2 level event channels for now */
		if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
			goto out; /* -EINVAL; */

		evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port;
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
		evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	}

	mutex_lock(&kvm->lock);
	ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
			GFP_KERNEL);
	mutex_unlock(&kvm->lock);
	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
out:
	if (eventfd)
		eventfd_ctx_put(eventfd);
out_noeventfd:
	kfree(evtchnfd);
	return ret;
}

static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
{
	struct evtchnfd *evtchnfd;

	mutex_lock(&kvm->lock);
	evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
	mutex_unlock(&kvm->lock);

	if (!evtchnfd)
		return -ENOENT;

	if (kvm)
		synchronize_srcu(&kvm->srcu);
	if (!evtchnfd->deliver.port.port)
		eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
	kfree(evtchnfd);
	return 0;
}

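/*
 * KVM_XEN_EVTCHN_RESET: drop every outbound port registered via
 * KVM_XEN_ATTR_TYPE_EVTCHN and release any associated eventfds.
 */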
static int kvm_xen_eventfd_reset(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd;
	int i;

	mutex_lock(&kvm->lock);
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
		synchronize_srcu(&kvm->srcu);
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	mutex_unlock(&kvm->lock);

	return 0;
}

static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET)
		return kvm_xen_eventfd_reset(kvm);

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN)
		return kvm_xen_eventfd_deassign(kvm, port);
	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE)
		return kvm_xen_eventfd_update(kvm, data);
	if (data->u.evtchn.flags)
		return -EINVAL;

	return kvm_xen_eventfd_assign(kvm, data);
}

static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
{
	struct evtchnfd *evtchnfd;
	struct evtchn_send send;
	gpa_t gpa;
	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) {
		*r = -EFAULT;
		return true;
	}

	/* The evtchn_ports idr is protected by vcpu->kvm->srcu */
	evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
	if (!evtchnfd)
		return false;

	if (evtchnfd->deliver.port.port) {
		int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
		if (ret < 0 && ret != -ENOTCONN)
			return false;
	} else {
		eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
	}

	*r = 0;
	return true;
}

void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx;
	vcpu->arch.xen.poll_evtchn = 0;

	timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);

	kvm_gpc_init(&vcpu->arch.xen.runstate_cache);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache);
}

void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
	if (kvm_xen_timer_enabled(vcpu))
		kvm_xen_stop_timer(vcpu);

	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.runstate_cache);
	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_time_info_cache);

	del_timer_sync(&vcpu->arch.xen.poll_timer);
}

void kvm_xen_init_vm(struct kvm *kvm)
{
	idr_init(&kvm->arch.xen.evtchn_ports);
	kvm_gpc_init(&kvm->arch.xen.shinfo_cache);
}

void kvm_xen_destroy_vm(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd;
	int i;

	kvm_gpc_deactivate(kvm, &kvm->arch.xen.shinfo_cache);

	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	idr_destroy(&kvm->arch.xen.evtchn_ports);

	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
}