// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#include "x86.h"
#include "xen.h"
#include "hyperv.h"
#include "lapic.h"

#include <linux/eventfd.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/version.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/sched.h>

#include "trace.h"

static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm);
static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);

DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);

static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct pvclock_wall_clock *wc;
	gpa_t gpa = gfn_to_gpa(gfn);
	u32 *wc_sec_hi;
	u32 wc_version;
	u64 wall_nsec;
	int ret = 0;
	int idx = srcu_read_lock(&kvm->srcu);

	if (gfn == GPA_INVALID) {
		kvm_gpc_deactivate(gpc);
		goto out;
	}

	do {
		ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
		if (ret)
			goto out;

		/*
		 * This code mirrors kvm_write_wall_clock() except that it writes
		 * directly through the pfn cache and doesn't mark the page dirty.
		 */
		wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);

		/* It could be invalid again already, so we need to check */
		read_lock_irq(&gpc->lock);

		if (gpc->valid)
			break;

		read_unlock_irq(&gpc->lock);
	} while (1);

	/* Paranoia checks on the 32-bit struct layout */
	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

#ifdef CONFIG_X86_64
	/* Paranoia checks on the 64-bit struct layout */
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->wc_sec_hi;
		wc = &shinfo->wc;
	} else
#endif
	{
		struct compat_shared_info *shinfo = gpc->khva;

		wc_sec_hi = &shinfo->arch.wc_sec_hi;
		wc = &shinfo->wc;
	}

	/* Increment and ensure an odd value */
	wc_version = wc->version = (wc->version + 1) | 1;
	smp_wmb();

	wc->nsec = do_div(wall_nsec, 1000000000);
	wc->sec = (u32)wall_nsec;
	*wc_sec_hi = wall_nsec >> 32;
	smp_wmb();

	wc->version = wc_version + 1;
	read_unlock_irq(&gpc->lock);

	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}

void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
	if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) {
		struct kvm_xen_evtchn e;

		e.vcpu_id = vcpu->vcpu_id;
		e.vcpu_idx = vcpu->vcpu_idx;
		e.port = vcpu->arch.xen.timer_virq;
		e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

		kvm_xen_set_evtchn(&e, vcpu->kvm);

		vcpu->arch.xen.timer_expires = 0;
		atomic_set(&vcpu->arch.xen.timer_pending, 0);
	}
}

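/*
 * Fires when the vCPU's Xen timer expires: mark the timer as pending and
 * kick the vCPU out of guest mode, so that kvm_xen_inject_timer_irqs() can
 * deliver the timer event channel from vCPU context.
 */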
static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
					     arch.xen.timer);
	if (atomic_read(&vcpu->arch.xen.timer_pending))
		return HRTIMER_NORESTART;

	atomic_inc(&vcpu->arch.xen.timer_pending);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}

static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
{
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
	vcpu->arch.xen.timer_expires = guest_abs;

	if (delta_ns <= 0) {
		xen_timer_callback(&vcpu->arch.xen.timer);
	} else {
		ktime_t ktime_now = ktime_get();
		hrtimer_start(&vcpu->arch.xen.timer,
			      ktime_add_ns(ktime_now, delta_ns),
			      HRTIMER_MODE_ABS_HARD);
	}
}

static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.xen.timer);
	vcpu->arch.xen.timer_expires = 0;
	atomic_set(&vcpu->arch.xen.timer_pending, 0);
}

static void kvm_xen_init_timer(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	vcpu->arch.xen.timer.function = xen_timer_callback;
}

static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	struct gfn_to_pfn_cache *gpc1 = &vx->runstate_cache;
	struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache;
	size_t user_len, user_len1, user_len2;
	struct vcpu_runstate_info rs;
	unsigned long flags;
	size_t times_ofs;
	uint8_t *update_bit = NULL;
	uint64_t entry_time;
	uint64_t *rs_times;
	int *rs_state;

	/*
	 * The only difference between 32-bit and 64-bit versions of the
	 * runstate struct is the alignment of uint64_t in 32-bit, which
	 * means that the 64-bit version has an additional 4 bytes of
	 * padding after the first field 'state'. Let's be really really
	 * paranoid about that, and matching it with our internal data
	 * structures that we memcpy into it...
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
#ifdef CONFIG_X86_64
	/*
	 * The 64-bit structure has 4 bytes of padding before 'state_entry_time'
	 * so each subsequent field is shifted by 4, and it's 4 bytes longer.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) + 4);
	BUILD_BUG_ON(sizeof(struct vcpu_runstate_info) != 0x2c + 4);
#endif
	/*
	 * The state field is in the same place at the start of both structs,
	 * and is the same size (int) as vx->current_runstate.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
		     offsetof(struct compat_vcpu_runstate_info, state));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));

	/*
	 * The state_entry_time field is 64 bits in both versions, and the
	 * XEN_RUNSTATE_UPDATE flag is in the top bit, which given that x86
	 * is little-endian means that it's in the last *byte* of the word.
	 * That detail is important later.
	 */
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
		     sizeof(uint64_t));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
		     sizeof(uint64_t));
	BUILD_BUG_ON((XEN_RUNSTATE_UPDATE >> 56) != 0x80);

	/*
	 * The time array is four 64-bit quantities in both versions, matching
	 * the vx->runstate_times and immediately following state_entry_time.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct vcpu_runstate_info, time) - sizeof(uint64_t));
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(uint64_t));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof_field(struct compat_vcpu_runstate_info, time));
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof(vx->runstate_times));

	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		user_len = sizeof(struct vcpu_runstate_info);
		times_ofs = offsetof(struct vcpu_runstate_info,
				     state_entry_time);
	} else {
		user_len = sizeof(struct compat_vcpu_runstate_info);
		times_ofs = offsetof(struct compat_vcpu_runstate_info,
				     state_entry_time);
	}

	/*
	 * There are basically no alignment constraints. The guest can set it
	 * up so it crosses from one page to the next, and at arbitrary byte
	 * alignment (and the 32-bit ABI doesn't align the 64-bit integers
	 * anyway, even if the overall struct had been 64-bit aligned).
	 */
	if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
		user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK);
		user_len2 = user_len - user_len1;
	} else {
		user_len1 = user_len;
		user_len2 = 0;
	}
	BUG_ON(user_len1 + user_len2 != user_len);

 retry:
	/*
	 * Attempt to obtain the GPC lock on *both* (if there are two)
	 * gfn_to_pfn caches that cover the region.
	 */
	read_lock_irqsave(&gpc1->lock, flags);
	while (!kvm_gpc_check(gpc1, user_len1)) {
		read_unlock_irqrestore(&gpc1->lock, flags);

		/* When invoked from kvm_sched_out() we cannot sleep */
		if (atomic)
			return;

		if (kvm_gpc_refresh(gpc1, user_len1))
			return;

		read_lock_irqsave(&gpc1->lock, flags);
	}

	if (likely(!user_len2)) {
		/*
		 * Set up three pointers directly to the runstate_info
		 * struct in the guest (via the GPC).
		 *
		 *  • @rs_state → state field
		 *  • @rs_times → state_entry_time field.
		 *  • @update_bit → last byte of state_entry_time, which
		 *    contains the XEN_RUNSTATE_UPDATE bit.
		 */
		rs_state = gpc1->khva;
		rs_times = gpc1->khva + times_ofs;
		if (v->kvm->arch.xen.runstate_update_flag)
			update_bit = ((void *)(&rs_times[1])) - 1;
	} else {
		/*
		 * The guest's runstate_info is split across two pages and we
		 * need to hold and validate both GPCs simultaneously. We can
		 * declare a lock ordering GPC1 > GPC2 because nothing else
		 * takes them more than one at a time.
		 */
		read_lock(&gpc2->lock);

		if (!kvm_gpc_check(gpc2, user_len2)) {
			read_unlock(&gpc2->lock);
			read_unlock_irqrestore(&gpc1->lock, flags);

			/* When invoked from kvm_sched_out() we cannot sleep */
			if (atomic)
				return;

			/*
			 * Use kvm_gpc_activate() here because if the runstate
			 * area was configured in 32-bit mode and only extends
			 * to the second page now because the guest changed to
			 * 64-bit mode, the second GPC won't have been set up.
			 */
			if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1,
					     user_len2))
				return;

			/*
			 * We dropped the lock on GPC1 so we have to go all the
			 * way back and revalidate that too.
			 */
			goto retry;
		}

		/*
		 * In this case, the runstate_info struct will be assembled on
		 * the kernel stack (compat or not as appropriate) and will
		 * be copied to GPC1/GPC2 with a dual memcpy. Set up the three
		 * rs pointers accordingly.
		 */
		rs_times = &rs.state_entry_time;

		/*
		 * The rs_state pointer points to the start of what we'll
		 * copy to the guest, which in the case of a compat guest
		 * is the 32-bit field that the compiler thinks is padding.
		 */
		rs_state = ((void *)rs_times) - times_ofs;

		/*
		 * The update_bit is still directly in the guest memory,
		 * via one GPC or the other.
		 */
		if (v->kvm->arch.xen.runstate_update_flag) {
			if (user_len1 >= times_ofs + sizeof(uint64_t))
				update_bit = gpc1->khva + times_ofs +
					sizeof(uint64_t) - 1;
			else
				update_bit = gpc2->khva + times_ofs +
					sizeof(uint64_t) - 1 - user_len1;
		}

#ifdef CONFIG_X86_64
		/*
		 * Don't leak kernel memory through the padding in the 64-bit
		 * version of the struct.
		 */
		memset(&rs, 0, offsetof(struct vcpu_runstate_info, state_entry_time));
#endif
	}

	/*
	 * First, set the XEN_RUNSTATE_UPDATE bit in the top bit of the
	 * state_entry_time field, directly in the guest. We need to set
	 * that (and write-barrier) before writing to the rest of the
	 * structure, and clear it last. Just as Xen does, we address the
	 * single *byte* in which it resides because it might be in a
	 * different cache line to the rest of the 64-bit word, due to
	 * the (lack of) alignment constraints.
	 */
	entry_time = vx->runstate_entry_time;
	if (update_bit) {
		entry_time |= XEN_RUNSTATE_UPDATE;
		*update_bit = (vx->runstate_entry_time | XEN_RUNSTATE_UPDATE) >> 56;
		smp_wmb();
	}

	/*
	 * Now assemble the actual structure, either on our kernel stack
	 * or directly in the guest according to how the rs_state and
	 * rs_times pointers were set up above.
	 */
	*rs_state = vx->current_runstate;
	rs_times[0] = entry_time;
	memcpy(rs_times + 1, vx->runstate_times, sizeof(vx->runstate_times));

	/* For the split case, we have to then copy it to the guest. */
	if (user_len2) {
		memcpy(gpc1->khva, rs_state, user_len1);
		memcpy(gpc2->khva, ((void *)rs_state) + user_len1, user_len2);
	}
	smp_wmb();

	/* Finally, clear the XEN_RUNSTATE_UPDATE bit. */
	if (update_bit) {
		entry_time &= ~XEN_RUNSTATE_UPDATE;
		*update_bit = entry_time >> 56;
		smp_wmb();
	}

	if (user_len2)
		read_unlock(&gpc2->lock);

	read_unlock_irqrestore(&gpc1->lock, flags);

	mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
	if (user_len2)
		mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
}

void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	u64 now = get_kvmclock_ns(v->kvm);
	u64 delta_ns = now - vx->runstate_entry_time;
	u64 run_delay = current->sched_info.run_delay;

	if (unlikely(!vx->runstate_entry_time))
		vx->current_runstate = RUNSTATE_offline;

	/*
	 * Time waiting for the scheduler isn't "stolen" if the
	 * vCPU wasn't running anyway.
	 */
	if (vx->current_runstate == RUNSTATE_running) {
		u64 steal_ns = run_delay - vx->last_steal;

		delta_ns -= steal_ns;

		vx->runstate_times[RUNSTATE_runnable] += steal_ns;
	}
	vx->last_steal = run_delay;

	vx->runstate_times[vx->current_runstate] += delta_ns;
	vx->current_runstate = state;
	vx->runstate_entry_time = now;

	if (vx->runstate_cache.active)
		kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable);
}

static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
{
	struct kvm_lapic_irq irq = { };
	int r;

	irq.dest_id = v->vcpu_id;
	irq.vector = v->arch.xen.upcall_vector;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.shorthand = APIC_DEST_NOSHORT;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.level = 1;

	/* The fast version will always work for physical unicast */
	WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
}

/*
 * On event channel delivery, the vcpu_info may not have been accessible.
 * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
 * need to be marked into the vcpu_info (and evtchn_upcall_pending set).
 * Do so now that we can sleep in the context of the vCPU to bring the
 * page in, and refresh the pfn cache for it.
 */
void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
{
	unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;

	if (!evtchn_pending_sel)
		return;

	/*
	 * Yes, this is an open-coded loop. But that's just what put_user()
	 * does anyway. Page it in and retry the instruction. We're just a
	 * little more honest about it.
	 */
	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
			return;

		read_lock_irqsave(&gpc->lock, flags);
	}

	/* Now gpc->khva is a valid kernel address for the vcpu_info */
	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
		struct vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orq %0, %1\n"
			     "notq %0\n"
			     LOCK_PREFIX "andq %0, %2\n"
			     : "=r" (evtchn_pending_sel),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	} else {
		u32 evtchn_pending_sel32 = evtchn_pending_sel;
		struct compat_vcpu_info *vi = gpc->khva;

		asm volatile(LOCK_PREFIX "orl %0, %1\n"
			     "notl %0\n"
			     LOCK_PREFIX "andl %0, %2\n"
			     : "=r" (evtchn_pending_sel32),
			       "+m" (vi->evtchn_pending_sel),
			       "+m" (v->arch.xen.evtchn_pending_sel)
			     : "0" (evtchn_pending_sel32));
		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
	}
	read_unlock_irqrestore(&gpc->lock, flags);

	/* For the per-vCPU lapic vector, deliver it as MSI. */
	if (v->arch.xen.upcall_vector)
		kvm_xen_inject_vcpu_vector(v);

	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}

int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
	unsigned long flags;
	u8 rc = 0;

	/*
	 * If the global upcall vector (HVMIRQ_callback_vector) is set and
	 * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
	 */

	/* No need for compat handling here */
	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/*
		 * This function gets called from kvm_vcpu_block() after setting the
		 * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
		 * from a HLT. So we really mustn't sleep. If the page ended up absent
		 * at that point, just return 1 in order to trigger an immediate wake,
		 * and we'll end up getting called again from a context where we *can*
		 * fault in the page and wait for it.
		 */
		if (in_atomic() || !task_is_running(current))
			return 1;

		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) {
			/*
			 * If this failed, userspace has screwed up the
			 * vcpu_info mapping. No interrupts for you.
			 */
			return 0;
		}
		read_lock_irqsave(&gpc->lock, flags);
	}

	rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
	read_unlock_irqrestore(&gpc->lock, flags);
	return rc;
}

int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
			r = -EINVAL;
		} else {
			mutex_lock(&kvm->lock);
			kvm->arch.xen.long_mode = !!data->u.long_mode;
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		mutex_lock(&kvm->lock);
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		mutex_unlock(&kvm->lock);
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			mutex_lock(&kvm->lock);
			kvm->arch.xen.upcall_vector = data->u.vector;
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_EVTCHN:
		r = kvm_xen_setattr_evtchn(kvm, data);
		break;

	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		mutex_lock(&kvm->lock);
		kvm->arch.xen.xen_version = data->u.xen_version;
		mutex_unlock(&kvm->lock);
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		mutex_lock(&kvm->lock);
		kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag;
		mutex_unlock(&kvm->lock);
		r = 0;
		break;

	default:
		break;
	}

	return r;
}

int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (kvm->arch.xen.shinfo_cache.active)
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
		else
			data->u.shared_info.gfn = GPA_INVALID;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = kvm->arch.xen.upcall_vector;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
		data->u.xen_version = kvm->arch.xen.xen_version;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate_update_flag = kvm->arch.xen.runstate_update_flag;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int idx, r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);
	idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		/* No compat necessary here. */
		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
			     sizeof(struct compat_vcpu_info));
		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
			     offsetof(struct compat_vcpu_info, time));

		if (data->u.gpa == GPA_INVALID) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
			r = 0;
			break;
		}

		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
				     data->u.gpa, sizeof(struct vcpu_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == GPA_INVALID) {
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
			r = 0;
			break;
		}

		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache,
				     data->u.gpa,
				     sizeof(struct pvclock_vcpu_time_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR: {
		size_t sz, sz1, sz2;

		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.gpa == GPA_INVALID) {
			r = 0;
	deactivate_out:
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
			break;
		}

		/*
		 * If the guest switches to 64-bit mode after setting the runstate
		 * address, that's actually OK. kvm_xen_update_runstate_guest()
		 * will cope.
		 */
		if (IS_ENABLED(CONFIG_64BIT) && vcpu->kvm->arch.xen.long_mode)
			sz = sizeof(struct vcpu_runstate_info);
		else
			sz = sizeof(struct compat_vcpu_runstate_info);

		/* How much fits in the (first) page? */
		sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK);
		r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache,
				     data->u.gpa, sz1);
		if (r)
			goto deactivate_out;

		/* Either map the second page, or deactivate the second GPC */
		if (sz1 >= sz) {
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
		} else {
			sz2 = sz - sz1;
			BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK);
			r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache,
					     data->u.gpa + sz1, sz2);
			if (r)
				goto deactivate_out;
		}

		kvm_xen_update_runstate_guest(vcpu, false);
		break;
	}
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}

		kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}
		if (get_kvmclock_ns(vcpu->kvm) <
		    data->u.runstate.state_entry_time) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.current_runstate = data->u.runstate.state;
		vcpu->arch.xen.runstate_entry_time =
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] =
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
			data->u.runstate.time_offline;
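		/* Record the current steal-time baseline used by kvm_xen_update_runstate() */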
		vcpu->arch.xen.last_steal = current->sched_info.run_delay;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline &&
		    data->u.runstate.state != (u64)-1) {
			r = -EINVAL;
			break;
		}
		/* The adjustment must add up */
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}

		if (get_kvmclock_ns(vcpu->kvm) <
		    (vcpu->arch.xen.runstate_entry_time +
		     data->u.runstate.state_entry_time)) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.runstate_entry_time +=
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
			data->u.runstate.time_offline;

		if (data->u.runstate.state <= RUNSTATE_offline)
			kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		else if (vcpu->arch.xen.runstate_cache.active)
			kvm_xen_update_runstate_guest(vcpu, false);
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		if (data->u.vcpu_id >= KVM_MAX_VCPUS)
			r = -EINVAL;
		else {
			vcpu->arch.xen.vcpu_id = data->u.vcpu_id;
			r = 0;
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		if (data->u.timer.port &&
		    data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
			r = -EINVAL;
			break;
		}

		if (!vcpu->arch.xen.timer.function)
			kvm_xen_init_timer(vcpu);

		/* Stop the timer (if it's running) before changing the vector */
		kvm_xen_stop_timer(vcpu);
		vcpu->arch.xen.timer_virq = data->u.timer.port;

		/* Start the timer if the new value has a valid vector+expiry. */
		if (data->u.timer.port && data->u.timer.expires_ns)
			kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
					    data->u.timer.expires_ns -
					    get_kvmclock_ns(vcpu->kvm));

		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			vcpu->arch.xen.upcall_vector = data->u.vector;
			r = 0;
		}
		break;

	default:
		break;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		if (vcpu->arch.xen.vcpu_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (vcpu->arch.xen.vcpu_time_info_cache.active)
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (vcpu->arch.xen.runstate_cache.active) {
			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
			r = 0;
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		data->u.runstate.state_entry_time =
			vcpu->arch.xen.runstate_entry_time;
		data->u.runstate.time_running =
			vcpu->arch.xen.runstate_times[RUNSTATE_running];
		data->u.runstate.time_runnable =
			vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
		data->u.runstate.time_blocked =
			vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
		data->u.runstate.time_offline =
			vcpu->arch.xen.runstate_times[RUNSTATE_offline];
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		r = -EINVAL;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
		data->u.vcpu_id = vcpu->arch.xen.vcpu_id;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
		data->u.timer.port = vcpu->arch.xen.timer_virq;
		data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
		data->u.timer.expires_ns = vcpu->arch.xen.timer_expires;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = vcpu->arch.xen.upcall_vector;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);

	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;

	/*
	 * If Xen hypercall intercept is enabled, fill the hypercall
	 * page with VMCALL/VMMCALL instructions since that's what
	 * we catch. Else the VMM has provided the hypercall pages
	 * with instructions of its own choosing, so use those.
	 */
	if (kvm_xen_hypercall_enabled(kvm)) {
		u8 instructions[32];
		int i;

		if (page_num)
			return 1;

		/* mov imm32, %eax */
		instructions[0] = 0xb8;

		/* vmcall / vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + 5);

		/* ret */
		instructions[8] = 0xc3;

		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
	} else {
		/*
		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
		 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
		 */
		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				     : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;

		if (page_num >= blob_size)
			return 1;

		blob_addr += page_num * PAGE_SIZE;

		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
			kfree(page);
			return 1;
		}
	}
	return 0;
}

int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
	/* Only some feature flags need to be *enabled* by userspace */
	u32 permitted_flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
		KVM_XEN_HVM_CONFIG_EVTCHN_SEND;

	if (xhc->flags & ~permitted_flags)
		return -EINVAL;

	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);

	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_rax_write(vcpu, result);
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
		return 1;

	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}

static inline int max_evtchn_port(struct kvm *kvm)
{
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
		return EVTCHN_2L_NR_CHANNELS;
	else
		return COMPAT_EVTCHN_2L_NR_CHANNELS;
}

static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
			       evtchn_port_t *ports)
{
	struct kvm *kvm = vcpu->kvm;
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	unsigned long *pending_bits;
	unsigned long flags;
	bool ret = true;
	int idx, i;

	idx = srcu_read_lock(&kvm->srcu);
	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;

	ret = false;
	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
	}

	for (i = 0; i < nr_ports; i++) {
		if (test_bit(ports[i], pending_bits)) {
			ret = true;
			break;
		}
	}

 out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);

	return ret;
}

static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
				 u64 param, u64 *r)
{
	int idx, i;
	struct sched_poll sched_poll;
	evtchn_port_t port, *ports;
	gpa_t gpa;

	if (!lapic_in_kernel(vcpu) ||
	    !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
		return false;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	if (!gpa) {
		*r = -EFAULT;
		return true;
	}

	if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
		struct compat_sched_poll sp32;

		/* Sanity check that the compat struct definition is correct */
		BUILD_BUG_ON(sizeof(sp32) != 16);

		if (kvm_vcpu_read_guest(vcpu, gpa, &sp32, sizeof(sp32))) {
			*r = -EFAULT;
			return true;
		}

		/*
		 * This is a 32-bit pointer to an array of evtchn_port_t which
		 * are uint32_t, so once it's converted no further compat
		 * handling is needed.
		 */
		sched_poll.ports = (void *)(unsigned long)(sp32.ports);
		sched_poll.nr_ports = sp32.nr_ports;
		sched_poll.timeout = sp32.timeout;
	} else {
		if (kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
					sizeof(sched_poll))) {
			*r = -EFAULT;
			return true;
		}
	}

	if (unlikely(sched_poll.nr_ports > 1)) {
		/* Xen (unofficially) limits number of pollers to 128 */
		if (sched_poll.nr_ports > 128) {
			*r = -EINVAL;
			return true;
		}

		ports = kmalloc_array(sched_poll.nr_ports,
				      sizeof(*ports), GFP_KERNEL);
		if (!ports) {
			*r = -ENOMEM;
			return true;
		}
	} else
		ports = &port;

	for (i = 0; i < sched_poll.nr_ports; i++) {
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		gpa = kvm_mmu_gva_to_gpa_system(vcpu,
						(gva_t)(sched_poll.ports + i),
						NULL);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		if (!gpa || kvm_vcpu_read_guest(vcpu, gpa,
						&ports[i], sizeof(port))) {
			*r = -EFAULT;
			goto out;
		}
		if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
			*r = -EINVAL;
			goto out;
		}
	}

	if (sched_poll.nr_ports == 1)
		vcpu->arch.xen.poll_evtchn = port;
	else
		vcpu->arch.xen.poll_evtchn = -1;

	set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);

	if (!wait_pending_event(vcpu, sched_poll.nr_ports, ports)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;

		if (sched_poll.timeout)
			mod_timer(&vcpu->arch.xen.poll_timer,
				  jiffies + nsecs_to_jiffies(sched_poll.timeout));

		kvm_vcpu_halt(vcpu);

		if (sched_poll.timeout)
			del_timer(&vcpu->arch.xen.poll_timer);

		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}

	vcpu->arch.xen.poll_evtchn = 0;
	*r = 0;
out:
	/* Really, this is only needed in case of timeout */
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);

	if (unlikely(sched_poll.nr_ports > 1))
		kfree(ports);
	return true;
}

static void cancel_evtchn_poll(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.xen.poll_timer);

	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);
}

static bool kvm_xen_hcall_sched_op(struct kvm_vcpu *vcpu, bool longmode,
				   int cmd, u64 param, u64 *r)
{
	switch (cmd) {
	case SCHEDOP_poll:
		if (kvm_xen_schedop_poll(vcpu, longmode, param, r))
			return true;
		fallthrough;
	case SCHEDOP_yield:
		kvm_vcpu_on_spin(vcpu, true);
		*r = 0;
		return true;
	default:
		break;
	}

	return false;
}

struct compat_vcpu_set_singleshot_timer {
	uint64_t timeout_abs_ns;
	uint32_t flags;
} __attribute__((packed));

static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
				  int vcpu_id, u64 param, u64 *r)
{
	struct vcpu_set_singleshot_timer oneshot;
	s64 delta;
	gpa_t gpa;
	int idx;

	if (!kvm_xen_timer_enabled(vcpu))
		return false;

	switch (cmd) {
	case VCPUOP_set_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		/*
		 * The only difference for 32-bit compat is the 4 bytes of
		 * padding after the interesting part of the structure. So
		 * for a faithful emulation of Xen we have to *try* to copy
		 * the padding and return -EFAULT if we can't. Otherwise we
		 * might as well just have copied the 12-byte 32-bit struct.
		 */
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     offsetof(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, timeout_abs_ns));
		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, flags) !=
			     offsetof(struct vcpu_set_singleshot_timer, flags));
		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
			     sizeof_field(struct vcpu_set_singleshot_timer, flags));

		if (!gpa ||
		    kvm_vcpu_read_guest(vcpu, gpa, &oneshot, longmode ? sizeof(oneshot) :
					sizeof(struct compat_vcpu_set_singleshot_timer))) {
			*r = -EFAULT;
			return true;
		}

		delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm);
		if ((oneshot.flags & VCPU_SSHOTTMR_future) && delta < 0) {
			*r = -ETIME;
			return true;
		}

		kvm_xen_start_timer(vcpu, oneshot.timeout_abs_ns, delta);
		*r = 0;
		return true;

	case VCPUOP_stop_singleshot_timer:
		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
			*r = -EINVAL;
			return true;
		}
		kvm_xen_stop_timer(vcpu);
		*r = 0;
		return true;
	}

	return false;
}

static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout,
				       u64 *r)
{
	if (!kvm_xen_timer_enabled(vcpu))
		return false;

	if (timeout) {
		uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
		int64_t delta = timeout - guest_now;

		/* Xen has a 'Linux workaround' in do_set_timer_op() which
		 * checks for negative absolute timeout values (caused by
		 * integer overflow), and for values about 13 days in the
		 * future (2^50ns) which would be caused by jiffies
		 * overflow. For those cases, it sets the timeout 100ms in
		 * the future (not *too* soon, since if a guest really did
		 * set a long timeout on purpose we don't want to keep
		 * churning CPU time by waking it up).
		 */
		if (unlikely((int64_t)timeout < 0 ||
			     (delta > 0 && (uint32_t) (delta >> 50) != 0))) {
			delta = 100 * NSEC_PER_MSEC;
			timeout = guest_now + delta;
		}

		kvm_xen_start_timer(vcpu, timeout, delta);
	} else {
		kvm_xen_stop_timer(vcpu);
	}

	*r = 0;
	return true;
}

int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
	bool longmode;
	u64 input, params[6], r = -ENOSYS;
	bool handled = false;
	u8 cpl;

	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	longmode = is_64_bit_hypercall(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
	cpl = static_call(kvm_x86_get_cpl)(vcpu);
	trace_kvm_xen_hypercall(cpl, input, params[0], params[1], params[2],
				params[3], params[4], params[5]);

	/*
	 * Only allow hypercall acceleration for CPL0. The rare hypercalls that
	 * are permitted in guest userspace can be handled by the VMM.
	 */
	if (unlikely(cpl > 0))
		goto handle_in_userspace;

	switch (input) {
	case __HYPERVISOR_xen_version:
		if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) {
			r = vcpu->kvm->arch.xen.xen_version;
			handled = true;
		}
		break;
	case __HYPERVISOR_event_channel_op:
		if (params[0] == EVTCHNOP_send)
			handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r);
		break;
	case __HYPERVISOR_sched_op:
		handled = kvm_xen_hcall_sched_op(vcpu, longmode, params[0],
						 params[1], &r);
		break;
	case __HYPERVISOR_vcpu_op:
		handled = kvm_xen_hcall_vcpu_op(vcpu, longmode, params[0], params[1],
						params[2], &r);
		break;
	case __HYPERVISOR_set_timer_op: {
		u64 timeout = params[0];
		/* In 32-bit mode, the 64-bit timeout is in two 32-bit params. */
		if (!longmode)
			timeout |= params[1] << 32;
		handled = kvm_xen_hcall_set_timer_op(vcpu, timeout, &r);
		break;
	}
	default:
		break;
	}

	if (handled)
		return kvm_xen_hypercall_set_result(vcpu, r);

handle_in_userspace:
	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = cpl;
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;

	return 0;
}

static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
{
	int poll_evtchn = vcpu->arch.xen.poll_evtchn;

	if ((poll_evtchn == port || poll_evtchn == -1) &&
	    test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}

/*
 * The return value from this function is propagated to kvm_set_irq() API,
 * so it returns:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 *
 * It is also called directly from kvm_arch_set_irq_inatomic(), where the
 * only check on its return value is a comparison with -EWOULDBLOCK'.
 */
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
	struct kvm_vcpu *vcpu;
	unsigned long *pending_bits, *mask_bits;
	unsigned long flags;
	int port_word_bit;
	bool kick_vcpu = false;
	int vcpu_idx, idx, rc;

	vcpu_idx = READ_ONCE(xe->vcpu_idx);
	if (vcpu_idx >= 0)
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	else {
		vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
		if (!vcpu)
			return -EINVAL;
		WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
	}

	if (!vcpu->arch.xen.vcpu_info_cache.active)
		return -EINVAL;

	if (xe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	rc = -EWOULDBLOCK;

	idx = srcu_read_lock(&kvm->srcu);

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
		struct shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 64;
	} else {
		struct compat_shared_info *shinfo = gpc->khva;
		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
		port_word_bit = xe->port / 32;
	}

	/*
	 * If this port wasn't already set, and if it isn't masked, then
	 * we try to set the corresponding bit in the in-kernel shadow of
	 * evtchn_pending_sel for the target vCPU. And if *that* wasn't
	 * already set, then we kick the vCPU in question to write to the
	 * *real* evtchn_pending_sel in its own guest vcpu_info struct.
	 */
	if (test_and_set_bit(xe->port, pending_bits)) {
		rc = 0; /* It was already raised */
	} else if (test_bit(xe->port, mask_bits)) {
		rc = -ENOTCONN; /* Masked */
		kvm_xen_check_poller(vcpu, xe->port);
	} else {
		rc = 1; /* Delivered to the bitmap in shared_info. */
		/* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
		read_unlock_irqrestore(&gpc->lock, flags);
		gpc = &vcpu->arch.xen.vcpu_info_cache;

		read_lock_irqsave(&gpc->lock, flags);
		if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
			/*
			 * Could not access the vcpu_info. Set the bit in-kernel
			 * and prod the vCPU to deliver it for itself.
			 */
			if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
				kick_vcpu = true;
			goto out_rcu;
		}

		if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
			struct vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		} else {
			struct compat_vcpu_info *vcpu_info = gpc->khva;
			if (!test_and_set_bit(port_word_bit,
					      (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
				kick_vcpu = true;
			}
		}

		/* For the per-vCPU lapic vector, deliver it as MSI. */
		if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
			kvm_xen_inject_vcpu_vector(vcpu);
			kick_vcpu = false;
		}
	}

 out_rcu:
	read_unlock_irqrestore(&gpc->lock, flags);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kick_vcpu) {
		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	return rc;
}

static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{
	bool mm_borrowed = false;
	int rc;

	rc = kvm_xen_set_evtchn_fast(xe, kvm);
	if (rc != -EWOULDBLOCK)
		return rc;

	if (current->mm != kvm->mm) {
		/*
		 * If not on a thread which already belongs to this KVM,
		 * we'd better be in the irqfd workqueue.
		 */
		if (WARN_ON_ONCE(current->mm))
			return -EINVAL;

		kthread_use_mm(kvm->mm);
		mm_borrowed = true;
	}

	/*
	 * For the irqfd workqueue, using the main kvm->lock mutex is
	 * fine since this function is invoked from kvm_set_irq() with
	 * no other lock held, no srcu. In future if it will be called
	 * directly from a vCPU thread (e.g. on hypercall for an IPI)
	 * then it may need to switch to using a leaf-node mutex for
	 * serializing the shared_info mapping.
	 */
	mutex_lock(&kvm->lock);

	/*
	 * It is theoretically possible for the page to be unmapped
	 * and the MMU notifier to invalidate the shared_info before
	 * we even get to use it. In that case, this looks like an
	 * infinite loop. It was tempting to do it via the userspace
	 * HVA instead... but that just *hides* the fact that it's
	 * an infinite loop, because if a fault occurs and it waits
	 * for the page to come back, it can *still* immediately
	 * fault and have to wait again, repeatedly.
	 *
	 * Conversely, the page could also have been reinstated by
	 * another thread before we even obtain the mutex above, so
	 * check again *first* before remapping it.
	 */
	do {
		struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
		int idx;

		rc = kvm_xen_set_evtchn_fast(xe, kvm);
		if (rc != -EWOULDBLOCK)
			break;

		idx = srcu_read_lock(&kvm->srcu);
		rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
		srcu_read_unlock(&kvm->srcu, idx);
	} while(!rc);

	mutex_unlock(&kvm->lock);

	if (mm_borrowed)
		kthread_unuse_mm(kvm->mm);

	return rc;
}

/* This is the version called from kvm_set_irq() as the .set function */
static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
			 int irq_source_id, int level, bool line_status)
{
	if (!level)
		return -EINVAL;

	return kvm_xen_set_evtchn(&e->xen_evtchn, kvm);
}

/*
 * Set up an event channel interrupt from the KVM IRQ routing table.
 * Used for e.g. PIRQ from passed through physical devices.
 */
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue)

{
	struct kvm_vcpu *vcpu;

	if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	/*
	 * Xen gives us interesting mappings from vCPU index to APIC ID,
	 * which means kvm_get_vcpu_by_id() has to iterate over all vCPUs
	 * to find it. Do that once at setup time, instead of every time.
	 * But beware that on live update / live migration, the routing
	 * table might be reinstated before the vCPU threads have finished
	 * recreating their vCPUs.
	 */
	vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
	if (vcpu)
		e->xen_evtchn.vcpu_idx = vcpu->vcpu_idx;
	else
		e->xen_evtchn.vcpu_idx = -1;

	e->xen_evtchn.port = ue->u.xen_evtchn.port;
	e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu;
	e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
	e->set = evtchn_set_fn;

	return 0;
}

/*
 * Explicit event sending from userspace with KVM_XEN_HVM_EVTCHN_SEND ioctl.
 */
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
{
	struct kvm_xen_evtchn e;
	int ret;

	if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	e.port = uxe->port;
	e.vcpu_id = uxe->vcpu;
	e.vcpu_idx = -1;
	e.priority = uxe->priority;

	ret = kvm_xen_set_evtchn(&e, kvm);

	/*
	 * None of that 'return 1 if it actually got delivered' nonsense.
	 * We don't care if it was masked (-ENOTCONN) either.
	 */
	if (ret > 0 || ret == -ENOTCONN)
		ret = 0;

	return ret;
}

/*
 * Support for *outbound* event channel events via the EVTCHNOP_send hypercall.
 */
struct evtchnfd {
	u32 send_port;
	u32 type;
	union {
		struct kvm_xen_evtchn port;
		struct {
			u32 port; /* zero */
			struct eventfd_ctx *ctx;
		} eventfd;
	} deliver;
};

/*
 * Update target vCPU or priority for a registered sending channel.
 */
static int kvm_xen_eventfd_update(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct evtchnfd *evtchnfd;

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
	mutex_unlock(&kvm->lock);

	if (!evtchnfd)
		return -ENOENT;

	/* For an UPDATE, nothing may change except the priority/vcpu */
	if (evtchnfd->type != data->u.evtchn.type)
		return -EINVAL;

	/*
	 * Port cannot change, and if it's zero that was an eventfd
	 * which can't be changed either.
	 */
	if (!evtchnfd->deliver.port.port ||
	    evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
		return -EINVAL;

	/* We only support 2 level event channels for now */
	if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

/*
 * Configure the target (eventfd or local port delivery) for sending on
 * a given event channel.
 */
static int kvm_xen_eventfd_assign(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;
	struct eventfd_ctx *eventfd = NULL;
	struct evtchnfd *evtchnfd = NULL;
	int ret = -EINVAL;

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
	if (!evtchnfd)
		return -ENOMEM;

	switch(data->u.evtchn.type) {
	case EVTCHNSTAT_ipi:
		/* IPI must map back to the same port# */
		if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
			goto out_noeventfd; /* -EINVAL */
		break;

	case EVTCHNSTAT_interdomain:
		if (data->u.evtchn.deliver.port.port) {
			if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
				goto out_noeventfd; /* -EINVAL */
		} else {
			eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
			if (IS_ERR(eventfd)) {
				ret = PTR_ERR(eventfd);
				goto out_noeventfd;
			}
		}
		break;

	case EVTCHNSTAT_virq:
	case EVTCHNSTAT_closed:
	case EVTCHNSTAT_unbound:
	case EVTCHNSTAT_pirq:
	default: /* Unknown event channel type */
		goto out; /* -EINVAL */
	}

	evtchnfd->send_port = data->u.evtchn.send_port;
	evtchnfd->type = data->u.evtchn.type;
	if (eventfd) {
		evtchnfd->deliver.eventfd.ctx = eventfd;
	} else {
		/* We only support 2 level event channels for now */
		if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
			goto out; /* -EINVAL; */

		evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port;
		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
		evtchnfd->deliver.port.vcpu_idx = -1;
		evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
	}

	mutex_lock(&kvm->lock);
	ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
			GFP_KERNEL);
	mutex_unlock(&kvm->lock);
	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
out:
	if (eventfd)
		eventfd_ctx_put(eventfd);
out_noeventfd:
	kfree(evtchnfd);
	return ret;
}

static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
{
	struct evtchnfd *evtchnfd;

	mutex_lock(&kvm->lock);
	evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
	mutex_unlock(&kvm->lock);

	if (!evtchnfd)
		return -ENOENT;

	if (kvm)
		synchronize_srcu(&kvm->srcu);
	if (!evtchnfd->deliver.port.port)
		eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
	kfree(evtchnfd);
	return 0;
}

static int kvm_xen_eventfd_reset(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd;
	int i;

	mutex_lock(&kvm->lock);
	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
		synchronize_srcu(&kvm->srcu);
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	mutex_unlock(&kvm->lock);

	return 0;
}

static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	u32 port = data->u.evtchn.send_port;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET)
		return kvm_xen_eventfd_reset(kvm);

	if (!port || port >= max_evtchn_port(kvm))
		return -EINVAL;

	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN)
		return kvm_xen_eventfd_deassign(kvm, port);
	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE)
		return kvm_xen_eventfd_update(kvm, data);
	if (data->u.evtchn.flags)
		return -EINVAL;

	return kvm_xen_eventfd_assign(kvm, data);
}

static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
{
	struct evtchnfd *evtchnfd;
	struct evtchn_send send;
	gpa_t gpa;
	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) {
		*r = -EFAULT;
		return true;
	}

	/* The evtchn_ports idr is protected by vcpu->kvm->srcu */
	evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
	if (!evtchnfd)
		return false;

	if (evtchnfd->deliver.port.port) {
		int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
		if (ret < 0 && ret != -ENOTCONN)
			return false;
	} else {
		eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
	}

	*r = 0;
	return true;
}

void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx;
	vcpu->arch.xen.poll_evtchn = 0;

	timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);

	kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
}

void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
	if (kvm_xen_timer_enabled(vcpu))
		kvm_xen_stop_timer(vcpu);

	kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);

	del_timer_sync(&vcpu->arch.xen.poll_timer);
}

void kvm_xen_init_vm(struct kvm *kvm)
{
	idr_init(&kvm->arch.xen.evtchn_ports);
	kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
}

void kvm_xen_destroy_vm(struct kvm *kvm)
{
	struct evtchnfd *evtchnfd;
	int i;

	kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);

	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		if (!evtchnfd->deliver.port.port)
			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
		kfree(evtchnfd);
	}
	idr_destroy(&kvm->arch.xen.evtchn_ports);

	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
}