/*
 * 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2006 Intel Corporation
 * Copyright (c) 2007 Keir Fraser, XenSource Inc
 * Copyright (c) 2008 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Sheng Yang <sheng.yang@intel.com>
 *   Based on QEMU and Xen.
 */

#define pr_fmt(fmt) "pit: " fmt

#include <linux/kvm_host.h>
#include <linux/slab.h>

#include "ioapic.h"
#include "irq.h"
#include "i8254.h"
#include "x86.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4

/* Compute with 96 bit intermediate result: (a*b)/c */
static u64 muldiv64(u64 a, u32 b, u32 c)
{
	union {
		u64 ll;
		struct {
			u32 low, high;
		} l;
	} u, res;
	u64 rl, rh;

	u.ll = a;
	rl = (u64)u.l.low * (u64)b;
	rh = (u64)u.l.high * (u64)b;
	rh += (rl >> 32);
	res.l.high = div64_u64(rh, c);
	res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
	return res.ll;
}

static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	switch (c->mode) {
	default:
	case 0:
	case 4:
		/* XXX: just disable/enable counting */
		break;
	case 1:
	case 2:
	case 3:
	case 5:
		/* Restart counting on rising edge. */
		if (c->gate < val)
			c->count_load_time = ktime_get();
		break;
	}

	c->gate = val;
}

static int pit_get_gate(struct kvm *kvm, int channel)
{
	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	return kvm->arch.vpit->pit_state.channels[channel].gate;
}

static s64 __kpit_elapsed(struct kvm *kvm)
{
	s64 elapsed;
	ktime_t remaining;
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

	if (!ps->period)
		return 0;

	/*
	 * The Counter does not stop when it reaches zero. In
	 * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to
	 * the highest count, either FFFF hex for binary counting
	 * or 9999 for BCD counting, and continues counting.
	 * Modes 2 and 3 are periodic; the Counter reloads
	 * itself with the initial count and continues counting
	 * from there.
	 */
	remaining = hrtimer_get_remaining(&ps->timer);
	elapsed = ps->period - ktime_to_ns(remaining);

	return elapsed;
}
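/*
 * Only channel 0 is backed by the hrtimer, so its elapsed time is derived
 * from the timer state above; channels 1 and 2 are computed directly from
 * the monotonic-clock delta since their count was last loaded.
 *
 * Illustrative numbers: with the PIT clocked at KVM_PIT_FREQ (1193182 Hz,
 * the PC's 1.193182 MHz timer crystal), a full 16-bit count of 0x10000
 * corresponds to a period of 65536 / 1193182 s, roughly 54.9 ms -- the
 * classic BIOS timer tick.
 */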
static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c,
			int channel)
{
	if (channel == 0)
		return __kpit_elapsed(kvm);

	return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}

static int pit_get_count(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];
	s64 d, t;
	int counter;

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	t = kpit_elapsed(kvm, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	case 0:
	case 1:
	case 4:
	case 5:
		counter = (c->count - d) & 0xffff;
		break;
	case 3:
		/* XXX: may be incorrect for odd counts */
		counter = c->count - (mod_64((2 * d), c->count));
		break;
	default:
		counter = c->count - mod_64(d, c->count);
		break;
	}
	return counter;
}

static int pit_get_out(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];
	s64 d, t;
	int out;

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	t = kpit_elapsed(kvm, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	default:
	case 0:
		out = (d >= c->count);
		break;
	case 1:
		out = (d < c->count);
		break;
	case 2:
		out = ((mod_64(d, c->count) == 0) && (d != 0));
		break;
	case 3:
		out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
		break;
	case 4:
	case 5:
		out = (d == c->count);
		break;
	}

	return out;
}

static void pit_latch_count(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	if (!c->count_latched) {
		c->latched_count = pit_get_count(kvm, channel);
		c->count_latched = c->rw_mode;
	}
}

static void pit_latch_status(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	if (!c->status_latched) {
		/* TODO: Return NULL COUNT (bit 6). */
		c->status = ((pit_get_out(kvm, channel) << 7) |
			     (c->rw_mode << 4) |
			     (c->mode << 1) |
			     c->bcd);
		c->status_latched = 1;
	}
}

static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
						 irq_ack_notifier);
	int value;

	spin_lock(&ps->inject_lock);
	value = atomic_dec_return(&ps->pending);
	if (value < 0)
		/* spurious acks can be generated if, for example, the
		 * PIC is being reset.  Handle it gracefully here
		 */
		atomic_inc(&ps->pending);
	else if (value > 0)
		/* in this case, we had multiple outstanding pit interrupts
		 * that we needed to inject.  Reinject
		 */
		queue_kthread_work(&ps->pit->worker, &ps->pit->expired);
	ps->irq_ack = 1;
	spin_unlock(&ps->inject_lock);
}
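/*
 * Called when the BSP is rescheduled onto another physical CPU: cancelling
 * and restarting the hrtimer with its saved expiry re-queues it on the CPU
 * the vCPU now runs on, so expirations keep firing close to the vCPU.
 * hrtimer_cancel() returns nonzero only if the timer was actually armed,
 * which is what gates the restart below.
 */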
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct hrtimer *timer;

	if (!kvm_vcpu_is_bsp(vcpu) || !pit)
		return;

	timer = &pit->pit_state.timer;
	mutex_lock(&pit->pit_state.lock);
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
	mutex_unlock(&pit->pit_state.lock);
}

static void destroy_pit_timer(struct kvm_pit *pit)
{
	hrtimer_cancel(&pit->pit_state.timer);
	flush_kthread_work(&pit->expired);
}

static void pit_do_work(struct kthread_work *work)
{
	struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
	struct kvm *kvm = pit->kvm;
	struct kvm_vcpu *vcpu;
	int i;
	struct kvm_kpit_state *ps = &pit->pit_state;
	int inject = 0;

	/* Only inject the next pending interrupt once the last
	 * one has been acked.
	 */
	spin_lock(&ps->inject_lock);
	if (ps->irq_ack) {
		ps->irq_ack = 0;
		inject = 1;
	}
	spin_unlock(&ps->inject_lock);
	if (inject) {
		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false);
		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false);

		/*
		 * Provides NMI watchdog support via Virtual Wire mode.
		 * The route is: PIT -> PIC -> LVT0 in NMI mode.
		 *
		 * Note: Our Virtual Wire implementation is simplified, only
		 * propagating PIT interrupts to all VCPUs when they have set
		 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
		 * VCPU0, and only if its LVT0 is in EXTINT mode.
		 */
		if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
			kvm_for_each_vcpu(i, vcpu, kvm)
				kvm_apic_nmi_wd_deliver(vcpu);
	}
}

static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
{
	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
	struct kvm_pit *pt = ps->kvm->arch.vpit;

	if (ps->reinject || !atomic_read(&ps->pending)) {
		atomic_inc(&ps->pending);
		queue_kthread_work(&pt->worker, &pt->expired);
	}

	if (ps->is_periodic) {
		hrtimer_add_expires_ns(&ps->timer, ps->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
{
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
	s64 interval;

	if (!ioapic_in_kernel(kvm) ||
	    ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
		return;

	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);

	pr_debug("create pit timer, interval is %llu nsec\n", interval);

	/* TODO: on real hardware a new count only takes effect once the
	 * counter is retriggered; here it is applied immediately. */
	hrtimer_cancel(&ps->timer);
	flush_kthread_work(&ps->pit->expired);
	ps->period = interval;
	ps->is_periodic = is_period;

	ps->timer.function = pit_timer_fn;
	ps->kvm = ps->pit->kvm;

	atomic_set(&ps->pending, 0);
	ps->irq_ack = 1;

	/*
	 * Do not allow the guest to program periodic timers with a small
	 * interval, since the hrtimers are not throttled by the host
	 * scheduler.
	 */
	if (ps->is_periodic) {
		s64 min_period = min_timer_period_us * 1000LL;

		if (ps->period < min_period) {
			pr_info_ratelimited(
			    "kvm: requested %lld ns "
			    "i8254 timer period limited to %lld ns\n",
			    ps->period, min_period);
			ps->period = min_period;
		}
	}

	hrtimer_start(&ps->timer, ktime_add_ns(ktime_get(), interval),
		      HRTIMER_MODE_ABS);
}
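/*
 * Worked example for the interval computation above (illustrative): a
 * guest that programs channel 0 with a count of 11932, the traditional
 * 100 Hz tick, gets
 *
 *	interval = 11932 * NSEC_PER_SEC / 1193182 ~= 10000151 ns,
 *
 * i.e. an hrtimer expiring roughly every 10 ms.
 */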
static void pit_load_count(struct kvm *kvm, int channel, u32 val)
{
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

	WARN_ON(!mutex_is_locked(&ps->lock));

	pr_debug("load_count val is %d, channel is %d\n", val, channel);

	/*
	 * The largest possible initial count is 0; this is equivalent
	 * to 2^16 for binary counting and 10^4 for BCD counting.
	 */
	if (val == 0)
		val = 0x10000;

	ps->channels[channel].count = val;

	if (channel != 0) {
		ps->channels[channel].count_load_time = ktime_get();
		return;
	}

	/* Channel 0 drives the injected timer: modes 0, 1 and 4 arm a
	 * one-shot timer, modes 2 and 3 a periodic one; any other mode
	 * tears the timer down. */
	switch (ps->channels[0].mode) {
	case 0:
	case 1:
		/* FIXME: enhance mode 4 precision */
	case 4:
		create_pit_timer(kvm, val, 0);
		break;
	case 2:
	case 3:
		create_pit_timer(kvm, val, 1);
		break;
	default:
		destroy_pit_timer(kvm->arch.vpit);
	}
}

void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start)
{
	u8 saved_mode;

	if (hpet_legacy_start) {
		/* save existing mode for later re-enablement */
		WARN_ON(channel != 0);
		saved_mode = kvm->arch.vpit->pit_state.channels[0].mode;
		kvm->arch.vpit->pit_state.channels[0].mode = 0xff; /* disable timer */
		pit_load_count(kvm, channel, val);
		kvm->arch.vpit->pit_state.channels[0].mode = saved_mode;
	} else {
		pit_load_count(kvm, channel, val);
	}
}

static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, dev);
}

static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, speaker_dev);
}

static inline int pit_in_range(gpa_t addr)
{
	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}

static int pit_ioport_write(struct kvm_vcpu *vcpu,
			    struct kvm_io_device *this,
			    gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	int channel, access;
	struct kvm_kpit_channel_state *s;
	u32 val = *(u32 *) data;

	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	val &= 0xff;
	addr &= KVM_PIT_CHANNEL_MASK;

	mutex_lock(&pit_state->lock);

	if (val != 0)
		pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n",
			 (unsigned int)addr, len, val);

	if (addr == 3) {
		/* Control word: bits 7-6 select the counter, bits 5-4 the
		 * access mode, bits 3-1 the counting mode, bit 0 binary/BCD. */
		channel = val >> 6;
		if (channel == 3) {
			/* Read-Back Command. */
			for (channel = 0; channel < 3; channel++) {
				s = &pit_state->channels[channel];
				if (val & (2 << channel)) {
					if (!(val & 0x20))
						pit_latch_count(kvm, channel);
					if (!(val & 0x10))
						pit_latch_status(kvm, channel);
				}
			}
		} else {
			/* Select Counter <channel>. */
			s = &pit_state->channels[channel];
			access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
			if (access == 0) {
				pit_latch_count(kvm, channel);
			} else {
				s->rw_mode = access;
				s->read_state = access;
				s->write_state = access;
				s->mode = (val >> 1) & 7;
				if (s->mode > 5)
					s->mode -= 4;
				s->bcd = val & 1;
			}
		}
	} else {
		/* Write Count. */
		s = &pit_state->channels[addr];
		switch (s->write_state) {
		default:
		case RW_STATE_LSB:
			pit_load_count(kvm, addr, val);
			break;
		case RW_STATE_MSB:
			pit_load_count(kvm, addr, val << 8);
			break;
		case RW_STATE_WORD0:
			s->write_latch = val;
			s->write_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			pit_load_count(kvm, addr, s->write_latch | (val << 8));
			s->write_state = RW_STATE_WORD0;
			break;
		}
	}

	mutex_unlock(&pit_state->lock);
	return 0;
}
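/*
 * Illustrative guest-side sequence (not part of this file) that exercises
 * the write path above:
 *
 *	outb(0x34, 0x43);	// control: channel 0, lobyte/hibyte, mode 2
 *	outb(0x9c, 0x40);	// count LSB (0x2e9c = 11932 -> ~100 Hz)
 *	outb(0x2e, 0x40);	// count MSB
 *
 * The control write sets write_state to RW_STATE_WORD0; the first data
 * write latches the LSB and advances to RW_STATE_WORD1, and the second
 * one calls pit_load_count() with the assembled 16-bit value.
 */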
static int pit_ioport_read(struct kvm_vcpu *vcpu,
			   struct kvm_io_device *this,
			   gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	int ret, count;
	struct kvm_kpit_channel_state *s;

	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	addr &= KVM_PIT_CHANNEL_MASK;
	if (addr == 3)
		return 0;

	s = &pit_state->channels[addr];

	mutex_lock(&pit_state->lock);

	if (s->status_latched) {
		s->status_latched = 0;
		ret = s->status;
	} else if (s->count_latched) {
		switch (s->count_latched) {
		default:
		case RW_STATE_LSB:
			ret = s->latched_count & 0xff;
			s->count_latched = 0;
			break;
		case RW_STATE_MSB:
			ret = s->latched_count >> 8;
			s->count_latched = 0;
			break;
		case RW_STATE_WORD0:
			ret = s->latched_count & 0xff;
			s->count_latched = RW_STATE_MSB;
			break;
		}
	} else {
		switch (s->read_state) {
		default:
		case RW_STATE_LSB:
			count = pit_get_count(kvm, addr);
			ret = count & 0xff;
			break;
		case RW_STATE_MSB:
			count = pit_get_count(kvm, addr);
			ret = (count >> 8) & 0xff;
			break;
		case RW_STATE_WORD0:
			count = pit_get_count(kvm, addr);
			ret = count & 0xff;
			s->read_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			count = pit_get_count(kvm, addr);
			ret = (count >> 8) & 0xff;
			s->read_state = RW_STATE_WORD0;
			break;
		}
	}

	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);

	mutex_unlock(&pit_state->lock);
	return 0;
}

static int speaker_ioport_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this,
				gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	u32 val = *(u32 *) data;

	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	mutex_lock(&pit_state->lock);
	pit_state->speaker_data_on = (val >> 1) & 1;
	pit_set_gate(kvm, 2, val & 1);
	mutex_unlock(&pit_state->lock);
	return 0;
}

static int speaker_ioport_read(struct kvm_vcpu *vcpu,
			       struct kvm_io_device *this,
			       gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	unsigned int refresh_clock;
	int ret;

	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	/* Refresh clock toggles at about 15us. We approximate as 2^14 ns. */
	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

	mutex_lock(&pit_state->lock);
	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
	       (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);
	mutex_unlock(&pit_state->lock);
	return 0;
}
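/*
 * Reset puts every channel into a quiescent state: mode 0xff is an
 * out-of-range sentinel meaning "nothing programmed" (pit_load_count()
 * then falls through to destroy_pit_timer() via the default case), the
 * gates of channels 0 and 1 are tied high as on real PC hardware, and
 * channel 2's gate starts low until the guest raises it through port 0x61.
 */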
void kvm_pit_reset(struct kvm_pit *pit)
{
	int i;
	struct kvm_kpit_channel_state *c;

	mutex_lock(&pit->pit_state.lock);
	pit->pit_state.flags = 0;
	for (i = 0; i < 3; i++) {
		c = &pit->pit_state.channels[i];
		c->mode = 0xff;
		c->gate = (i != 2);
		pit_load_count(pit->kvm, i, 0);
	}
	mutex_unlock(&pit->pit_state.lock);

	atomic_set(&pit->pit_state.pending, 0);
	pit->pit_state.irq_ack = 1;
}

static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
{
	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

	if (!mask) {
		atomic_set(&pit->pit_state.pending, 0);
		pit->pit_state.irq_ack = 1;
	}
}

static const struct kvm_io_device_ops pit_dev_ops = {
	.read = pit_ioport_read,
	.write = pit_ioport_write,
};

static const struct kvm_io_device_ops speaker_dev_ops = {
	.read = speaker_ioport_read,
	.write = speaker_ioport_write,
};

/* Caller must hold slots_lock */
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
	struct kvm_pit *pit;
	struct kvm_kpit_state *pit_state;
	struct pid *pid;
	pid_t pid_nr;
	int ret;

	pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
	if (!pit)
		return NULL;

	pit->irq_source_id = kvm_request_irq_source_id(kvm);
	if (pit->irq_source_id < 0) {
		kfree(pit);
		return NULL;
	}

	mutex_init(&pit->pit_state.lock);
	mutex_lock(&pit->pit_state.lock);
	spin_lock_init(&pit->pit_state.inject_lock);

	pid = get_pid(task_tgid(current));
	pid_nr = pid_vnr(pid);
	put_pid(pid);

	init_kthread_worker(&pit->worker);
	pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
				       "kvm-pit/%d", pid_nr);
	if (IS_ERR(pit->worker_task)) {
		mutex_unlock(&pit->pit_state.lock);
		kvm_free_irq_source_id(kvm, pit->irq_source_id);
		kfree(pit);
		return NULL;
	}
	init_kthread_work(&pit->expired, pit_do_work);

	kvm->arch.vpit = pit;
	pit->kvm = kvm;

	pit_state = &pit->pit_state;
	pit_state->pit = pit;
	hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	pit_state->irq_ack_notifier.gsi = 0;
	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
	kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
	pit_state->reinject = true;
	mutex_unlock(&pit->pit_state.lock);

	kvm_pit_reset(pit);

	pit->mask_notifier.func = pit_mask_notifier;
	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);

	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS,
				      KVM_PIT_MEM_LENGTH, &pit->dev);
	if (ret < 0)
		goto fail;

	if (flags & KVM_PIT_SPEAKER_DUMMY) {
		kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
		ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
					      KVM_SPEAKER_BASE_ADDRESS, 4,
					      &pit->speaker_dev);
		if (ret < 0)
			goto fail_unregister;
	}

	return pit;

fail_unregister:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);

fail:
	kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
	kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
	kvm_free_irq_source_id(kvm, pit->irq_source_id);
	kthread_stop(pit->worker_task);
	kfree(pit);
	return NULL;
}
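/*
 * Teardown mirrors creation in reverse: the I/O devices come off the bus
 * first so no vCPU can race into the handlers, then the notifiers are
 * unregistered, and only then are the hrtimer cancelled, the pending work
 * flushed and the worker thread stopped, all under pit_state.lock.
 */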
void kvm_free_pit(struct kvm *kvm)
{
	struct hrtimer *timer;

	if (kvm->arch.vpit) {
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &kvm->arch.vpit->dev);
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
					  &kvm->arch.vpit->speaker_dev);
		kvm_unregister_irq_mask_notifier(kvm, 0,
						 &kvm->arch.vpit->mask_notifier);
		kvm_unregister_irq_ack_notifier(kvm,
				&kvm->arch.vpit->pit_state.irq_ack_notifier);
		mutex_lock(&kvm->arch.vpit->pit_state.lock);
		timer = &kvm->arch.vpit->pit_state.timer;
		hrtimer_cancel(timer);
		flush_kthread_work(&kvm->arch.vpit->expired);
		kthread_stop(kvm->arch.vpit->worker_task);
		kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
		mutex_unlock(&kvm->arch.vpit->pit_state.lock);
		kfree(kvm->arch.vpit);
	}
}