/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Port from Qemu.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitops.h>

#include "ioapic.h"
#include "irq.h"

#include <linux/kvm_host.h>
#include "trace.h"

/* Rate-limited log for guest programming we model but do not implement. */
#define pr_pic_unimpl(fmt, ...)	\
	pr_err_ratelimited("pic: " fmt, ## __VA_ARGS__)

static void pic_irq_request(struct kvm *kvm, int level);

/* Serialize all access to the paired master/slave PIC state. */
static void pic_lock(struct kvm_pic *s)
	__acquires(&s->lock)
{
	spin_lock(&s->lock);
}

/*
 * Drop the PIC lock.  If an interrupt became pending while the lock was
 * held (wakeup_needed set), kick the first vCPU that can accept PIC
 * interrupts so it re-evaluates pending events.
 */
static void pic_unlock(struct kvm_pic *s)
	__releases(&s->lock)
{
	bool wakeup = s->wakeup_needed;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	s->wakeup_needed = false;

	spin_unlock(&s->lock);

	if (wakeup) {
		kvm_for_each_vcpu(i, vcpu, s->kvm) {
			if (kvm_apic_accept_pic_intr(vcpu)) {
				kvm_make_request(KVM_REQ_EVENT, vcpu);
				kvm_vcpu_kick(vcpu);
				return;
			}
		}
	}
}

/*
 * Clear bit @irq in the in-service register and run the ack notifiers.
 * @irq is local to @s (0-7); it is translated to the global pin number
 * (8-15) when @s is the slave PIC.
 */
static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
	s->isr &= ~(1 << irq);
	if (s != &s->pics_state->pics[0])
		irq += 8;
	/*
	 * We are dropping lock while calling ack notifiers since ack
	 * notifier callbacks for assigned devices call into PIC recursively.
	 * Other interrupt may be delivered to PIC while lock is dropped but
	 * it should be safe since PIC state is already updated at this stage.
	 */
	pic_unlock(s->pics_state);
	kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
	pic_lock(s->pics_state);
}

/*
 * set irq level. If an edge is detected, then the IRR is set to 1
 *
 * Returns -1 if the line is masked (IMR bit set), 1 if this call newly
 * raised the IRR bit, and 0 if the interrupt was already pending
 * (coalesced).
 */
static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
	int mask, ret = 1;
	mask = 1 << irq;
	if (s->elcr & mask)	/* level triggered */
		if (level) {
			ret = !(s->irr & mask);
			s->irr |= mask;
			s->last_irr |= mask;
		} else {
			s->irr &= ~mask;
			s->last_irr &= ~mask;
		}
	else	/* edge triggered */
		if (level) {
			if ((s->last_irr & mask) == 0) {
				ret = !(s->irr & mask);
				s->irr |= mask;
			}
			s->last_irr |= mask;
		} else
			s->last_irr &= ~mask;

	return (s->imr & mask) ? -1 : ret;
}

/*
 * return the highest priority found in mask (highest = smallest
 * number). Return 8 if no irq
 *
 * priority_add implements the 8259's rotating-priority scheme: pin
 * (priority + priority_add) & 7 is scanned first.
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
	int priority;
	if (mask == 0)
		return 8;
	priority = 0;
	while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
		priority++;
	return priority;
}

/*
 * return the pic wanted interrupt. return -1 if none
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
	int mask, cur_priority, priority;

	mask = s->irr & ~s->imr;
	priority = get_priority(s, mask);
	if (priority == 8)
		return -1;
	/*
	 * compute current priority. If special fully nested mode on the
	 * master, the IRQ coming from the slave is not taken into account
	 * for the priority computation.
	 */
	mask = s->isr;
	if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
		mask &= ~(1 << 2);	/* pin 2 = cascade from slave */
	cur_priority = get_priority(s, mask);
	if (priority < cur_priority)
		/*
		 * higher priority found: an irq should be generated
		 */
		return (priority + s->priority_add) & 7;
	else
		return -1;
}

/*
 * raise irq to CPU if necessary. must be called every time the active
 * irq may change
 */
static void pic_update_irq(struct kvm_pic *s)
{
	int irq2, irq;

	irq2 = pic_get_irq(&s->pics[1]);
	if (irq2 >= 0) {
		/*
		 * if irq request by slave pic, signal master PIC
		 * (pulse the cascade pin 2: raise then lower)
		 */
		pic_set_irq1(&s->pics[0], 2, 1);
		pic_set_irq1(&s->pics[0], 2, 0);
	}
	irq = pic_get_irq(&s->pics[0]);
	pic_irq_request(s->kvm, irq >= 0);
}

/* Locked wrapper around pic_update_irq() for external callers. */
void kvm_pic_update_irq(struct kvm_pic *s)
{
	pic_lock(s);
	pic_update_irq(s);
	pic_unlock(s);
}

/*
 * Set the level of a PIC input pin on behalf of an irq routing entry.
 * Returns the pic_set_irq1() result (-1 masked, 1 delivered, 0 coalesced).
 */
int kvm_pic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		    int irq_source_id, int level, bool line_status)
{
	struct kvm_pic *s = kvm->arch.vpic;
	int irq = e->irqchip.pin;
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= PIC_NUM_PINS);

	pic_lock(s);
	irq_level = __kvm_irq_line_state(&s->irq_states[irq],
					 irq_source_id, level);
	/* irq >> 3 selects master (0-7) or slave (8-15) chip. */
	ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, irq_level);
	pic_update_irq(s);
	trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
			      s->pics[irq >> 3].imr, ret == 0);
	pic_unlock(s);

	return ret;
}

/*
 * acknowledge interrupt 'irq'
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
	s->isr |= 1 << irq;
	/*
	 * We don't clear a level sensitive interrupt here
	 */
	if (!(s->elcr & (1 << irq)))
		s->irr &= ~(1 << irq);

	if (s->auto_eoi) {
		if (s->rotate_on_auto_eoi)
			s->priority_add = (irq + 1) & 7;
		pic_clear_isr(s, irq);
	}

}

/*
 * Deliver the highest-priority pending PIC interrupt to the CPU and
 * return its vector number (irq_base + pin).  Spurious interrupts are
 * reported as pin 7 of the respective chip, per 8259 behavior.
 */
int kvm_pic_read_irq(struct kvm *kvm)
{
	int irq, irq2, intno;
	struct kvm_pic *s = kvm->arch.vpic;

	s->output = 0;

	pic_lock(s);
	irq = pic_get_irq(&s->pics[0]);
	if (irq >= 0) {
		pic_intack(&s->pics[0], irq);
		if (irq == 2) {
			/* cascade: resolve the real source on the slave */
			irq2 = pic_get_irq(&s->pics[1]);
			if (irq2 >= 0)
				pic_intack(&s->pics[1], irq2);
			else
				/*
				 * spurious IRQ on slave controller
				 */
				irq2 = 7;
			intno = s->pics[1].irq_base + irq2;
		} else
			intno = s->pics[0].irq_base + irq;
	} else {
		/*
		 * spurious IRQ on host controller
		 */
		irq = 7;
		intno = s->pics[0].irq_base + irq;
	}
	pic_update_irq(s);
	pic_unlock(s);

	return intno;
}

/*
 * Reset one PIC chip to its post-ICW1 state.  Edge-triggered interrupts
 * that were latched (edge_irr) get their ack notifiers run, but only if
 * some vCPU actually accepts PIC interrupts.
 */
static void kvm_pic_reset(struct kvm_kpic_state *s)
{
	int irq;
	unsigned long i;
	struct kvm_vcpu *vcpu;
	u8 edge_irr = s->irr & ~s->elcr;
	bool found = false;

	s->last_irr = 0;
	s->irr &= s->elcr;	/* keep only level-triggered requests */
	s->imr = 0;
	s->priority_add = 0;
	s->special_mask = 0;
	s->read_reg_select = 0;
	if (!s->init4) {
		s->special_fully_nested_mode = 0;
		s->auto_eoi = 0;
	}
	s->init_state = 1;	/* expect ICW2 next */

	kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
		if (kvm_apic_accept_pic_intr(vcpu)) {
			found = true;
			break;
		}


	if (!found)
		return;

	for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
		if (edge_irr & (1 << irq))
			pic_clear_isr(s, irq);
}

/*
 * Handle a guest write to one PIC's two I/O ports.  addr bit 0 selects
 * the command port (ICW1/OCW2/OCW3) vs. the data port (ICW2-4/OCW1),
 * following the 8259A programming model.
 */
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	int priority, cmd, irq;

	addr &= 1;
	if (addr == 0) {
		if (val & 0x10) {
			/* ICW1: begin initialization sequence */
			s->init4 = val & 1;
			if (val & 0x02)
				pr_pic_unimpl("single mode not supported");
			if (val & 0x08)
				pr_pic_unimpl(
					"level sensitive irq not supported");
			kvm_pic_reset(s);
		} else if (val & 0x08) {
			/* OCW3 */
			if (val & 0x04)
				s->poll = 1;
			if (val & 0x02)
				s->read_reg_select = val & 1;
			if (val & 0x40)
				s->special_mask = (val >> 5) & 1;
		} else {
			/* OCW2: EOI / priority rotation commands */
			cmd = val >> 5;
			switch (cmd) {
			case 0:
			case 4:
				s->rotate_on_auto_eoi = cmd >> 2;
				break;
			case 1:	/* end of interrupt */
			case 5:
				priority = get_priority(s, s->isr);
				if (priority != 8) {
					irq = (priority + s->priority_add) & 7;
					if (cmd == 5)
						s->priority_add = (irq + 1) & 7;
					pic_clear_isr(s, irq);
					pic_update_irq(s->pics_state);
				}
				break;
			case 3:	/* specific EOI */
				irq = val & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			case 6:	/* set priority */
				irq = val & 7;
				s->priority_add = (val + 1) & 7;
				pic_update_irq(s->pics_state);
				break;
			case 7:	/* rotate on specific EOI */
				irq = val & 7;
				s->priority_add = (irq + 1) & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			default:
				break;	/* no operation */
			}
		}
	} else
		switch (s->init_state) {
		case 0: { /* normal mode */
			/* OCW1: new interrupt mask; notify on changed bits */
			u8 imr_diff = s->imr ^ val,
				off = (s == &s->pics_state->pics[0]) ? 0 : 8;
			s->imr = val;
			for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
				if (imr_diff & (1 << irq))
					kvm_fire_mask_notifiers(
						s->pics_state->kvm,
						SELECT_PIC(irq + off),
						irq + off,
						!!(s->imr & (1 << irq)));
			pic_update_irq(s->pics_state);
			break;
		}
		case 1:	/* ICW2: vector base */
			s->irq_base = val & 0xf8;
			s->init_state = 2;
			break;
		case 2:	/* ICW3: cascade wiring (value ignored) */
			if (s->init4)
				s->init_state = 3;
			else
				s->init_state = 0;
			break;
		case 3:	/* ICW4 */
			s->special_fully_nested_mode = (val >> 4) & 1;
			s->auto_eoi = (val >> 1) & 1;
			s->init_state = 0;
			break;
		}
}

/*
 * Poll-mode read: acknowledge and return the pending interrupt, with
 * bit 7 set if one was pending.  addr1 bit 7 distinguishes a slave-port
 * access (0xa0/0xa1), in which case the master's cascade pin 2 state is
 * cleared as well.
 */
static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
	int ret;

	ret = pic_get_irq(s);
	if (ret >= 0) {
		if (addr1 >> 7) {
			s->pics_state->pics[0].isr &= ~(1 << 2);
			s->pics_state->pics[0].irr &= ~(1 << 2);
		}
		s->irr &= ~(1 << ret);
		pic_clear_isr(s, ret);
		if (addr1 >> 7 || ret != 2)
			pic_update_irq(s->pics_state);
		/* Bit 7 is 1, means there's an interrupt */
		ret |= 0x80;
	} else {
		/* Bit 7 is 0, means there's no interrupt */
		ret = 0x07;
		pic_update_irq(s->pics_state);
	}

	return ret;
}

/*
 * Guest read of one PIC's I/O ports: poll result if polling was armed,
 * otherwise ISR/IRR from the command port or IMR from the data port.
 */
static u32 pic_ioport_read(void *opaque, u32 addr)
{
	struct kvm_kpic_state *s = opaque;
	int ret;

	if (s->poll) {
		ret = pic_poll_read(s, addr);
		s->poll = 0;	/* poll mode is one-shot */
	} else
		if ((addr & 1) == 0)
			if (s->read_reg_select)
				ret = s->isr;
			else
				ret = s->irr;
		else
			ret = s->imr;
	return ret;
}

/* Write the edge/level control register, masked to configurable bits. */
static void elcr_ioport_write(void *opaque, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	s->elcr = val & s->elcr_mask;
}

/* Read back the edge/level control register. */
static u32 elcr_ioport_read(void *opaque)
{
	struct kvm_kpic_state *s = opaque;
	return s->elcr;
}

/*
 * PIO write dispatch: 0x20/0x21 master, 0xa0/0xa1 slave, 0x4d0/0x4d1
 * ELCR.  Only single-byte accesses are emulated.
 */
static int picdev_write(struct kvm_pic *s,
			gpa_t addr, int len, const void *val)
{
	unsigned char data = *(unsigned char *)val;

	if (len != 1) {
		pr_pic_unimpl("non byte write\n");
		return 0;
	}
	switch (addr) {
	case 0x20:
	case 0x21:
		pic_lock(s);
		pic_ioport_write(&s->pics[0], addr, data);
		pic_unlock(s);
		break;
	case 0xa0:
	case 0xa1:
		pic_lock(s);
		pic_ioport_write(&s->pics[1], addr, data);
		pic_unlock(s);
		break;
	case 0x4d0:
	case 0x4d1:
		pic_lock(s);
		elcr_ioport_write(&s->pics[addr & 1], data);
		pic_unlock(s);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/*
 * PIO read dispatch, mirroring picdev_write().  addr >> 7 maps
 * 0x20/0x21 to the master (0) and 0xa0/0xa1 to the slave (1).
 */
static int picdev_read(struct kvm_pic *s,
		       gpa_t addr, int len, void *val)
{
	unsigned char *data = (unsigned char *)val;

	if (len != 1) {
		memset(val, 0, len);
		pr_pic_unimpl("non byte read\n");
		return 0;
	}
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		pic_lock(s);
		*data = pic_ioport_read(&s->pics[addr >> 7], addr);
		pic_unlock(s);
		break;
	case 0x4d0:
	case 0x4d1:
		pic_lock(s);
		*data = elcr_ioport_read(&s->pics[addr & 1]);
		pic_unlock(s);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/* kvm_io_device thunks: recover the kvm_pic from the embedded device. */

static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_master),
			    addr, len, val);
}

static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_master),
			   addr, len, val);
}

static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_slave),
			    addr, len, val);
}

static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_slave),
			   addr, len, val);
}

static int picdev_elcr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_elcr),
			    addr, len, val);
}

static int picdev_elcr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			    gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_elcr),
			   addr, len, val);
}

/*
 * callback when PIC0 irq status changed
 */
static void pic_irq_request(struct kvm *kvm, int level)
{
	struct kvm_pic *s = kvm->arch.vpic;

	/* only a 0 -> 1 transition needs a vCPU kick (see pic_unlock) */
	if (!s->output && level)
		s->wakeup_needed = true;
	s->output = level;
}

static const struct kvm_io_device_ops picdev_master_ops = {
	.read = picdev_master_read,
	.write = picdev_master_write,
};

static const struct kvm_io_device_ops picdev_slave_ops = {
	.read = picdev_slave_read,
	.write = picdev_slave_write,
};

static const struct kvm_io_device_ops picdev_elcr_ops = {
	.read = picdev_elcr_read,
	.write = picdev_elcr_write,
};

/*
 * Allocate the VM's virtual PIC and register its PIO regions (master,
 * slave, ELCR) on the KVM I/O bus.  Returns 0 or a negative errno;
 * on failure everything is rolled back and freed.
 */
int kvm_pic_init(struct kvm *kvm)
{
	struct kvm_pic *s;
	int ret;

	s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL_ACCOUNT);
	if (!s)
		return -ENOMEM;
	spin_lock_init(&s->lock);
	s->kvm = kvm;
	s->pics[0].elcr_mask = 0xf8;	/* IRQ0-2 are always edge on master */
	s->pics[1].elcr_mask = 0xde;	/* IRQ8/13 are always edge on slave */
	s->pics[0].pics_state = s;
	s->pics[1].pics_state = s;

	/*
	 * Initialize PIO device
	 */
	kvm_iodevice_init(&s->dev_master, &picdev_master_ops);
	kvm_iodevice_init(&s->dev_slave, &picdev_slave_ops);
	kvm_iodevice_init(&s->dev_elcr, &picdev_elcr_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x20, 2,
				      &s->dev_master);
	if (ret < 0)
		goto fail_unlock;

	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0xa0, 2, &s->dev_slave);
	if (ret < 0)
		goto fail_unreg_2;

	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_elcr);
	if (ret < 0)
		goto fail_unreg_1;

	mutex_unlock(&kvm->slots_lock);

	kvm->arch.vpic = s;

	return 0;

fail_unreg_1:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_slave);

fail_unreg_2:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_master);

fail_unlock:
	mutex_unlock(&kvm->slots_lock);

	kfree(s);

	return ret;
}

/* Tear down the virtual PIC: unregister its bus devices and free it. */
void kvm_pic_destroy(struct kvm *kvm)
{
	struct kvm_pic *vpic = kvm->arch.vpic;

	if (!vpic)
		return;

	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_elcr);
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.vpic = NULL;
	kfree(vpic);
}