/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Ported from Qemu.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include "irq.h"

#include <linux/kvm_host.h>
#include "trace.h"

static void pic_irq_request(struct kvm *kvm, int level);

static void pic_lock(struct kvm_pic *s)
	__acquires(&s->lock)
{
	spin_lock(&s->lock);
}

static void pic_unlock(struct kvm_pic *s)
	__releases(&s->lock)
{
	bool wakeup = s->wakeup_needed;
	struct kvm_vcpu *vcpu, *found = NULL;
	int i;

	s->wakeup_needed = false;

	spin_unlock(&s->lock);

	if (wakeup) {
		kvm_for_each_vcpu(i, vcpu, s->kvm) {
			if (kvm_apic_accept_pic_intr(vcpu)) {
				found = vcpu;
				break;
			}
		}

		if (!found)
			found = s->kvm->bsp_vcpu;

		if (!found)
			return;

		kvm_make_request(KVM_REQ_EVENT, found);
		kvm_vcpu_kick(found);
	}
}

static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
	s->isr &= ~(1 << irq);
	s->isr_ack |= (1 << irq);
	if (s != &s->pics_state->pics[0])
		irq += 8;
	/*
	 * We drop the lock while calling the ack notifiers because ack
	 * notifier callbacks for assigned devices call into the PIC
	 * recursively. Other interrupts may be delivered to the PIC while
	 * the lock is dropped, but that is safe since the PIC state has
	 * already been updated at this stage.
	 */
	pic_unlock(s->pics_state);
	kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
	pic_lock(s->pics_state);
}

void kvm_pic_clear_isr_ack(struct kvm *kvm)
{
	struct kvm_pic *s = pic_irqchip(kvm);

	pic_lock(s);
	s->pics[0].isr_ack = 0xff;
	s->pics[1].isr_ack = 0xff;
	pic_unlock(s);
}

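/*
 * Worked example of the latch logic in pic_set_irq1() below: with the
 * elcr bit clear (edge-triggered pin), two consecutive calls with
 * level == 1 latch the IRR bit only on the first call, because last_irr
 * still records the high level; a call with level == 0 is required to
 * rearm the edge detector.  If the pin is masked in IMR, the function
 * returns -1 so the caller can tell that nothing was queued.
 */
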
/*
 * Set the irq level. On an edge-triggered pin the IRR bit is only set
 * when a rising edge is detected.
 */
static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
	int mask, ret = 1;
	mask = 1 << irq;
	if (s->elcr & mask)	/* level triggered */
		if (level) {
			ret = !(s->irr & mask);
			s->irr |= mask;
			s->last_irr |= mask;
		} else {
			s->irr &= ~mask;
			s->last_irr &= ~mask;
		}
	else	/* edge triggered */
		if (level) {
			if ((s->last_irr & mask) == 0) {
				ret = !(s->irr & mask);
				s->irr |= mask;
			}
			s->last_irr |= mask;
		} else
			s->last_irr &= ~mask;

	return (s->imr & mask) ? -1 : ret;
}

/*
 * Return the highest priority found in mask (highest = smallest
 * number). Return 8 if no irq is pending.
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
	int priority;
	if (mask == 0)
		return 8;
	priority = 0;
	while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
		priority++;
	return priority;
}

/*
 * Return the interrupt the PIC wants to deliver, or -1 if none is pending.
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
	int mask, cur_priority, priority;

	mask = s->irr & ~s->imr;
	priority = get_priority(s, mask);
	if (priority == 8)
		return -1;
	/*
	 * Compute the current priority. If special fully nested mode is on
	 * the master, the IRQ coming from the slave is not taken into
	 * account for the priority computation.
	 */
	mask = s->isr;
	if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
		mask &= ~(1 << 2);
	cur_priority = get_priority(s, mask);
	if (priority < cur_priority)
		/*
		 * higher priority found: an irq should be generated
		 */
		return (priority + s->priority_add) & 7;
	else
		return -1;
}

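/*
 * Example of the rotating priority scheme implemented by get_priority()
 * and pic_get_irq() above: with priority_add == 3 the scan order is
 * IRQ3, IRQ4, ..., IRQ7, IRQ0, IRQ1, IRQ2.  So with irr == 0x11 (IRQ0
 * and IRQ4 pending) and an empty ISR, IRQ4 is returned first; assuming
 * no special mask mode, IRQ0 is only delivered after IRQ4 has been
 * EOIed and its ISR bit cleared.
 */
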
/*
 * Raise an irq to the CPU if necessary. Must be called every time the
 * active irq may change.
 */
static void pic_update_irq(struct kvm_pic *s)
{
	int irq2, irq;

	irq2 = pic_get_irq(&s->pics[1]);
	if (irq2 >= 0) {
		/*
		 * If an irq is requested by the slave PIC, signal the master
		 * PIC through the cascade pin (IRQ 2).
		 */
		pic_set_irq1(&s->pics[0], 2, 1);
		pic_set_irq1(&s->pics[0], 2, 0);
	}
	irq = pic_get_irq(&s->pics[0]);
	pic_irq_request(s->kvm, irq >= 0);
}

void kvm_pic_update_irq(struct kvm_pic *s)
{
	pic_lock(s);
	pic_update_irq(s);
	pic_unlock(s);
}

int kvm_pic_set_irq(void *opaque, int irq, int level)
{
	struct kvm_pic *s = opaque;
	int ret = -1;

	pic_lock(s);
	if (irq >= 0 && irq < PIC_NUM_PINS) {
		ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
		pic_update_irq(s);
		trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
				      s->pics[irq >> 3].imr, ret == 0);
	}
	pic_unlock(s);

	return ret;
}

/*
 * Acknowledge interrupt 'irq'.
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
	s->isr |= 1 << irq;
	/*
	 * We don't clear a level sensitive interrupt here
	 */
	if (!(s->elcr & (1 << irq)))
		s->irr &= ~(1 << irq);

	if (s->auto_eoi) {
		if (s->rotate_on_auto_eoi)
			s->priority_add = (irq + 1) & 7;
		pic_clear_isr(s, irq);
	}
}

int kvm_pic_read_irq(struct kvm *kvm)
{
	int irq, irq2, intno;
	struct kvm_pic *s = pic_irqchip(kvm);

	pic_lock(s);
	irq = pic_get_irq(&s->pics[0]);
	if (irq >= 0) {
		pic_intack(&s->pics[0], irq);
		if (irq == 2) {
			irq2 = pic_get_irq(&s->pics[1]);
			if (irq2 >= 0)
				pic_intack(&s->pics[1], irq2);
			else
				/*
				 * spurious IRQ on slave controller
				 */
				irq2 = 7;
			intno = s->pics[1].irq_base + irq2;
			irq = irq2 + 8;
		} else
			intno = s->pics[0].irq_base + irq;
	} else {
		/*
		 * spurious IRQ on master controller
		 */
		irq = 7;
		intno = s->pics[0].irq_base + irq;
	}
	pic_update_irq(s);
	pic_unlock(s);

	return intno;
}

void kvm_pic_reset(struct kvm_kpic_state *s)
{
	int irq;
	struct kvm_vcpu *vcpu0 = s->pics_state->kvm->bsp_vcpu;
	u8 irr = s->irr, isr = s->imr;

	s->last_irr = 0;
	s->irr = 0;
	s->imr = 0;
	s->isr = 0;
	s->isr_ack = 0xff;
	s->priority_add = 0;
	s->irq_base = 0;
	s->read_reg_select = 0;
	s->poll = 0;
	s->special_mask = 0;
	s->init_state = 0;
	s->auto_eoi = 0;
	s->rotate_on_auto_eoi = 0;
	s->special_fully_nested_mode = 0;
	s->init4 = 0;

	for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
		if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
			if (irr & (1 << irq) || isr & (1 << irq)) {
				pic_clear_isr(s, irq);
			}
	}
}

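/*
 * pic_ioport_write() below decodes the guest's command and data port
 * accesses.  Writes to the command port (addr == 0) are ICW1 when bit 4
 * is set, OCW3 when bit 3 is set, and OCW2 otherwise.  Writes to the
 * data port (addr == 1) either program the IMR in normal operation or
 * continue the ICW2..ICW4 initialization sequence tracked by init_state;
 * the ICW3 cascade word is consumed in state 2 but its value is ignored
 * by this model.
 */
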
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	int priority, cmd, irq;

	addr &= 1;
	if (addr == 0) {
		if (val & 0x10) {
			/* ICW1 */
			s->init4 = val & 1;
			s->last_irr = 0;
			s->imr = 0;
			s->priority_add = 0;
			s->special_mask = 0;
			s->read_reg_select = 0;
			if (!s->init4) {
				s->special_fully_nested_mode = 0;
				s->auto_eoi = 0;
			}
			s->init_state = 1;
			if (val & 0x02)
				printk(KERN_ERR "single mode not supported\n");
			if (val & 0x08)
				printk(KERN_ERR
				       "level sensitive irq not supported\n");
		} else if (val & 0x08) {
			/* OCW3 */
			if (val & 0x04)
				s->poll = 1;
			if (val & 0x02)
				s->read_reg_select = val & 1;
			if (val & 0x40)
				s->special_mask = (val >> 5) & 1;
		} else {
			/* OCW2 */
			cmd = val >> 5;
			switch (cmd) {
			case 0:
			case 4:
				s->rotate_on_auto_eoi = cmd >> 2;
				break;
			case 1:	/* end of interrupt */
			case 5:	/* rotate on non-specific EOI */
				priority = get_priority(s, s->isr);
				if (priority != 8) {
					irq = (priority + s->priority_add) & 7;
					if (cmd == 5)
						s->priority_add = (irq + 1) & 7;
					pic_clear_isr(s, irq);
					pic_update_irq(s->pics_state);
				}
				break;
			case 3:	/* specific EOI */
				irq = val & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			case 6:	/* set priority */
				s->priority_add = (val + 1) & 7;
				pic_update_irq(s->pics_state);
				break;
			case 7:	/* rotate on specific EOI */
				irq = val & 7;
				s->priority_add = (irq + 1) & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			default:
				break;	/* no operation */
			}
		}
	} else
		switch (s->init_state) {
		case 0: { /* normal mode: program IMR */
			u8 imr_diff = s->imr ^ val,
				off = (s == &s->pics_state->pics[0]) ? 0 : 8;
			s->imr = val;
			for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
				if (imr_diff & (1 << irq))
					kvm_fire_mask_notifiers(
						s->pics_state->kvm,
						SELECT_PIC(irq + off),
						irq + off,
						!!(s->imr & (1 << irq)));
			pic_update_irq(s->pics_state);
			break;
		}
		case 1:	/* ICW2: vector base */
			s->irq_base = val & 0xf8;
			s->init_state = 2;
			break;
		case 2:	/* ICW3: cascade wiring, value ignored */
			if (s->init4)
				s->init_state = 3;
			else
				s->init_state = 0;
			break;
		case 3:	/* ICW4 */
			s->special_fully_nested_mode = (val >> 4) & 1;
			s->auto_eoi = (val >> 1) & 1;
			s->init_state = 0;
			break;
		}
}

static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
	int ret;

	ret = pic_get_irq(s);
	if (ret >= 0) {
		if (addr1 >> 7) {
			s->pics_state->pics[0].isr &= ~(1 << 2);
			s->pics_state->pics[0].irr &= ~(1 << 2);
		}
		s->irr &= ~(1 << ret);
		pic_clear_isr(s, ret);
		if (addr1 >> 7 || ret != 2)
			pic_update_irq(s->pics_state);
	} else {
		ret = 0x07;
		pic_update_irq(s->pics_state);
	}

	return ret;
}

static u32 pic_ioport_read(void *opaque, u32 addr1)
{
	struct kvm_kpic_state *s = opaque;
	unsigned int addr;
	int ret;

	addr = addr1;
	addr &= 1;
	if (s->poll) {
		ret = pic_poll_read(s, addr1);
		s->poll = 0;
	} else
		if (addr == 0)
			if (s->read_reg_select)
				ret = s->isr;
			else
				ret = s->irr;
		else
			ret = s->imr;
	return ret;
}

static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	s->elcr = val & s->elcr_mask;
}

static u32 elcr_ioport_read(void *opaque, u32 addr1)
{
	struct kvm_kpic_state *s = opaque;
	return s->elcr;
}

static int picdev_in_range(gpa_t addr)
{
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
	case 0x4d0:
	case 0x4d1:
		return 1;
	default:
		return 0;
	}
}

static inline struct kvm_pic *to_pic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pic, dev);
}

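/*
 * Port decoding used by picdev_write()/picdev_read() below: for the
 * 8259 register pairs, addr >> 7 is 0 for 0x20/0x21 (master) and 1 for
 * 0xa0/0xa1 (slave), so it indexes pics[].  For the ELCR registers,
 * addr & 1 distinguishes 0x4d0 (master) from 0x4d1 (slave).
 */
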
static int picdev_write(struct kvm_io_device *this,
			gpa_t addr, int len, const void *val)
{
	struct kvm_pic *s = to_pic(this);
	unsigned char data = *(unsigned char *)val;
	if (!picdev_in_range(addr))
		return -EOPNOTSUPP;

	if (len != 1) {
		if (printk_ratelimit())
			printk(KERN_ERR "PIC: non byte write\n");
		return 0;
	}
	pic_lock(s);
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		pic_ioport_write(&s->pics[addr >> 7], addr, data);
		break;
	case 0x4d0:
	case 0x4d1:
		elcr_ioport_write(&s->pics[addr & 1], addr, data);
		break;
	}
	pic_unlock(s);
	return 0;
}

static int picdev_read(struct kvm_io_device *this,
		       gpa_t addr, int len, void *val)
{
	struct kvm_pic *s = to_pic(this);
	unsigned char data = 0;
	if (!picdev_in_range(addr))
		return -EOPNOTSUPP;

	if (len != 1) {
		if (printk_ratelimit())
			printk(KERN_ERR "PIC: non byte read\n");
		return 0;
	}
	pic_lock(s);
	switch (addr) {
	case 0x20:
	case 0x21:
	case 0xa0:
	case 0xa1:
		data = pic_ioport_read(&s->pics[addr >> 7], addr);
		break;
	case 0x4d0:
	case 0x4d1:
		data = elcr_ioport_read(&s->pics[addr & 1], addr);
		break;
	}
	*(unsigned char *)val = data;
	pic_unlock(s);
	return 0;
}

/*
 * callback when PIC0 irq status changed
 */
static void pic_irq_request(struct kvm *kvm, int level)
{
	struct kvm_vcpu *vcpu = kvm->bsp_vcpu;
	struct kvm_pic *s = pic_irqchip(kvm);
	int irq = pic_get_irq(&s->pics[0]);

	s->output = level;
	if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) {
		s->pics[0].isr_ack &= ~(1 << irq);
		s->wakeup_needed = true;
	}
}

static const struct kvm_io_device_ops picdev_ops = {
	.read  = picdev_read,
	.write = picdev_write,
};

struct kvm_pic *kvm_create_pic(struct kvm *kvm)
{
	struct kvm_pic *s;
	int ret;

	s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
	if (!s)
		return NULL;
	spin_lock_init(&s->lock);
	s->kvm = kvm;
	s->pics[0].elcr_mask = 0xf8;
	s->pics[1].elcr_mask = 0xde;
	s->pics[0].pics_state = s;
	s->pics[1].pics_state = s;
	s->pics[0].isr_ack = 0xff;
	s->pics[1].isr_ack = 0xff;

	/*
	 * Initialize PIO device
	 */
	kvm_iodevice_init(&s->dev, &picdev_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kfree(s);
		return NULL;
	}

	return s;
}

void kvm_destroy_pic(struct kvm *kvm)
{
	struct kvm_pic *vpic = kvm->arch.vpic;

	if (vpic) {
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev);
		kvm->arch.vpic = NULL;
		kfree(vpic);
	}
}
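/*
 * Typical call flow, sketched here only for orientation (the callers
 * live elsewhere in KVM): userspace creates the device with
 * KVM_CREATE_IRQCHIP, which ends up in kvm_create_pic(); interrupt
 * sources raise or lower a pin through kvm_pic_set_irq(); and when a
 * vcpu acknowledges an external interrupt, kvm_pic_read_irq() returns
 * the vector (irq_base + pin) to inject, emulating the INTA cycle of a
 * real 8259 pair.
 */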