/*
 * Intel IO-APIC support for multi-Pentium hosts.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 * Many thanks to Stig Venaas for trying out countless experimental
 * patches and reporting/debugging problems patiently!
 *
 * (c) 1999, Multiple IO-APIC support, developed by
 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 * further tested and cleaned up by Zach Brown <zab@redhat.com>
 * and Ingo Molnar <mingo@redhat.com>
 *
 * Fixes
 * Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *				thanks to Eric Gilmore
 *				and Rolf G. Tews
 *				for testing these extensively
 * Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <linux/slab.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>

#include <asm/apic.h>

#define __apicdebuginit(type) static type __init

#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)

/*
 * Is the SiS APIC rmw bug present?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);
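/*
 * ioapic_lock serializes the indirect index/data register accesses
 * below (the two-step protocol is not atomic); vector_lock protects
 * the per-cpu vector_irq[] tables while vectors are (re)assigned.
 */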
static struct ioapic {
	/*
	 * # of IRQ routing registers
	 */
	int nr_registers;
	/*
	 * Saved state during suspend/resume, or while enabling intr-remap.
	 */
	struct IO_APIC_route_entry *saved_registers;
	/* I/O APIC config */
	struct mpc_ioapic mp_config;
	/* IO APIC gsi routing info */
	struct mp_ioapic_gsi gsi_config;
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} ioapics[MAX_IO_APICS];

#define mpc_ioapic_ver(ioapic_idx)	ioapics[ioapic_idx].mp_config.apicver

int mpc_ioapic_id(int ioapic_idx)
{
	return ioapics[ioapic_idx].mp_config.apicid;
}

unsigned int mpc_ioapic_addr(int ioapic_idx)
{
	return ioapics[ioapic_idx].mp_config.apicaddr;
}

struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx)
{
	return &ioapics[ioapic_idx].gsi_config;
}

int nr_ioapics;

/* The one past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

/**
 * disable_ioapic_support() - disables ioapic support at runtime
 */
void disable_ioapic_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	disable_ioapic_support();
	return 0;
}
early_param("noapic", parse_noapic);

static int io_apic_setup_irq_pin(unsigned int irq, int node,
				 struct io_apic_irq_attr *attr);

/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
void mp_save_irq(struct mpc_intsrc *m)
{
	int i;

	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
		m->srcbusirq, m->dstapic, m->dstirq);

	for (i = 0; i < mp_irq_entries; i++) {
		if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
			return;
	}

	memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *alloc_irq_pin_list(int node)
{
	return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
}
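/*
 * One irq may be fed by more than one (apic, pin) pair -- shared
 * ISA-space IRQs are the classic case -- so each irq_cfg carries a
 * singly linked irq_2_pin list of every pin that must be programmed
 * for that irq. In the common 1:1 case the list has a single node.
 */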
/*
 * irq_cfg is indexed by the sum of all RTEs in all I/O APICs.
 */
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];

int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	int count, node, i;

	if (!legacy_pic->nr_legacy_irqs) {
		nr_irqs_gsi = 0;
		io_apic_irqs = ~0UL;
	}

	for (i = 0; i < nr_ioapics; i++) {
		ioapics[i].saved_registers =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				ioapics[i].nr_registers, GFP_KERNEL);
		if (!ioapics[i].saved_registers)
			pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
	}

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
	node = cpu_to_node(0);

	/* Make sure the legacy interrupts are marked in the bitmap */
	irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);

	for (i = 0; i < count; i++) {
		irq_set_chip_data(i, &cfg[i]);
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
		/*
		 * For legacy IRQ's, start with assigning irq0 to irq15 to
		 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
		 */
		if (i < legacy_pic->nr_legacy_irqs) {
			cfg[i].vector = IRQ0_VECTOR + i;
			cpumask_set_cpu(0, cfg[i].domain);
		}
	}

	return 0;
}

static struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq_get_chip_data(irq);
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}

static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
	if (!cfg)
		return;
	irq_set_chip_data(at, NULL);
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}

static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_get_chip_data(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(at, node);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}

static int alloc_irq_from(unsigned int from, int node)
{
	return irq_alloc_desc_from(from, node);
}

static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
{
	free_irq_cfg(at, cfg);
	irq_free_desc(at);
}

struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};
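/*
 * The I/O APIC is programmed through this small memory-mapped window:
 * write a register number into 'index' (offset 0x00), then read or
 * write the 32-bit value through 'data' (offset 0x10). The 'eoi'
 * register (offset 0x40) only exists on version 0x20+ parts. The
 * unused[] padding exists purely to put the fields at those offsets.
 */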
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mpc_ioapic_addr(idx) & ~PAGE_MASK);
}

static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg,
				 unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}

union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;

	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	return eu.entry;
}

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	eu.entry = __ioapic_read_entry(apic, pin);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
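/*
 * A routing entry (RTE) is a 64-bit value exposed as two of those
 * 32-bit registers: the low word (reg 0x10 + 2*pin) holds mask,
 * trigger, polarity, vector etc., the high word (reg 0x11 + 2*pin)
 * holds the destination. union entry_union converts between the
 * struct view and the raw words, so a typical update is, roughly:
 *
 *	entry = ioapic_read_entry(apic, pin);
 *	entry.mask = 1;
 *	ioapic_write_entry(apic, pin, entry);	(high word, then low)
 */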
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int
__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	/* don't allow duplicates */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;
		last = &entry->next;
	}

	entry = alloc_irq_pin_list(node);
	if (!entry) {
		printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
		       node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	*last = entry;
	return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	if (__add_pin_to_irq_node(cfg, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right? */
			return;
		}
	}

	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
}

static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}

static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}

static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;

	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void mask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_ioapic_irq(struct irq_data *data)
{
	mask_ioapic(data->chip_data);
}

static void __unmask_ioapic(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

static void unmask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_ioapic(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_ioapic_irq(struct irq_data *data)
{
	unmask_ioapic(data->chip_data);
}
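/*
 * Masking boils down to a read-modify-write of the mask bit in the low
 * RTE word: mask_ioapic() passes (~0, IO_APIC_REDIR_MASKED) to set the
 * bit, __unmask_ioapic() passes (~IO_APIC_REDIR_MASKED, 0) to clear it.
 * The io_apic_sync() read-back is only needed when masking, to make
 * sure the mask has reached the hardware before we return.
 */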
/*
 * IO-APIC versions below 0x20 don't support EOI register.
 * For the record, here is the information about various versions:
 *	0Xh	82489DX
 *	1Xh	I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
 *	2Xh	I/O(x)APIC which is PCI 2.2 Compliant
 *	30h-FFh	Reserved
 *
 * Some of the Intel ICH specs (ICH2 to ICH5) document the io-apic
 * version as 0x2. This is a documentation error: these ICH chips
 * use io-apics of version 0x20.
 *
 * For IO-APICs with an EOI register, we use that to do an explicit EOI.
 * Otherwise, we simulate the EOI message manually by changing the trigger
 * mode to edge and then back to level, with the RTE being masked during
 * this.
 */
static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg)
{
	if (mpc_ioapic_ver(apic) >= 0x20) {
		/*
		 * Intr-remapping uses pin number as the virtual vector
		 * in the RTE. Actual vector is programmed in
		 * intr-remapping table entry. Hence for the io-apic
		 * EOI we use the pin number.
		 */
		if (cfg && irq_remapped(cfg))
			io_apic_eoi(apic, pin);
		else
			io_apic_eoi(apic, vector);
	} else {
		struct IO_APIC_route_entry entry, entry1;

		entry = entry1 = __ioapic_read_entry(apic, pin);

		/*
		 * Mask the entry and change the trigger mode to edge.
		 */
		entry1.mask = 1;
		entry1.trigger = IOAPIC_EDGE;

		__ioapic_write_entry(apic, pin, entry1);

		/*
		 * Restore the previous level triggered entry.
		 */
		__ioapic_write_entry(apic, pin, entry);
	}
}

static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin)
		__eoi_ioapic_pin(entry->apic, entry->pin, cfg->vector, cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
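/*
 * The Remote IRR bit in the low RTE word (entry.irr) is set by the
 * IO-APIC when it delivers a level-triggered interrupt and is cleared
 * only by an EOI. A pin left with Remote IRR set will never fire
 * again, which is why clear_IO_APIC_pin() below has to EOI a pin
 * whose Remote IRR is still set before masking it for good.
 */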
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;

	/*
	 * Make sure the entry is masked and re-read the contents to check
	 * if it is a level triggered pin and if the remote-IRR is set.
	 */
	if (!entry.mask) {
		entry.mask = 1;
		ioapic_write_entry(apic, pin, entry);
		entry = ioapic_read_entry(apic, pin);
	}

	if (entry.irr) {
		unsigned long flags;

		/*
		 * Make sure the trigger mode is set to level. Explicit EOI
		 * doesn't clear the remote-IRR if the trigger mode is not
		 * set to level.
		 */
		if (!entry.trigger) {
			entry.trigger = IOAPIC_LEVEL;
			ioapic_write_entry(apic, pin, entry);
		}

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		__eoi_ioapic_pin(apic, pin, entry.vector, NULL);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	}

	/*
	 * Clear the rest of the bits in the IO-APIC RTE except for the mask
	 * bit.
	 */
	ioapic_mask_entry(apic, pin);
	entry = ioapic_read_entry(apic, pin);
	if (entry.irr)
		printk(KERN_ERR "Unable to reset IRR for apic: %d, pin: %d\n",
		       mpc_ioapic_id(apic), pin);
}

static void clear_IO_APIC(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			clear_IO_APIC_pin(apic, pin);
}

#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */

/*
 * Saves all the IO-APIC RTE's
 */
int save_ioapic_entries(void)
{
	int apic, pin;
	int err = 0;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers) {
			err = -ENOMEM;
			continue;
		}

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			ioapics[apic].saved_registers[pin] =
				ioapic_read_entry(apic, pin);
	}

	return err;
}

/*
 * Mask all IO APIC entries.
 */
void mask_ioapic_entries(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers)
			continue;

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			struct IO_APIC_route_entry entry;

			entry = ioapics[apic].saved_registers[pin];
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}

/*
 * Restore IO APIC entries which were saved in the ioapic structure.
 */
int restore_ioapic_entries(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers)
			continue;

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			ioapic_write_entry(apic, pin,
					   ioapics[apic].saved_registers[pin]);
	}
	return 0;
}
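/*
 * The three helpers above implement the suspend/resume (and
 * irq-remapping enable) sequence: snapshot every RTE into
 * ioapics[].saved_registers, mask the live entries, and later write
 * the snapshots back once the hardware is in a known state again.
 */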
/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int ioapic_idx, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].irqtype == type &&
		    (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) ||
		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))

			return mp_irqs[i].dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}

	if (i < mp_irq_entries) {
		int ioapic_idx;

		for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
			if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic)
				return ioapic_idx;
	}

	return -1;
}

#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < legacy_pic->nr_legacy_irqs) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}

#endif

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value. If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
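/*
 * mp_irqs[].irqflag packs both attributes of an MP table INT entry:
 *
 *	bits 1-0  polarity: 00 conforms to bus, 01 active high,
 *	                    10 reserved,        11 active low
 *	bits 3-2  trigger:  00 conforms to bus, 01 edge,
 *	                    10 reserved,        11 level
 *
 * irq_polarity() and irq_trigger() below decode these, falling back
 * to the per-bus defaults above for the "conforms" case.
 */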
static int irq_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & 3)
	{
	case 0: /* conforms, ie. bus-type dependent polarity */
		if (test_bit(bus, mp_bus_not_pci))
			polarity = default_ISA_polarity(idx);
		else
			polarity = default_PCI_polarity(idx);
		break;
	case 1: /* high active */
	{
		polarity = 0;
		break;
	}
	case 2: /* reserved */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		polarity = 1;
		break;
	}
	case 3: /* low active */
	{
		polarity = 1;
		break;
	}
	default: /* invalid */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		polarity = 1;
		break;
	}
	}
	return polarity;
}

static int irq_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag>>2) & 3)
	{
	case 0: /* conforms, ie. bus-type dependent */
		if (test_bit(bus, mp_bus_not_pci))
			trigger = default_ISA_trigger(idx);
		else
			trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
		switch (mp_bus_id_to_type[bus]) {
		case MP_BUS_ISA: /* ISA pin */
		{
			/* set before the switch */
			break;
		}
		case MP_BUS_EISA: /* EISA pin */
		{
			trigger = default_EISA_trigger(idx);
			break;
		}
		case MP_BUS_PCI: /* PCI pin */
		{
			/* set before the switch */
			break;
		}
		case MP_BUS_MCA: /* MCA pin */
		{
			trigger = default_MCA_trigger(idx);
			break;
		}
		default:
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		}
#endif
		break;
	case 1: /* edge */
	{
		trigger = 0;
		break;
	}
	case 2: /* reserved */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		trigger = 1;
		break;
	}
	case 3: /* level */
	{
		trigger = 1;
		break;
	}
	default: /* invalid */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		trigger = 0;
		break;
	}
	}
	return trigger;
}

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq;
	int bus = mp_irqs[idx].srcbus;
	struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(apic);

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
	} else {
		u32 gsi = gsi_cfg->gsi_base + pin;

		if (gsi >= NR_IRQS_LEGACY)
			irq = gsi;
		else
			irq = gsi_top + gsi;
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}
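/*
 * For PCI source buses, mp_irqs[].srcbusirq packs the originating
 * device: bits 1-0 are the interrupt pin (0 = INTA# ... 3 = INTD#)
 * and bits 6-2 are the device/slot number. That is what the
 * "(srcbusirq >> 2) & 0x1f" slot match and "srcbusirq & 3" pin match
 * below pick apart.
 */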
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
				struct io_apic_irq_attr *irq_attr)
{
	int ioapic_idx, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
			if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq);

			if (!(ioapic_idx || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
				set_io_apic_irq_attr(irq_attr, ioapic_idx,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				return irq;
			}
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0) {
				set_io_apic_irq_attr(irq_attr, ioapic_idx,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);

void lock_vector_lock(void)
{
	/* Used so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
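/*
 * A vector's priority level is vector >> 4, i.e. 16 vectors per
 * level: 0x30-0x3f are all level 3, for example. Allocating in steps
 * of 8 (and rotating the offset only on wrap) spreads successive
 * allocations across levels instead of filling one level at a time;
 * used_vectors keeps reserved gates such as 0x80 (the int 0x80
 * syscall gate) off limits.
 */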
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 8;
	unsigned int old_vector;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_and(tmp_mask, mask, cpu_online_mask);
		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
		if (!cpumask_empty(tmp_mask)) {
			free_cpumask_var(tmp_mask);
			return 0;
		}
	}

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		int new_cpu;
		int vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cpumask_copy(cfg->old_domain, cfg->domain);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);

	return err;
}

int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
								vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
}
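/*
 * When an irq is migrated, the new vector is programmed while the old
 * vector may still have an interrupt in flight on the cpus in
 * old_domain; move_in_progress marks that window. __clear_irq_vector()
 * above therefore scrubs vector_irq[] for both the current domain and,
 * if a move was pending, the old one.
 */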
1252 */ 1253 if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq)) 1254 cpumask_set_cpu(cpu, cfg->domain); 1255 1256 if (!cpumask_test_cpu(cpu, cfg->domain)) 1257 continue; 1258 vector = cfg->vector; 1259 per_cpu(vector_irq, cpu)[vector] = irq; 1260 } 1261 /* Mark the free vectors */ 1262 for (vector = 0; vector < NR_VECTORS; ++vector) { 1263 irq = per_cpu(vector_irq, cpu)[vector]; 1264 if (irq < 0) 1265 continue; 1266 1267 cfg = irq_cfg(irq); 1268 if (!cpumask_test_cpu(cpu, cfg->domain)) 1269 per_cpu(vector_irq, cpu)[vector] = -1; 1270 } 1271 raw_spin_unlock(&vector_lock); 1272 } 1273 1274 static struct irq_chip ioapic_chip; 1275 1276 #ifdef CONFIG_X86_32 1277 static inline int IO_APIC_irq_trigger(int irq) 1278 { 1279 int apic, idx, pin; 1280 1281 for (apic = 0; apic < nr_ioapics; apic++) { 1282 for (pin = 0; pin < ioapics[apic].nr_registers; pin++) { 1283 idx = find_irq_entry(apic, pin, mp_INT); 1284 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin))) 1285 return irq_trigger(idx); 1286 } 1287 } 1288 /* 1289 * nonexistent IRQs are edge default 1290 */ 1291 return 0; 1292 } 1293 #else 1294 static inline int IO_APIC_irq_trigger(int irq) 1295 { 1296 return 1; 1297 } 1298 #endif 1299 1300 static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg, 1301 unsigned long trigger) 1302 { 1303 struct irq_chip *chip = &ioapic_chip; 1304 irq_flow_handler_t hdl; 1305 bool fasteoi; 1306 1307 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 1308 trigger == IOAPIC_LEVEL) { 1309 irq_set_status_flags(irq, IRQ_LEVEL); 1310 fasteoi = true; 1311 } else { 1312 irq_clear_status_flags(irq, IRQ_LEVEL); 1313 fasteoi = false; 1314 } 1315 1316 if (irq_remapped(cfg)) { 1317 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 1318 irq_remap_modify_chip_defaults(chip); 1319 fasteoi = trigger != 0; 1320 } 1321 1322 hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; 1323 irq_set_chip_and_handler_name(irq, chip, hdl, 1324 fasteoi ? "fasteoi" : "edge"); 1325 } 1326 1327 1328 static int setup_ir_ioapic_entry(int irq, 1329 struct IR_IO_APIC_route_entry *entry, 1330 unsigned int destination, int vector, 1331 struct io_apic_irq_attr *attr) 1332 { 1333 int index; 1334 struct irte irte; 1335 int ioapic_id = mpc_ioapic_id(attr->ioapic); 1336 struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id); 1337 1338 if (!iommu) { 1339 pr_warn("No mapping iommu for ioapic %d\n", ioapic_id); 1340 return -ENODEV; 1341 } 1342 1343 index = alloc_irte(iommu, irq, 1); 1344 if (index < 0) { 1345 pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id); 1346 return -ENOMEM; 1347 } 1348 1349 prepare_irte(&irte, vector, destination); 1350 1351 /* Set source-id of interrupt request */ 1352 set_ioapic_sid(&irte, ioapic_id); 1353 1354 modify_irte(irq, &irte); 1355 1356 apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: " 1357 "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d " 1358 "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X " 1359 "Avail:%X Vector:%02X Dest:%08X " 1360 "SID:%04X SQ:%X SVT:%X)\n", 1361 attr->ioapic, irte.present, irte.fpd, irte.dst_mode, 1362 irte.redir_hint, irte.trigger_mode, irte.dlvry_mode, 1363 irte.avail, irte.vector, irte.dest_id, 1364 irte.sid, irte.sq, irte.svt); 1365 1366 memset(entry, 0, sizeof(*entry)); 1367 1368 entry->index2 = (index >> 15) & 0x1; 1369 entry->zero = 0; 1370 entry->format = 1; 1371 entry->index = (index & 0x7fff); 1372 /* 1373 * IO-APIC RTE will be configured with virtual vector. 1374 * irq handler will do the explicit EOI to the io-apic. 
1375 */ 1376 entry->vector = attr->ioapic_pin; 1377 entry->mask = 0; /* enable IRQ */ 1378 entry->trigger = attr->trigger; 1379 entry->polarity = attr->polarity; 1380 1381 /* Mask level triggered irqs. 1382 * Use IRQ_DELAYED_DISABLE for edge triggered irqs. 1383 */ 1384 if (attr->trigger) 1385 entry->mask = 1; 1386 1387 return 0; 1388 } 1389 1390 static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, 1391 unsigned int destination, int vector, 1392 struct io_apic_irq_attr *attr) 1393 { 1394 if (intr_remapping_enabled) 1395 return setup_ir_ioapic_entry(irq, 1396 (struct IR_IO_APIC_route_entry *)entry, 1397 destination, vector, attr); 1398 1399 memset(entry, 0, sizeof(*entry)); 1400 1401 entry->delivery_mode = apic->irq_delivery_mode; 1402 entry->dest_mode = apic->irq_dest_mode; 1403 entry->dest = destination; 1404 entry->vector = vector; 1405 entry->mask = 0; /* enable IRQ */ 1406 entry->trigger = attr->trigger; 1407 entry->polarity = attr->polarity; 1408 1409 /* 1410 * Mask level triggered irqs. 1411 * Use IRQ_DELAYED_DISABLE for edge triggered irqs. 1412 */ 1413 if (attr->trigger) 1414 entry->mask = 1; 1415 1416 return 0; 1417 } 1418 1419 static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg, 1420 struct io_apic_irq_attr *attr) 1421 { 1422 struct IO_APIC_route_entry entry; 1423 unsigned int dest; 1424 1425 if (!IO_APIC_IRQ(irq)) 1426 return; 1427 /* 1428 * For legacy irqs, cfg->domain starts with cpu 0 for legacy 1429 * controllers like 8259. Now that IO-APIC can handle this irq, update 1430 * the cfg->domain. 1431 */ 1432 if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain)) 1433 apic->vector_allocation_domain(0, cfg->domain); 1434 1435 if (assign_irq_vector(irq, cfg, apic->target_cpus())) 1436 return; 1437 1438 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 1439 1440 apic_printk(APIC_VERBOSE,KERN_DEBUG 1441 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " 1442 "IRQ %d Mode:%i Active:%i Dest:%d)\n", 1443 attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin, 1444 cfg->vector, irq, attr->trigger, attr->polarity, dest); 1445 1446 if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) { 1447 pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1448 mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); 1449 __clear_irq_vector(irq, cfg); 1450 1451 return; 1452 } 1453 1454 ioapic_register_intr(irq, cfg, attr->trigger); 1455 if (irq < legacy_pic->nr_legacy_irqs) 1456 legacy_pic->mask(irq); 1457 1458 ioapic_write_entry(attr->ioapic, attr->ioapic_pin, entry); 1459 } 1460 1461 static bool __init io_apic_pin_not_connected(int idx, int ioapic_idx, int pin) 1462 { 1463 if (idx != -1) 1464 return false; 1465 1466 apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n", 1467 mpc_ioapic_id(ioapic_idx), pin); 1468 return true; 1469 } 1470 1471 static void __init __io_apic_setup_irqs(unsigned int ioapic_idx) 1472 { 1473 int idx, node = cpu_to_node(0); 1474 struct io_apic_irq_attr attr; 1475 unsigned int pin, irq; 1476 1477 for (pin = 0; pin < ioapics[ioapic_idx].nr_registers; pin++) { 1478 idx = find_irq_entry(ioapic_idx, pin, mp_INT); 1479 if (io_apic_pin_not_connected(idx, ioapic_idx, pin)) 1480 continue; 1481 1482 irq = pin_2_irq(idx, ioapic_idx, pin); 1483 1484 if ((ioapic_idx > 0) && (irq > 16)) 1485 continue; 1486 1487 /* 1488 * Skip the timer IRQ if there's a quirk handler 1489 * installed and if it returns 1: 1490 */ 1491 if (apic->multi_timer_check && 1492 
static bool __init io_apic_pin_not_connected(int idx, int ioapic_idx, int pin)
{
	if (idx != -1)
		return false;

	apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n",
		    mpc_ioapic_id(ioapic_idx), pin);
	return true;
}

static void __init __io_apic_setup_irqs(unsigned int ioapic_idx)
{
	int idx, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;
	unsigned int pin, irq;

	for (pin = 0; pin < ioapics[ioapic_idx].nr_registers; pin++) {
		idx = find_irq_entry(ioapic_idx, pin, mp_INT);
		if (io_apic_pin_not_connected(idx, ioapic_idx, pin))
			continue;

		irq = pin_2_irq(idx, ioapic_idx, pin);

		if ((ioapic_idx > 0) && (irq > 16))
			continue;

		/*
		 * Skip the timer IRQ if there's a quirk handler
		 * installed and if it returns 1:
		 */
		if (apic->multi_timer_check &&
		    apic->multi_timer_check(ioapic_idx, irq))
			continue;

		set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx),
				     irq_polarity(idx));

		io_apic_setup_irq_pin(irq, node, &attr);
	}
}

static void __init setup_IO_APIC_irqs(void)
{
	unsigned int ioapic_idx;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
		__io_apic_setup_irqs(ioapic_idx);
}

/*
 * For a gsi that is not on the first ioapic but could not use
 * acpi_register_gsi(), like some special sci on the IBM x3330.
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
	int ioapic_idx = 0, pin, idx, irq, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	ioapic_idx = mp_find_ioapic(gsi);
	if (ioapic_idx < 0)
		return;

	pin = mp_find_ioapic_pin(ioapic_idx, gsi);
	idx = find_irq_entry(ioapic_idx, pin, mp_INT);
	if (idx == -1)
		return;

	irq = pin_2_irq(idx, ioapic_idx, pin);

	/* Only handle the non legacy irqs on secondary ioapics */
	if (ioapic_idx == 0 || irq < NR_IRQS_LEGACY)
		return;

	set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx),
			     irq_polarity(idx));

	io_apic_setup_irq_pin_once(irq, node, &attr);
}

/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
					unsigned int pin, int vector)
{
	struct IO_APIC_route_entry entry;

	if (intr_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* don't mask IRQ for edge */
	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
				      "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(ioapic_idx, pin, entry);
}
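/*
 * The dumpers below are all declared __apicdebuginit, i.e. "static
 * <type> __init": they live in init memory, are discarded after boot,
 * and are reachable only through print_ICs(), a late_initcall.
 */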
__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
{
	int i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic_idx, 0);
	reg_01.raw = io_apic_read(ioapic_idx, 1);
	if (reg_01.bits.version >= 0x10)
		reg_02.raw = io_apic_read(ioapic_idx, 2);
	if (reg_01.bits.version >= 0x20)
		reg_03.raw = io_apic_read(ioapic_idx, 3);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	printk("\n");
	printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
	printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
	printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
	printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);

	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
	printk(KERN_DEBUG "....... : max redirection entries: %02X\n",
		reg_01.bits.entries);

	printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
	printk(KERN_DEBUG "....... : IO APIC version: %02X\n",
		reg_01.bits.version);

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
	 * but the value of reg_02 is read as the previous read register
	 * value, so ignore it if reg_02 == reg_01.
	 */
	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
		printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
	}

	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
	 * or reg_03, but the value of reg_0[23] is read as the previous read
	 * register value, so ignore it if reg_03 == reg_0[12].
	 */
	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
	    reg_03.raw != reg_01.raw) {
		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
		printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
	}

	printk(KERN_DEBUG ".... IRQ redirection table:\n");

	if (intr_remapping_enabled) {
		printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR"
			" Pol Stat Indx2 Zero Vect:\n");
	} else {
		printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
			" Stat Dmod Deli Vect:\n");
	}

	for (i = 0; i <= reg_01.bits.entries; i++) {
		if (intr_remapping_enabled) {
			struct IO_APIC_route_entry entry;
			struct IR_IO_APIC_route_entry *ir_entry;

			entry = ioapic_read_entry(ioapic_idx, i);
			ir_entry = (struct IR_IO_APIC_route_entry *) &entry;
			printk(KERN_DEBUG " %02x %04X ",
				i,
				ir_entry->index
			);
			printk("%1d %1d %1d %1d %1d "
				"%1d %1d %X %02X\n",
				ir_entry->format,
				ir_entry->mask,
				ir_entry->trigger,
				ir_entry->irr,
				ir_entry->polarity,
				ir_entry->delivery_status,
				ir_entry->index2,
				ir_entry->zero,
				ir_entry->vector
			);
		} else {
			struct IO_APIC_route_entry entry;

			entry = ioapic_read_entry(ioapic_idx, i);
			printk(KERN_DEBUG " %02x %02X ",
				i,
				entry.dest
			);
			printk("%1d %1d %1d %1d %1d "
				"%1d %1d %02X\n",
				entry.mask,
				entry.trigger,
				entry.irr,
				entry.polarity,
				entry.delivery_status,
				entry.dest_mode,
				entry.delivery_mode,
				entry.vector
			);
		}
	}
}
__apicdebuginit(void) print_IO_APICs(void)
{
	int ioapic_idx;
	struct irq_cfg *cfg;
	unsigned int irq;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mpc_ioapic_id(ioapic_idx),
		       ioapics[ioapic_idx].nr_registers);

	/*
	 * We are a bit conservative about what we expect. We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
		print_IO_APIC(ioapic_idx);

	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_active_irq(irq) {
		struct irq_pin_list *entry;

		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, cfg->irq_2_pin)
			printk("-> %d:%d", entry->apic, entry->pin);
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");
}

__apicdebuginit(void) print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		printk(KERN_CONT "%08x", apic_read(base + i*0x10));

	printk(KERN_CONT "\n");
}
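/*
 * ISR, TMR and IRR are each a 256-bit bitmap, one bit per vector,
 * exposed as eight 32-bit registers spaced 0x10 apart -- hence the
 * eight reads in print_APIC_field() above.
 */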
__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	if (APIC_INTEGRATED(ver)) {	/* !82489DX */
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {	/* !82489DX */
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {	/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {	/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	printk("\n");
}

__apicdebuginit(void) print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

__apicdebuginit(int) print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);


/* Where, if anywhere, the i8259 is connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
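/*
 * In PIC ("virtual wire") mode the 8259A output is wired through one
 * IO-APIC pin programmed for ExtINT delivery. enable_IO_APIC() below
 * hunts down that pin (hardware first, MP table as fallback) and
 * records it in ioapic_i8259, so that disable_IO_APIC() can restore
 * virtual wire mode before a reboot or crash kernel that expects
 * legacy timer interrupts.
 */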
void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin = pin;
				goto found_i8259;
			}
		}
	}
found_i8259:
	/* Look to see if the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 probably is not connected to the ioapic but give the
	 * mptable a chance anyway.
	 */
	i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!legacy_pic->nr_legacy_irqs)
		return;

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 *
	 * With interrupt-remapping, for now we will use virtual wire A
	 * mode, as virtual wire B is a little more complex (we need to
	 * configure both the IOAPIC RTE as well as the interrupt-remapping
	 * table entry). As this gets called during crash dump, keep this
	 * simple for now.
	 */
	if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask		= 0; /* Enabled */
		entry.trigger		= 0; /* Edge */
		entry.irr		= 0;
		entry.polarity		= 0; /* High */
		entry.delivery_status	= 0;
		entry.dest_mode		= 0; /* Physical */
		entry.delivery_mode	= dest_ExtINT; /* ExtInt */
		entry.vector		= 0;
		entry.dest		= read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	/*
	 * Use virtual wire A mode when interrupt remapping is enabled.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(!intr_remapping_enabled &&
				ioapic_i8259.pin != -1);
}
2063 */ 2064 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) { 2065 /* Read the register 0 value */ 2066 raw_spin_lock_irqsave(&ioapic_lock, flags); 2067 reg_00.raw = io_apic_read(ioapic_idx, 0); 2068 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2069 2070 old_id = mpc_ioapic_id(ioapic_idx); 2071 2072 if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) { 2073 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", 2074 ioapic_idx, mpc_ioapic_id(ioapic_idx)); 2075 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 2076 reg_00.bits.ID); 2077 ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID; 2078 } 2079 2080 /* 2081 * Sanity check, is the ID really free? Every APIC in a 2082 * system must have a unique ID or we get lots of nice 2083 * 'stuck on smp_invalidate_needed IPI wait' messages. 2084 */ 2085 if (apic->check_apicid_used(&phys_id_present_map, 2086 mpc_ioapic_id(ioapic_idx))) { 2087 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", 2088 ioapic_idx, mpc_ioapic_id(ioapic_idx)); 2089 for (i = 0; i < get_physical_broadcast(); i++) 2090 if (!physid_isset(i, phys_id_present_map)) 2091 break; 2092 if (i >= get_physical_broadcast()) 2093 panic("Max APIC ID exceeded!\n"); 2094 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 2095 i); 2096 physid_set(i, phys_id_present_map); 2097 ioapics[ioapic_idx].mp_config.apicid = i; 2098 } else { 2099 physid_mask_t tmp; 2100 apic->apicid_to_cpu_present(mpc_ioapic_id(ioapic_idx), 2101 &tmp); 2102 apic_printk(APIC_VERBOSE, "Setting %d in the " 2103 "phys_id_present_map\n", 2104 mpc_ioapic_id(ioapic_idx)); 2105 physids_or(phys_id_present_map, phys_id_present_map, tmp); 2106 } 2107 2108 /* 2109 * We need to adjust the IRQ routing table 2110 * if the ID changed. 2111 */ 2112 if (old_id != mpc_ioapic_id(ioapic_idx)) 2113 for (i = 0; i < mp_irq_entries; i++) 2114 if (mp_irqs[i].dstapic == old_id) 2115 mp_irqs[i].dstapic 2116 = mpc_ioapic_id(ioapic_idx); 2117 2118 /* 2119 * Update the ID register according to the right value 2120 * from the MPC table if they are different. 2121 */ 2122 if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID) 2123 continue; 2124 2125 apic_printk(APIC_VERBOSE, KERN_INFO 2126 "...changing IO-APIC physical APIC ID to %d ...", 2127 mpc_ioapic_id(ioapic_idx)); 2128 2129 reg_00.bits.ID = mpc_ioapic_id(ioapic_idx); 2130 raw_spin_lock_irqsave(&ioapic_lock, flags); 2131 io_apic_write(ioapic_idx, 0, reg_00.raw); 2132 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2133 2134 /* 2135 * Sanity check 2136 */ 2137 raw_spin_lock_irqsave(&ioapic_lock, flags); 2138 reg_00.raw = io_apic_read(ioapic_idx, 0); 2139 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2140 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) 2141 printk("could not set ID!\n"); 2142 else 2143 apic_printk(APIC_VERBOSE, " ok.\n"); 2144 } 2145 } 2146 2147 void __init setup_ioapic_ids_from_mpc(void) 2148 { 2149 2150 if (acpi_ioapic) 2151 return; 2152 /* 2153 * Don't check I/O APIC IDs for xAPIC systems. They have 2154 * no meaning without the serial APIC bus. 
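 */

/*
 * A compiled-out distillation of the read-modify-write sequence used
 * above for the ID register (register 0): all accesses go through
 * io_apic_read()/io_apic_write() under ioapic_lock.  'new_id' is a
 * hypothetical replacement ID.
 */
#if 0
static void example_rewrite_ioapic_id(int ioapic_idx, unsigned int new_id)
{
	union IO_APIC_reg_00 reg_00;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic_idx, 0);
	reg_00.bits.ID = new_id;
	io_apic_write(ioapic_idx, 0, reg_00.raw);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
#endif

/*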
2155 */ 2156 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) 2157 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) 2158 return; 2159 setup_ioapic_ids_from_mpc_nocheck(); 2160 } 2161 #endif 2162 2163 int no_timer_check __initdata; 2164 2165 static int __init notimercheck(char *s) 2166 { 2167 no_timer_check = 1; 2168 return 1; 2169 } 2170 __setup("no_timer_check", notimercheck); 2171 2172 /* 2173 * There is a nasty bug in some older SMP boards: their mptable lies 2174 * about the timer IRQ. We do the following to work around the situation: 2175 * 2176 * - timer IRQ defaults to IO-APIC IRQ 2177 * - if this function detects that timer IRQs are defunct, then we fall 2178 * back to ISA timer IRQs 2179 */ 2180 static int __init timer_irq_works(void) 2181 { 2182 unsigned long t1 = jiffies; 2183 unsigned long flags; 2184 2185 if (no_timer_check) 2186 return 1; 2187 2188 local_save_flags(flags); 2189 local_irq_enable(); 2190 /* Let ten ticks pass... */ 2191 mdelay((10 * 1000) / HZ); 2192 local_irq_restore(flags); 2193 2194 /* 2195 * Expect a few ticks at least, to be sure some possible 2196 * glue logic does not lock up after one or two first 2197 * ticks in a non-ExtINT mode. Also the local APIC 2198 * might have cached one ExtINT interrupt. Finally, at 2199 * least one tick may be lost due to delays. 2200 */ 2201 2202 /* jiffies wrap? */ 2203 if (time_after(jiffies, t1 + 4)) 2204 return 1; 2205 return 0; 2206 } 2207 2208 /* 2209 * In the SMP+IOAPIC case it might happen that there are an unspecified 2210 * number of pending IRQ events unhandled. These cases are very rare, 2211 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much 2212 * better to do it this way, as we then do not have to be aware of 2213 * 'pending' interrupts in the IRQ path, except at this point. 2214 */ 2215 /* 2216 * Edge-triggered interrupts need to resend any interrupt 2217 * that was delayed, but this is now handled in the device 2218 * independent code. 2219 */ 2220 2221 /* 2222 * Starting up an edge-triggered IO-APIC interrupt is 2223 * nasty - we need to make sure that we get the edge. 2224 * If it is already asserted for some reason, we need 2225 * to return 1 to indicate that it was pending. 2226 * 2227 * This is not complete - we should be able to fake 2228 * an edge even if it isn't on the 8259A... 2229 */ 2230 2231 static unsigned int startup_ioapic_irq(struct irq_data *data) 2232 { 2233 int was_pending = 0, irq = data->irq; 2234 unsigned long flags; 2235 2236 raw_spin_lock_irqsave(&ioapic_lock, flags); 2237 if (irq < legacy_pic->nr_legacy_irqs) { 2238 legacy_pic->mask(irq); 2239 if (legacy_pic->irq_pending(irq)) 2240 was_pending = 1; 2241 } 2242 __unmask_ioapic(data->chip_data); 2243 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2244 2245 return was_pending; 2246 } 2247 2248 static int ioapic_retrigger_irq(struct irq_data *data) 2249 { 2250 struct irq_cfg *cfg = data->chip_data; 2251 unsigned long flags; 2252 2253 raw_spin_lock_irqsave(&vector_lock, flags); 2254 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); 2255 raw_spin_unlock_irqrestore(&vector_lock, flags); 2256 2257 return 1; 2258 } 2259 2260 /* 2261 * Level and edge triggered IO-APIC interrupts need different handling, 2262 * so we use two separate IRQ descriptors. Edge triggered IRQs can be 2263 * handled with the level-triggered descriptor, but that one has slightly 2264 * more overhead.
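 */

/*
 * A compiled-out sketch of what the two-descriptor split typically
 * means in practice: level-triggered pins get a fasteoi-style flow
 * handler, edge-triggered pins get the edge handler.  The helper name
 * is hypothetical; ioapic_chip is the irq_chip defined further below.
 */
#if 0
static void example_pick_ioapic_handler(unsigned int irq, bool level)
{
	if (level)
		irq_set_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq, "fasteoi");
	else
		irq_set_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
}
#endif

/*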
Level-triggered interrupts cannot be handled with the 2265 * edge-triggered handler, without risking IRQ storms and other ugly 2266 * races. 2267 */ 2268 2269 #ifdef CONFIG_SMP 2270 void send_cleanup_vector(struct irq_cfg *cfg) 2271 { 2272 cpumask_var_t cleanup_mask; 2273 2274 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { 2275 unsigned int i; 2276 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 2277 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); 2278 } else { 2279 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); 2280 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 2281 free_cpumask_var(cleanup_mask); 2282 } 2283 cfg->move_in_progress = 0; 2284 } 2285 2286 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) 2287 { 2288 int apic, pin; 2289 struct irq_pin_list *entry; 2290 u8 vector = cfg->vector; 2291 2292 for_each_irq_pin(entry, cfg->irq_2_pin) { 2293 unsigned int reg; 2294 2295 apic = entry->apic; 2296 pin = entry->pin; 2297 /* 2298 * With interrupt-remapping, destination information comes 2299 * from the interrupt-remapping table entry. 2300 */ 2301 if (!irq_remapped(cfg)) 2302 io_apic_write(apic, 0x11 + pin*2, dest); 2303 reg = io_apic_read(apic, 0x10 + pin*2); 2304 reg &= ~IO_APIC_REDIR_VECTOR_MASK; 2305 reg |= vector; 2306 io_apic_modify(apic, 0x10 + pin*2, reg); 2307 } 2308 } 2309 2310 /* 2311 * Either sets data->affinity to a valid value, and returns 2312 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and 2313 * leaves data->affinity untouched. 2314 */ 2315 int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2316 unsigned int *dest_id) 2317 { 2318 struct irq_cfg *cfg = data->chip_data; 2319 2320 if (!cpumask_intersects(mask, cpu_online_mask)) 2321 return -1; 2322 2323 if (assign_irq_vector(data->irq, data->chip_data, mask)) 2324 return -1; 2325 2326 cpumask_copy(data->affinity, mask); 2327 2328 *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); 2329 return 0; 2330 } 2331 2332 static int 2333 ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2334 bool force) 2335 { 2336 unsigned int dest, irq = data->irq; 2337 unsigned long flags; 2338 int ret; 2339 2340 raw_spin_lock_irqsave(&ioapic_lock, flags); 2341 ret = __ioapic_set_affinity(data, mask, &dest); 2342 if (!ret) { 2343 /* Only the high 8 bits are valid. */ 2344 dest = SET_APIC_LOGICAL_ID(dest); 2345 __target_IO_APIC_irq(irq, dest, data->chip_data); 2346 } 2347 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2348 return ret; 2349 } 2350 2351 #ifdef CONFIG_IRQ_REMAP 2352 2353 /* 2354 * Migrate the IO-APIC irq in the presence of intr-remapping. 2355 * 2356 * For both level and edge triggered, irq migration is a simple atomic 2357 * update (of vector and cpu destination) of the IRTE, followed by a flush of the hardware cache. 2358 * 2359 * For level triggered, we eliminate the io-apic RTE modification (with the 2360 * updated vector information), by using a virtual vector (io-apic pin number). 2361 * The real vector that is used to interrupt the cpu comes from 2362 * the interrupt-remapping table entry. 2363 * 2364 * As the migration is a simple atomic update of the IRTE, the same mechanism 2365 * is used to migrate MSI irqs in the presence of interrupt-remapping.
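 */

/*
 * A compiled-out sketch of the per-pin rewrite __target_IO_APIC_irq()
 * above performs: register 0x11 + pin*2 holds the destination half of
 * a redirection entry, register 0x10 + pin*2 the vector/flags half.
 * Like the original, this would have to run under ioapic_lock.
 */
#if 0
static void example_retarget_pin(int apic, int pin, u8 vector,
				 unsigned int dest)
{
	unsigned int reg;

	io_apic_write(apic, 0x11 + pin * 2, dest);
	reg = io_apic_read(apic, 0x10 + pin * 2);
	reg &= ~IO_APIC_REDIR_VECTOR_MASK;
	reg |= vector;
	io_apic_modify(apic, 0x10 + pin * 2, reg);
}
#endif

/*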
2366 */ 2367 static int 2368 ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2369 bool force) 2370 { 2371 struct irq_cfg *cfg = data->chip_data; 2372 unsigned int dest, irq = data->irq; 2373 struct irte irte; 2374 2375 if (!cpumask_intersects(mask, cpu_online_mask)) 2376 return -EINVAL; 2377 2378 if (get_irte(irq, &irte)) 2379 return -EBUSY; 2380 2381 if (assign_irq_vector(irq, cfg, mask)) 2382 return -EBUSY; 2383 2384 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); 2385 2386 irte.vector = cfg->vector; 2387 irte.dest_id = IRTE_DEST(dest); 2388 2389 /* 2390 * Atomically updates the IRTE with the new destination and vector, 2391 * and flushes the interrupt entry cache. 2392 */ 2393 modify_irte(irq, &irte); 2394 2395 /* 2396 * After this point, all the interrupts will start arriving 2397 * at the new destination. So, time to clean up the previous 2398 * vector allocation. 2399 */ 2400 if (cfg->move_in_progress) 2401 send_cleanup_vector(cfg); 2402 2403 cpumask_copy(data->affinity, mask); 2404 return 0; 2405 } 2406 2407 #else 2408 static inline int 2409 ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2410 bool force) 2411 { 2412 return 0; 2413 } 2414 #endif 2415 2416 asmlinkage void smp_irq_move_cleanup_interrupt(void) 2417 { 2418 unsigned vector, me; 2419 2420 ack_APIC_irq(); 2421 exit_idle(); 2422 irq_enter(); 2423 2424 me = smp_processor_id(); 2425 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { 2426 unsigned int irq; 2427 unsigned int irr; 2428 struct irq_desc *desc; 2429 struct irq_cfg *cfg; 2430 irq = __this_cpu_read(vector_irq[vector]); 2431 2432 if (irq == -1) 2433 continue; 2434 2435 desc = irq_to_desc(irq); 2436 if (!desc) 2437 continue; 2438 2439 cfg = irq_cfg(irq); 2440 raw_spin_lock(&desc->lock); 2441 2442 /* 2443 * Check if the irq migration is in progress. If so, we 2444 * haven't received the cleanup request yet for this irq. 2445 */ 2446 if (cfg->move_in_progress) 2447 goto unlock; 2448 2449 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2450 goto unlock; 2451 2452 irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); 2453 /* 2454 * Check if the vector that needs to be cleaned up is 2455 * registered at the cpu's IRR. If so, then this is not 2456 * the best time to clean it up. Let's clean it up in the 2457 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR 2458 * to myself.
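 */

/*
 * A compiled-out sketch of the IRR test used above: the local APIC
 * IRR is 256 bits wide, exposed as eight 32-bit registers spaced
 * 0x10 apart.  Testing vector 'v' therefore means reading register
 * APIC_IRR + (v / 32) * 0x10 and checking bit (v % 32).
 */
#if 0
static bool example_vector_pending(unsigned int v)
{
	unsigned int irr = apic_read(APIC_IRR + (v / 32) * 0x10);

	return irr & (1U << (v % 32));
}
#endif

/*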
2459 */ 2460 if (irr & (1 << (vector % 32))) { 2461 apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); 2462 goto unlock; 2463 } 2464 __this_cpu_write(vector_irq[vector], -1); 2465 unlock: 2466 raw_spin_unlock(&desc->lock); 2467 } 2468 2469 irq_exit(); 2470 } 2471 2472 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) 2473 { 2474 unsigned me; 2475 2476 if (likely(!cfg->move_in_progress)) 2477 return; 2478 2479 me = smp_processor_id(); 2480 2481 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2482 send_cleanup_vector(cfg); 2483 } 2484 2485 static void irq_complete_move(struct irq_cfg *cfg) 2486 { 2487 __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); 2488 } 2489 2490 void irq_force_complete_move(int irq) 2491 { 2492 struct irq_cfg *cfg = irq_get_chip_data(irq); 2493 2494 if (!cfg) 2495 return; 2496 2497 __irq_complete_move(cfg, cfg->vector); 2498 } 2499 #else 2500 static inline void irq_complete_move(struct irq_cfg *cfg) { } 2501 #endif 2502 2503 static void ack_apic_edge(struct irq_data *data) 2504 { 2505 irq_complete_move(data->chip_data); 2506 irq_move_irq(data); 2507 ack_APIC_irq(); 2508 } 2509 2510 atomic_t irq_mis_count; 2511 2512 static void ack_apic_level(struct irq_data *data) 2513 { 2514 struct irq_cfg *cfg = data->chip_data; 2515 int i, do_unmask_irq = 0, irq = data->irq; 2516 unsigned long v; 2517 2518 irq_complete_move(cfg); 2519 #ifdef CONFIG_GENERIC_PENDING_IRQ 2520 /* If we are moving the irq, we need to mask it */ 2521 if (unlikely(irqd_is_setaffinity_pending(data))) { 2522 do_unmask_irq = 1; 2523 mask_ioapic(cfg); 2524 } 2525 #endif 2526 2527 /* 2528 * It appears there is an erratum which affects at least version 0x11 2529 * of I/O APIC (that's the 82093AA and cores integrated into various 2530 * chipsets). Under certain conditions a level-triggered interrupt is 2531 * erroneously delivered as an edge-triggered one, but the respective IRR 2532 * bit gets set nevertheless. As a result the I/O unit expects an EOI 2533 * message but it will never arrive and further interrupts are blocked 2534 * from the source. The exact reason is so far unknown, but the 2535 * phenomenon was observed when two consecutive interrupt requests 2536 * from a given source get delivered to the same CPU and the source is 2537 * temporarily disabled in between. 2538 * 2539 * A workaround is to simulate an EOI message manually. We achieve it 2540 * by setting the trigger mode to edge and then to level when the edge 2541 * trigger mode gets detected in the TMR of a local APIC for a 2542 * level-triggered interrupt. We mask the source for the time of the 2543 * operation to prevent an edge-triggered interrupt escaping meanwhile. 2544 * The idea is from Manfred Spraul. --macro 2545 * 2546 * Also, in the case when a cpu goes offline, fixup_irqs() will forward 2547 * any unhandled interrupt on the offlined cpu to the new cpu 2548 * destination that is handling the corresponding interrupt. This 2549 * interrupt forwarding is done via IPIs. Hence, in this case also, the 2550 * level-triggered io-apic interrupt will be seen as an edge 2551 * interrupt in the IRR. And we can't rely on the cpu's EOI 2552 * to be broadcast to the IO-APICs, which would clear the remote IRR 2553 * corresponding to the level-triggered interrupt.
Hence, on IO-APICs 2554 * supporting the EOI register, we do an explicit EOI to clear the 2555 * remote IRR, and on IO-APICs which don't have an EOI register, 2556 * we use the above logic (mask+edge followed by unmask+level) from 2557 * Manfred Spraul to clear the remote IRR. 2558 */ 2559 i = cfg->vector; 2560 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); 2561 2562 /* 2563 * We must acknowledge the irq before we move it or the acknowledge will 2564 * not propagate properly. 2565 */ 2566 ack_APIC_irq(); 2567 2568 /* 2569 * Tail end of clearing the remote IRR bit (either by delivering the EOI 2570 * message via io-apic EOI register write or simulating it using 2571 * mask+edge followed by unmask+level logic) manually when the 2572 * level-triggered interrupt is seen as an edge-triggered interrupt 2573 * at the cpu. 2574 */ 2575 if (!(v & (1 << (i & 0x1f)))) { 2576 atomic_inc(&irq_mis_count); 2577 2578 eoi_ioapic_irq(irq, cfg); 2579 } 2580 2581 /* Now we can move and re-enable the irq */ 2582 if (unlikely(do_unmask_irq)) { 2583 /* Only migrate the irq if the ack has been received. 2584 * 2585 * On rare occasions the broadcast level triggered ack gets 2586 * delayed going to ioapics, and if we reprogram the 2587 * vector while Remote IRR is still set, the irq will never 2588 * fire again. 2589 * 2590 * To prevent this scenario we read the Remote IRR bit 2591 * of the ioapic. This has two effects. 2592 * - On any sane system the read of the ioapic will 2593 * flush writes (and acks) going to the ioapic from 2594 * this cpu. 2595 * - We get to see if the ACK has actually been delivered. 2596 * 2597 * Based on failed experiments of reprogramming the 2598 * ioapic entry from outside of irq context starting 2599 * with masking the ioapic entry and then polling until 2600 * Remote IRR was clear before reprogramming the 2601 * ioapic, I don't trust the Remote IRR bit to be 2602 * completely accurate. 2603 * 2604 * However, there appears to be no other way to plug 2605 * this race, so if the Remote IRR bit is not 2606 * accurate and is causing problems then it is a hardware bug 2607 * and you can go talk to the chipset vendor about it.
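 */

/*
 * A compiled-out sketch of what the io_apic_level_ack_pending() test
 * below amounts to for a single pin: the RTE's irr field is the
 * IO-APIC's Remote IRR, and the read also acts as a posted-write
 * flush toward the IO-APIC.
 */
#if 0
static bool example_remote_irr_set(int apic, int pin)
{
	struct IO_APIC_route_entry e = ioapic_read_entry(apic, pin);

	return e.irr != 0;
}
#endif

/*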
2608 */ 2609 if (!io_apic_level_ack_pending(cfg)) 2610 irq_move_masked_irq(data); 2611 unmask_ioapic(cfg); 2612 } 2613 } 2614 2615 #ifdef CONFIG_IRQ_REMAP 2616 static void ir_ack_apic_edge(struct irq_data *data) 2617 { 2618 ack_APIC_irq(); 2619 } 2620 2621 static void ir_ack_apic_level(struct irq_data *data) 2622 { 2623 ack_APIC_irq(); 2624 eoi_ioapic_irq(data->irq, data->chip_data); 2625 } 2626 2627 static void ir_print_prefix(struct irq_data *data, struct seq_file *p) 2628 { 2629 seq_printf(p, " IR-%s", data->chip->name); 2630 } 2631 2632 static void irq_remap_modify_chip_defaults(struct irq_chip *chip) 2633 { 2634 chip->irq_print_chip = ir_print_prefix; 2635 chip->irq_ack = ir_ack_apic_edge; 2636 chip->irq_eoi = ir_ack_apic_level; 2637 2638 #ifdef CONFIG_SMP 2639 chip->irq_set_affinity = ir_ioapic_set_affinity; 2640 #endif 2641 } 2642 #endif /* CONFIG_IRQ_REMAP */ 2643 2644 static struct irq_chip ioapic_chip __read_mostly = { 2645 .name = "IO-APIC", 2646 .irq_startup = startup_ioapic_irq, 2647 .irq_mask = mask_ioapic_irq, 2648 .irq_unmask = unmask_ioapic_irq, 2649 .irq_ack = ack_apic_edge, 2650 .irq_eoi = ack_apic_level, 2651 #ifdef CONFIG_SMP 2652 .irq_set_affinity = ioapic_set_affinity, 2653 #endif 2654 .irq_retrigger = ioapic_retrigger_irq, 2655 }; 2656 2657 static inline void init_IO_APIC_traps(void) 2658 { 2659 struct irq_cfg *cfg; 2660 unsigned int irq; 2661 2662 /* 2663 * NOTE! The local APIC isn't very good at handling 2664 * multiple interrupts at the same interrupt level. 2665 * As the interrupt level is determined by taking the 2666 * vector number and shifting that right by 4, we 2667 * want to spread these out a bit so that they don't 2668 * all fall in the same interrupt level. 2669 * 2670 * Also, we've got to be careful not to trash gate 2671 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2672 */ 2673 for_each_active_irq(irq) { 2674 cfg = irq_get_chip_data(irq); 2675 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { 2676 /* 2677 * Hmm.. We don't have an entry for this, 2678 * so default to an old-fashioned 8259 2679 * interrupt if we can.. 2680 */ 2681 if (irq < legacy_pic->nr_legacy_irqs) 2682 legacy_pic->make_irq(irq); 2683 else 2684 /* Strange. Oh, well.. */ 2685 irq_set_chip(irq, &no_irq_chip); 2686 } 2687 } 2688 } 2689 2690 /* 2691 * The local APIC irq-chip implementation: 2692 */ 2693 2694 static void mask_lapic_irq(struct irq_data *data) 2695 { 2696 unsigned long v; 2697 2698 v = apic_read(APIC_LVT0); 2699 apic_write(APIC_LVT0, v | APIC_LVT_MASKED); 2700 } 2701 2702 static void unmask_lapic_irq(struct irq_data *data) 2703 { 2704 unsigned long v; 2705 2706 v = apic_read(APIC_LVT0); 2707 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); 2708 } 2709 2710 static void ack_lapic_irq(struct irq_data *data) 2711 { 2712 ack_APIC_irq(); 2713 } 2714 2715 static struct irq_chip lapic_chip __read_mostly = { 2716 .name = "local-APIC", 2717 .irq_mask = mask_lapic_irq, 2718 .irq_unmask = unmask_lapic_irq, 2719 .irq_ack = ack_lapic_irq, 2720 }; 2721 2722 static void lapic_register_intr(int irq) 2723 { 2724 irq_clear_status_flags(irq, IRQ_LEVEL); 2725 irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, 2726 "edge"); 2727 } 2728 2729 /* 2730 * This looks a bit hackish but it's about the only way of sending 2731 * a few INTA cycles to 8259As and any associated glue logic. ICR does 2732 * not support the ExtINT mode, unfortunately.
We need to send these 2733 * cycles as some i82489DX-based boards have glue logic that keeps the 2734 * 8259A interrupt line asserted until INTA. --macro 2735 */ 2736 static inline void __init unlock_ExtINT_logic(void) 2737 { 2738 int apic, pin, i; 2739 struct IO_APIC_route_entry entry0, entry1; 2740 unsigned char save_control, save_freq_select; 2741 2742 pin = find_isa_irq_pin(8, mp_INT); 2743 if (pin == -1) { 2744 WARN_ON_ONCE(1); 2745 return; 2746 } 2747 apic = find_isa_irq_apic(8, mp_INT); 2748 if (apic == -1) { 2749 WARN_ON_ONCE(1); 2750 return; 2751 } 2752 2753 entry0 = ioapic_read_entry(apic, pin); 2754 clear_IO_APIC_pin(apic, pin); 2755 2756 memset(&entry1, 0, sizeof(entry1)); 2757 2758 entry1.dest_mode = 0; /* physical delivery */ 2759 entry1.mask = 0; /* unmask IRQ now */ 2760 entry1.dest = hard_smp_processor_id(); 2761 entry1.delivery_mode = dest_ExtINT; 2762 entry1.polarity = entry0.polarity; 2763 entry1.trigger = 0; 2764 entry1.vector = 0; 2765 2766 ioapic_write_entry(apic, pin, entry1); 2767 2768 save_control = CMOS_READ(RTC_CONTROL); 2769 save_freq_select = CMOS_READ(RTC_FREQ_SELECT); 2770 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, 2771 RTC_FREQ_SELECT); 2772 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); 2773 2774 i = 100; 2775 while (i-- > 0) { 2776 mdelay(10); 2777 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) 2778 i -= 10; 2779 } 2780 2781 CMOS_WRITE(save_control, RTC_CONTROL); 2782 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 2783 clear_IO_APIC_pin(apic, pin); 2784 2785 ioapic_write_entry(apic, pin, entry0); 2786 } 2787 2788 static int disable_timer_pin_1 __initdata; 2789 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */ 2790 static int __init disable_timer_pin_setup(char *arg) 2791 { 2792 disable_timer_pin_1 = 1; 2793 return 0; 2794 } 2795 early_param("disable_timer_pin_1", disable_timer_pin_setup); 2796 2797 int timer_through_8259 __initdata; 2798 2799 /* 2800 * This code may look a bit paranoid, but it's supposed to cooperate with 2801 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ 2802 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast 2803 * fanatically on his truly buggy board. 2804 * 2805 * FIXME: really need to revamp this for all platforms. 2806 */ 2807 static inline void __init check_timer(void) 2808 { 2809 struct irq_cfg *cfg = irq_get_chip_data(0); 2810 int node = cpu_to_node(0); 2811 int apic1, pin1, apic2, pin2; 2812 unsigned long flags; 2813 int no_pin1 = 0; 2814 2815 local_irq_save(flags); 2816 2817 /* 2818 * get/set the timer IRQ vector: 2819 */ 2820 legacy_pic->mask(0); 2821 assign_irq_vector(0, cfg, apic->target_cpus()); 2822 2823 /* 2824 * As IRQ0 is to be enabled in the 8259A, the virtual 2825 * wire has to be disabled in the local APIC. Also 2826 * timer interrupts need to be acknowledged manually in 2827 * the 8259A for the i82489DX when using the NMI 2828 * watchdog as that APIC treats NMIs as level-triggered. 2829 * The AEOI mode will finish them in the 8259A 2830 * automatically. 
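 */

/*
 * A compiled-out summary of the three LVT0 configurations that
 * check_timer() below cycles through while hunting for a working
 * timer path; 'vector' is the timer vector assigned above.
 */
#if 0
static void example_lvt0_modes(unsigned int vector)
{
	/* 1. ExtINT masked: the 8259 output is ignored by this CPU. */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	/* 2. Fixed delivery: timer as a normal vectored local IRQ. */
	apic_write(APIC_LVT0, APIC_DM_FIXED | vector);
	/* 3. ExtINT unmasked: classic virtual wire through the 8259. */
	apic_write(APIC_LVT0, APIC_DM_EXTINT);
}
#endif

/*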
2831 */ 2832 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 2833 legacy_pic->init(1); 2834 2835 pin1 = find_isa_irq_pin(0, mp_INT); 2836 apic1 = find_isa_irq_apic(0, mp_INT); 2837 pin2 = ioapic_i8259.pin; 2838 apic2 = ioapic_i8259.apic; 2839 2840 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X " 2841 "apic1=%d pin1=%d apic2=%d pin2=%d\n", 2842 cfg->vector, apic1, pin1, apic2, pin2); 2843 2844 /* 2845 * Some BIOS writers are clueless and report the ExtINTA 2846 * I/O APIC input from the cascaded 8259A as the timer 2847 * interrupt input. So just in case, if only one pin 2848 * was found above, try it both directly and through the 2849 * 8259A. 2850 */ 2851 if (pin1 == -1) { 2852 if (intr_remapping_enabled) 2853 panic("BIOS bug: timer not connected to IO-APIC"); 2854 pin1 = pin2; 2855 apic1 = apic2; 2856 no_pin1 = 1; 2857 } else if (pin2 == -1) { 2858 pin2 = pin1; 2859 apic2 = apic1; 2860 } 2861 2862 if (pin1 != -1) { 2863 /* 2864 * Ok, does IRQ0 through the IOAPIC work? 2865 */ 2866 if (no_pin1) { 2867 add_pin_to_irq_node(cfg, node, apic1, pin1); 2868 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2869 } else { 2870 /* For edge triggers, setup_ioapic_irq() already 2871 * leaves the pin unmasked, so we only need to 2872 * unmask it if it is level-triggered. 2873 * Do we really have a level-triggered timer? 2874 */ 2875 int idx; 2876 idx = find_irq_entry(apic1, pin1, mp_INT); 2877 if (idx != -1 && irq_trigger(idx)) 2878 unmask_ioapic(cfg); 2879 } 2880 if (timer_irq_works()) { 2881 if (disable_timer_pin_1 > 0) 2882 clear_IO_APIC_pin(0, pin1); 2883 goto out; 2884 } 2885 if (intr_remapping_enabled) 2886 panic("timer doesn't work through Interrupt-remapped IO-APIC"); 2887 local_irq_disable(); 2888 clear_IO_APIC_pin(apic1, pin1); 2889 if (!no_pin1) 2890 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " 2891 "8254 timer not connected to IO-APIC\n"); 2892 2893 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer " 2894 "(IRQ0) through the 8259A ...\n"); 2895 apic_printk(APIC_QUIET, KERN_INFO 2896 "..... (found apic %d pin %d) ...\n", apic2, pin2); 2897 /* 2898 * Legacy devices should be connected to IO APIC #0 2899 */ 2900 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); 2901 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2902 legacy_pic->unmask(0); 2903 if (timer_irq_works()) { 2904 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2905 timer_through_8259 = 1; 2906 goto out; 2907 } 2908 /* 2909 * Cleanup, just in case ... 2910 */ 2911 local_irq_disable(); 2912 legacy_pic->mask(0); 2913 clear_IO_APIC_pin(apic2, pin2); 2914 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); 2915 } 2916 2917 apic_printk(APIC_QUIET, KERN_INFO 2918 "...trying to set up timer as Virtual Wire IRQ...\n"); 2919 2920 lapic_register_intr(0); 2921 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 2922 legacy_pic->unmask(0); 2923 2924 if (timer_irq_works()) { 2925 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2926 goto out; 2927 } 2928 local_irq_disable(); 2929 legacy_pic->mask(0); 2930 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 2931 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 2932 2933 apic_printk(APIC_QUIET, KERN_INFO 2934 "...trying to set up timer as ExtINT IRQ...\n"); 2935 2936 legacy_pic->init(0); 2937 legacy_pic->make_irq(0); 2938 apic_write(APIC_LVT0, APIC_DM_EXTINT); 2939 2940 unlock_ExtINT_logic(); 2941 2942 if (timer_irq_works()) { 2943 apic_printk(APIC_QUIET, KERN_INFO ".....
works.\n"); 2944 goto out; 2945 } 2946 local_irq_disable(); 2947 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); 2948 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " 2949 "report. Then try booting with the 'noapic' option.\n"); 2950 out: 2951 local_irq_restore(flags); 2952 } 2953 2954 /* 2955 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available 2956 * to devices. However there may be an I/O APIC pin available for 2957 * this interrupt regardless. The pin may be left unconnected, but 2958 * typically it will be reused as an ExtINT cascade interrupt for 2959 * the master 8259A. In the MPS case such a pin will normally be 2960 * reported as an ExtINT interrupt in the MP table. With ACPI 2961 * there is no provision for ExtINT interrupts, and in the absence 2962 * of an override it would be treated as an ordinary ISA I/O APIC 2963 * interrupt, that is edge-triggered and unmasked by default. We 2964 * used to do this, but it caused problems on some systems because 2965 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using 2966 * the same ExtINT cascade interrupt to drive the local APIC of the 2967 * bootstrap processor. Therefore we refrain from routing IRQ2 to 2968 * the I/O APIC in all cases now. No actual device should request 2969 * it anyway. --macro 2970 */ 2971 #define PIC_IRQS (1UL << PIC_CASCADE_IR) 2972 2973 void __init setup_IO_APIC(void) 2974 { 2975 2976 /* 2977 * calling enable_IO_APIC() is moved to setup_local_APIC for BP 2978 */ 2979 io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL; 2980 2981 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); 2982 /* 2983 * Set up IO-APIC IRQ routing. 2984 */ 2985 x86_init.mpparse.setup_ioapic_ids(); 2986 2987 sync_Arb_IDs(); 2988 setup_IO_APIC_irqs(); 2989 init_IO_APIC_traps(); 2990 if (legacy_pic->nr_legacy_irqs) 2991 check_timer(); 2992 } 2993 2994 /* 2995 * Called after all the initialization is done. 
If we didn't find any 2996 * APIC bugs, then we can allow the modify fast path 2997 */ 2998 2999 static int __init io_apic_bug_finalize(void) 3000 { 3001 if (sis_apic_bug == -1) 3002 sis_apic_bug = 0; 3003 return 0; 3004 } 3005 3006 late_initcall(io_apic_bug_finalize); 3007 3008 static void resume_ioapic_id(int ioapic_idx) 3009 { 3010 unsigned long flags; 3011 union IO_APIC_reg_00 reg_00; 3012 3013 raw_spin_lock_irqsave(&ioapic_lock, flags); 3014 reg_00.raw = io_apic_read(ioapic_idx, 0); 3015 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) { 3016 reg_00.bits.ID = mpc_ioapic_id(ioapic_idx); 3017 io_apic_write(ioapic_idx, 0, reg_00.raw); 3018 } 3019 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3020 } 3021 3022 static void ioapic_resume(void) 3023 { 3024 int ioapic_idx; 3025 3026 for (ioapic_idx = nr_ioapics - 1; ioapic_idx >= 0; ioapic_idx--) 3027 resume_ioapic_id(ioapic_idx); 3028 3029 restore_ioapic_entries(); 3030 } 3031 3032 static struct syscore_ops ioapic_syscore_ops = { 3033 .suspend = save_ioapic_entries, 3034 .resume = ioapic_resume, 3035 }; 3036 3037 static int __init ioapic_init_ops(void) 3038 { 3039 register_syscore_ops(&ioapic_syscore_ops); 3040 3041 return 0; 3042 } 3043 3044 device_initcall(ioapic_init_ops); 3045 3046 /* 3047 * Dynamic irq allocation and deallocation 3048 */ 3049 unsigned int create_irq_nr(unsigned int from, int node) 3050 { 3051 struct irq_cfg *cfg; 3052 unsigned long flags; 3053 unsigned int ret = 0; 3054 int irq; 3055 3056 if (from < nr_irqs_gsi) 3057 from = nr_irqs_gsi; 3058 3059 irq = alloc_irq_from(from, node); 3060 if (irq < 0) 3061 return 0; 3062 cfg = alloc_irq_cfg(irq, node); 3063 if (!cfg) { 3064 free_irq_at(irq, NULL); 3065 return 0; 3066 } 3067 3068 raw_spin_lock_irqsave(&vector_lock, flags); 3069 if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) 3070 ret = irq; 3071 raw_spin_unlock_irqrestore(&vector_lock, flags); 3072 3073 if (ret) { 3074 irq_set_chip_data(irq, cfg); 3075 irq_clear_status_flags(irq, IRQ_NOREQUEST); 3076 } else { 3077 free_irq_at(irq, cfg); 3078 } 3079 return ret; 3080 } 3081 3082 int create_irq(void) 3083 { 3084 int node = cpu_to_node(0); 3085 unsigned int irq_want; 3086 int irq; 3087 3088 irq_want = nr_irqs_gsi; 3089 irq = create_irq_nr(irq_want, node); 3090 3091 if (irq == 0) 3092 irq = -1; 3093 3094 return irq; 3095 } 3096 3097 void destroy_irq(unsigned int irq) 3098 { 3099 struct irq_cfg *cfg = irq_get_chip_data(irq); 3100 unsigned long flags; 3101 3102 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); 3103 3104 if (irq_remapped(cfg)) 3105 free_irte(irq); 3106 raw_spin_lock_irqsave(&vector_lock, flags); 3107 __clear_irq_vector(irq, cfg); 3108 raw_spin_unlock_irqrestore(&vector_lock, flags); 3109 free_irq_at(irq, cfg); 3110 } 3111 3112 /* 3113 * MSI message composition 3114 */ 3115 #ifdef CONFIG_PCI_MSI 3116 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, 3117 struct msi_msg *msg, u8 hpet_id) 3118 { 3119 struct irq_cfg *cfg; 3120 int err; 3121 unsigned dest; 3122 3123 if (disable_apic) 3124 return -ENXIO; 3125 3126 cfg = irq_cfg(irq); 3127 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3128 if (err) 3129 return err; 3130 3131 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 3132 3133 if (irq_remapped(cfg)) { 3134 struct irte irte; 3135 int ir_index; 3136 u16 sub_handle; 3137 3138 ir_index = map_irq_to_irte_handle(irq, &sub_handle); 3139 BUG_ON(ir_index == -1); 3140 3141 prepare_irte(&irte, cfg->vector, dest); 3142 3143 /* Set source-id of interrupt request */ 3144
if (pdev) 3145 set_msi_sid(&irte, pdev); 3146 else 3147 set_hpet_sid(&irte, hpet_id); 3148 3149 modify_irte(irq, &irte); 3150 3151 msg->address_hi = MSI_ADDR_BASE_HI; 3152 msg->data = sub_handle; 3153 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | 3154 MSI_ADDR_IR_SHV | 3155 MSI_ADDR_IR_INDEX1(ir_index) | 3156 MSI_ADDR_IR_INDEX2(ir_index); 3157 } else { 3158 if (x2apic_enabled()) 3159 msg->address_hi = MSI_ADDR_BASE_HI | 3160 MSI_ADDR_EXT_DEST_ID(dest); 3161 else 3162 msg->address_hi = MSI_ADDR_BASE_HI; 3163 3164 msg->address_lo = 3165 MSI_ADDR_BASE_LO | 3166 ((apic->irq_dest_mode == 0) ? 3167 MSI_ADDR_DEST_MODE_PHYSICAL: 3168 MSI_ADDR_DEST_MODE_LOGICAL) | 3169 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3170 MSI_ADDR_REDIRECTION_CPU: 3171 MSI_ADDR_REDIRECTION_LOWPRI) | 3172 MSI_ADDR_DEST_ID(dest); 3173 3174 msg->data = 3175 MSI_DATA_TRIGGER_EDGE | 3176 MSI_DATA_LEVEL_ASSERT | 3177 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3178 MSI_DATA_DELIVERY_FIXED: 3179 MSI_DATA_DELIVERY_LOWPRI) | 3180 MSI_DATA_VECTOR(cfg->vector); 3181 } 3182 return err; 3183 } 3184 3185 #ifdef CONFIG_SMP 3186 static int 3187 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3188 { 3189 struct irq_cfg *cfg = data->chip_data; 3190 struct msi_msg msg; 3191 unsigned int dest; 3192 3193 if (__ioapic_set_affinity(data, mask, &dest)) 3194 return -1; 3195 3196 __get_cached_msi_msg(data->msi_desc, &msg); 3197 3198 msg.data &= ~MSI_DATA_VECTOR_MASK; 3199 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3200 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3201 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3202 3203 __write_msi_msg(data->msi_desc, &msg); 3204 3205 return 0; 3206 } 3207 #endif /* CONFIG_SMP */ 3208 3209 /* 3210 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, 3211 * which implement the MSI or MSI-X Capability Structure. 3212 */ 3213 static struct irq_chip msi_chip = { 3214 .name = "PCI-MSI", 3215 .irq_unmask = unmask_msi_irq, 3216 .irq_mask = mask_msi_irq, 3217 .irq_ack = ack_apic_edge, 3218 #ifdef CONFIG_SMP 3219 .irq_set_affinity = msi_set_affinity, 3220 #endif 3221 .irq_retrigger = ioapic_retrigger_irq, 3222 }; 3223 3224 /* 3225 * Map the PCI dev to the corresponding remapping hardware unit 3226 * and allocate 'nvec' consecutive interrupt-remapping table entries 3227 * in it. 
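 */

/*
 * A compiled-out sketch of the non-remapped MSI programming done by
 * msi_compose_msg() above, for a hypothetical vector/apicid pair.
 * The MSI_ADDR_* and MSI_DATA_* macros come from <asm/msidef.h>.
 */
#if 0
static void example_compose_fixed_edge_msi(struct msi_msg *msg,
					   u8 vector, unsigned int apicid)
{
	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO |
			  MSI_ADDR_DEST_MODE_PHYSICAL |
			  MSI_ADDR_REDIRECTION_CPU |
			  MSI_ADDR_DEST_ID(apicid);
	msg->data = MSI_DATA_TRIGGER_EDGE |
		    MSI_DATA_LEVEL_ASSERT |
		    MSI_DATA_DELIVERY_FIXED |
		    MSI_DATA_VECTOR(vector);
}
#endif

/*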
3228 */ 3229 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) 3230 { 3231 struct intel_iommu *iommu; 3232 int index; 3233 3234 iommu = map_dev_to_ir(dev); 3235 if (!iommu) { 3236 printk(KERN_ERR 3237 "Unable to map PCI %s to iommu\n", pci_name(dev)); 3238 return -ENOENT; 3239 } 3240 3241 index = alloc_irte(iommu, irq, nvec); 3242 if (index < 0) { 3243 printk(KERN_ERR 3244 "Unable to allocate %d IRTE for PCI %s\n", nvec, 3245 pci_name(dev)); 3246 return -ENOSPC; 3247 } 3248 return index; 3249 } 3250 3251 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3252 { 3253 struct irq_chip *chip = &msi_chip; 3254 struct msi_msg msg; 3255 int ret; 3256 3257 ret = msi_compose_msg(dev, irq, &msg, -1); 3258 if (ret < 0) 3259 return ret; 3260 3261 irq_set_msi_desc(irq, msidesc); 3262 write_msi_msg(irq, &msg); 3263 3264 if (irq_remapped(irq_get_chip_data(irq))) { 3265 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3266 irq_remap_modify_chip_defaults(chip); 3267 } 3268 3269 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3270 3271 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); 3272 3273 return 0; 3274 } 3275 3276 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3277 { 3278 int node, ret, sub_handle, index = 0; 3279 unsigned int irq, irq_want; 3280 struct msi_desc *msidesc; 3281 struct intel_iommu *iommu = NULL; 3282 3283 /* x86 doesn't support multiple MSI yet */ 3284 if (type == PCI_CAP_ID_MSI && nvec > 1) 3285 return 1; 3286 3287 node = dev_to_node(&dev->dev); 3288 irq_want = nr_irqs_gsi; 3289 sub_handle = 0; 3290 list_for_each_entry(msidesc, &dev->msi_list, list) { 3291 irq = create_irq_nr(irq_want, node); 3292 if (irq == 0) 3293 return -1; 3294 irq_want = irq + 1; 3295 if (!intr_remapping_enabled) 3296 goto no_ir; 3297 3298 if (!sub_handle) { 3299 /* 3300 * allocate the consecutive block of IRTE's 3301 * for 'nvec' 3302 */ 3303 index = msi_alloc_irte(dev, irq, nvec); 3304 if (index < 0) { 3305 ret = index; 3306 goto error; 3307 } 3308 } else { 3309 iommu = map_dev_to_ir(dev); 3310 if (!iommu) { 3311 ret = -ENOENT; 3312 goto error; 3313 } 3314 /* 3315 * setup the mapping between the irq and the IRTE 3316 * base index, the sub_handle pointing to the 3317 * appropriate interrupt remap table entry. 
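 */

/*
 * A compiled-out sketch of the index/sub_handle scheme described
 * above: vector k of a device's block is addressed as the pair
 * (index, k).  Real code allocates each irq separately, so the
 * contiguous 'first_irq + k' below is a simplifying assumption.
 */
#if 0
static void example_map_msi_block(struct intel_iommu *iommu,
				  unsigned int first_irq, int index, int nvec)
{
	int k;

	/* entry 0 is bound by alloc_irte(); map the rest explicitly */
	for (k = 1; k < nvec; k++)
		set_irte_irq(first_irq + k, iommu, index, k);
}
#endif

/*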
3318 */ 3319 set_irte_irq(irq, iommu, index, sub_handle); 3320 } 3321 no_ir: 3322 ret = setup_msi_irq(dev, msidesc, irq); 3323 if (ret < 0) 3324 goto error; 3325 sub_handle++; 3326 } 3327 return 0; 3328 3329 error: 3330 destroy_irq(irq); 3331 return ret; 3332 } 3333 3334 void native_teardown_msi_irq(unsigned int irq) 3335 { 3336 destroy_irq(irq); 3337 } 3338 3339 #ifdef CONFIG_DMAR_TABLE 3340 #ifdef CONFIG_SMP 3341 static int 3342 dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3343 bool force) 3344 { 3345 struct irq_cfg *cfg = data->chip_data; 3346 unsigned int dest, irq = data->irq; 3347 struct msi_msg msg; 3348 3349 if (__ioapic_set_affinity(data, mask, &dest)) 3350 return -1; 3351 3352 dmar_msi_read(irq, &msg); 3353 3354 msg.data &= ~MSI_DATA_VECTOR_MASK; 3355 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3356 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3357 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3358 msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); 3359 3360 dmar_msi_write(irq, &msg); 3361 3362 return 0; 3363 } 3364 3365 #endif /* CONFIG_SMP */ 3366 3367 static struct irq_chip dmar_msi_type = { 3368 .name = "DMAR_MSI", 3369 .irq_unmask = dmar_msi_unmask, 3370 .irq_mask = dmar_msi_mask, 3371 .irq_ack = ack_apic_edge, 3372 #ifdef CONFIG_SMP 3373 .irq_set_affinity = dmar_msi_set_affinity, 3374 #endif 3375 .irq_retrigger = ioapic_retrigger_irq, 3376 }; 3377 3378 int arch_setup_dmar_msi(unsigned int irq) 3379 { 3380 int ret; 3381 struct msi_msg msg; 3382 3383 ret = msi_compose_msg(NULL, irq, &msg, -1); 3384 if (ret < 0) 3385 return ret; 3386 dmar_msi_write(irq, &msg); 3387 irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, 3388 "edge"); 3389 return 0; 3390 } 3391 #endif 3392 3393 #ifdef CONFIG_HPET_TIMER 3394 3395 #ifdef CONFIG_SMP 3396 static int hpet_msi_set_affinity(struct irq_data *data, 3397 const struct cpumask *mask, bool force) 3398 { 3399 struct irq_cfg *cfg = data->chip_data; 3400 struct msi_msg msg; 3401 unsigned int dest; 3402 3403 if (__ioapic_set_affinity(data, mask, &dest)) 3404 return -1; 3405 3406 hpet_msi_read(data->handler_data, &msg); 3407 3408 msg.data &= ~MSI_DATA_VECTOR_MASK; 3409 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3410 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3411 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3412 3413 hpet_msi_write(data->handler_data, &msg); 3414 3415 return 0; 3416 } 3417 3418 #endif /* CONFIG_SMP */ 3419 3420 static struct irq_chip hpet_msi_type = { 3421 .name = "HPET_MSI", 3422 .irq_unmask = hpet_msi_unmask, 3423 .irq_mask = hpet_msi_mask, 3424 .irq_ack = ack_apic_edge, 3425 #ifdef CONFIG_SMP 3426 .irq_set_affinity = hpet_msi_set_affinity, 3427 #endif 3428 .irq_retrigger = ioapic_retrigger_irq, 3429 }; 3430 3431 int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3432 { 3433 struct irq_chip *chip = &hpet_msi_type; 3434 struct msi_msg msg; 3435 int ret; 3436 3437 if (intr_remapping_enabled) { 3438 struct intel_iommu *iommu = map_hpet_to_ir(id); 3439 int index; 3440 3441 if (!iommu) 3442 return -1; 3443 3444 index = alloc_irte(iommu, irq, 1); 3445 if (index < 0) 3446 return -1; 3447 } 3448 3449 ret = msi_compose_msg(NULL, irq, &msg, id); 3450 if (ret < 0) 3451 return ret; 3452 3453 hpet_msi_write(irq_get_handler_data(irq), &msg); 3454 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3455 if (irq_remapped(irq_get_chip_data(irq))) 3456 irq_remap_modify_chip_defaults(chip); 3457 3458 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3459 return 0; 3460 } 3461 #endif 3462 
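/*
 * A compiled-out sketch of the pattern shared by msi_set_affinity(),
 * dmar_msi_set_affinity() and hpet_msi_set_affinity() above: pick a
 * destination, then patch the vector and destination fields of the
 * cached message.  read_msg()/write_msg() are hypothetical stand-ins
 * for the per-chip accessors.
 */
#if 0
static int example_msi_retarget(struct irq_data *data,
				const struct cpumask *mask, u8 vector)
{
	struct msi_msg msg;
	unsigned int dest;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	read_msg(data, &msg);			/* hypothetical accessor */
	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
	write_msg(data, &msg);			/* hypothetical accessor */

	return 0;
}
#endif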
3463 #endif /* CONFIG_PCI_MSI */ 3464 /* 3465 * Hypertransport interrupt support 3466 */ 3467 #ifdef CONFIG_HT_IRQ 3468 3469 #ifdef CONFIG_SMP 3470 3471 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) 3472 { 3473 struct ht_irq_msg msg; 3474 fetch_ht_irq_msg(irq, &msg); 3475 3476 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK); 3477 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); 3478 3479 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest); 3480 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); 3481 3482 write_ht_irq_msg(irq, &msg); 3483 } 3484 3485 static int 3486 ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3487 { 3488 struct irq_cfg *cfg = data->chip_data; 3489 unsigned int dest; 3490 3491 if (__ioapic_set_affinity(data, mask, &dest)) 3492 return -1; 3493 3494 target_ht_irq(data->irq, dest, cfg->vector); 3495 return 0; 3496 } 3497 3498 #endif 3499 3500 static struct irq_chip ht_irq_chip = { 3501 .name = "PCI-HT", 3502 .irq_mask = mask_ht_irq, 3503 .irq_unmask = unmask_ht_irq, 3504 .irq_ack = ack_apic_edge, 3505 #ifdef CONFIG_SMP 3506 .irq_set_affinity = ht_set_affinity, 3507 #endif 3508 .irq_retrigger = ioapic_retrigger_irq, 3509 }; 3510 3511 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) 3512 { 3513 struct irq_cfg *cfg; 3514 int err; 3515 3516 if (disable_apic) 3517 return -ENXIO; 3518 3519 cfg = irq_cfg(irq); 3520 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3521 if (!err) { 3522 struct ht_irq_msg msg; 3523 unsigned dest; 3524 3525 dest = apic->cpu_mask_to_apicid_and(cfg->domain, 3526 apic->target_cpus()); 3527 3528 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3529 3530 msg.address_lo = 3531 HT_IRQ_LOW_BASE | 3532 HT_IRQ_LOW_DEST_ID(dest) | 3533 HT_IRQ_LOW_VECTOR(cfg->vector) | 3534 ((apic->irq_dest_mode == 0) ? 3535 HT_IRQ_LOW_DM_PHYSICAL : 3536 HT_IRQ_LOW_DM_LOGICAL) | 3537 HT_IRQ_LOW_RQEOI_EDGE | 3538 ((apic->irq_delivery_mode != dest_LowestPrio) ? 
3539 HT_IRQ_LOW_MT_FIXED : 3540 HT_IRQ_LOW_MT_ARBITRATED) | 3541 HT_IRQ_LOW_IRQ_MASKED; 3542 3543 write_ht_irq_msg(irq, &msg); 3544 3545 irq_set_chip_and_handler_name(irq, &ht_irq_chip, 3546 handle_edge_irq, "edge"); 3547 3548 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); 3549 } 3550 return err; 3551 } 3552 #endif /* CONFIG_HT_IRQ */ 3553 3554 static int 3555 io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) 3556 { 3557 struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); 3558 int ret; 3559 3560 if (!cfg) 3561 return -EINVAL; 3562 ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin); 3563 if (!ret) 3564 setup_ioapic_irq(irq, cfg, attr); 3565 return ret; 3566 } 3567 3568 int io_apic_setup_irq_pin_once(unsigned int irq, int node, 3569 struct io_apic_irq_attr *attr) 3570 { 3571 unsigned int ioapic_idx = attr->ioapic, pin = attr->ioapic_pin; 3572 int ret; 3573 3574 /* Avoid redundant programming */ 3575 if (test_bit(pin, ioapics[ioapic_idx].pin_programmed)) { 3576 pr_debug("Pin %d-%d already programmed\n", 3577 mpc_ioapic_id(ioapic_idx), pin); 3578 return 0; 3579 } 3580 ret = io_apic_setup_irq_pin(irq, node, attr); 3581 if (!ret) 3582 set_bit(pin, ioapics[ioapic_idx].pin_programmed); 3583 return ret; 3584 } 3585 3586 static int __init io_apic_get_redir_entries(int ioapic) 3587 { 3588 union IO_APIC_reg_01 reg_01; 3589 unsigned long flags; 3590 3591 raw_spin_lock_irqsave(&ioapic_lock, flags); 3592 reg_01.raw = io_apic_read(ioapic, 1); 3593 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3594 3595 /* The register returns the maximum redirection index 3596 * supported, which is one less than the total number of redir 3597 * entries. 3598 */ 3599 return reg_01.bits.entries + 1; 3600 } 3601 3602 static void __init probe_nr_irqs_gsi(void) 3603 { 3604 int nr; 3605 3606 nr = gsi_top + NR_IRQS_LEGACY; 3607 if (nr > nr_irqs_gsi) 3608 nr_irqs_gsi = nr; 3609 3610 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3611 } 3612 3613 int get_nr_irqs_gsi(void) 3614 { 3615 return nr_irqs_gsi; 3616 } 3617 3618 int __init arch_probe_nr_irqs(void) 3619 { 3620 int nr; 3621 3622 if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) 3623 nr_irqs = NR_VECTORS * nr_cpu_ids; 3624 3625 nr = nr_irqs_gsi + 8 * nr_cpu_ids; 3626 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) 3627 /* 3628 * for MSI and HT dynamic irqs 3629 */ 3630 nr += nr_irqs_gsi * 16; 3631 #endif 3632 if (nr < nr_irqs) 3633 nr_irqs = nr; 3634 3635 return NR_IRQS_LEGACY; 3636 } 3637 3638 int io_apic_set_pci_routing(struct device *dev, int irq, 3639 struct io_apic_irq_attr *irq_attr) 3640 { 3641 int node; 3642 3643 if (!IO_APIC_IRQ(irq)) { 3644 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", 3645 irq_attr->ioapic); 3646 return -EINVAL; 3647 } 3648 3649 node = dev ? dev_to_node(dev) : cpu_to_node(0); 3650 3651 return io_apic_setup_irq_pin_once(irq, node, irq_attr); 3652 } 3653 3654 #ifdef CONFIG_X86_32 3655 static int __init io_apic_get_unique_id(int ioapic, int apic_id) 3656 { 3657 union IO_APIC_reg_00 reg_00; 3658 static physid_mask_t apic_id_map = PHYSID_MASK_NONE; 3659 physid_mask_t tmp; 3660 unsigned long flags; 3661 int i = 0; 3662 3663 /* 3664 * The P4 platform supports up to 256 APIC IDs on two separate APIC 3665 * buses (one for LAPICs, one for IOAPICs), whereas its predecessors only 3666 * support up to 16 on one shared APIC bus. 3667 * 3668 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full 3669 * advantage of the new APIC bus architecture.
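 */

/*
 * A compiled-out sketch of the sizing arithmetic in
 * probe_nr_irqs_gsi() and arch_probe_nr_irqs() above, with the MSI/HT
 * term applied unconditionally for illustration.  E.g. a box with
 * gsi_top = 24 and 8 possible cpus yields 40 + 64 + 640 irqs.
 */
#if 0
static int example_nr_irqs_estimate(u32 top, int cpus)
{
	int gsi = top + NR_IRQS_LEGACY;		/* nr_irqs_gsi */

	return gsi + 8 * cpus + gsi * 16;
}
#endif

/*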
3670 */ 3671 3672 if (physids_empty(apic_id_map)) 3673 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); 3674 3675 raw_spin_lock_irqsave(&ioapic_lock, flags); 3676 reg_00.raw = io_apic_read(ioapic, 0); 3677 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3678 3679 if (apic_id >= get_physical_broadcast()) { 3680 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " 3681 "%d\n", ioapic, apic_id, reg_00.bits.ID); 3682 apic_id = reg_00.bits.ID; 3683 } 3684 3685 /* 3686 * Every APIC in a system must have a unique ID or we get lots of nice 3687 * 'stuck on smp_invalidate_needed IPI wait' messages. 3688 */ 3689 if (apic->check_apicid_used(&apic_id_map, apic_id)) { 3690 3691 for (i = 0; i < get_physical_broadcast(); i++) { 3692 if (!apic->check_apicid_used(&apic_id_map, i)) 3693 break; 3694 } 3695 3696 if (i == get_physical_broadcast()) 3697 panic("Max apic_id exceeded!\n"); 3698 3699 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " 3700 "trying %d\n", ioapic, apic_id, i); 3701 3702 apic_id = i; 3703 } 3704 3705 apic->apicid_to_cpu_present(apic_id, &tmp); 3706 physids_or(apic_id_map, apic_id_map, tmp); 3707 3708 if (reg_00.bits.ID != apic_id) { 3709 reg_00.bits.ID = apic_id; 3710 3711 raw_spin_lock_irqsave(&ioapic_lock, flags); 3712 io_apic_write(ioapic, 0, reg_00.raw); 3713 reg_00.raw = io_apic_read(ioapic, 0); 3714 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3715 3716 /* Sanity check */ 3717 if (reg_00.bits.ID != apic_id) { 3718 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); 3719 return -1; 3720 } 3721 } 3722 3723 apic_printk(APIC_VERBOSE, KERN_INFO 3724 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); 3725 3726 return apic_id; 3727 } 3728 3729 static u8 __init io_apic_unique_id(u8 id) 3730 { 3731 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 3732 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) 3733 return io_apic_get_unique_id(nr_ioapics, id); 3734 else 3735 return id; 3736 } 3737 #else 3738 static u8 __init io_apic_unique_id(u8 id) 3739 { 3740 int i; 3741 DECLARE_BITMAP(used, 256); 3742 3743 bitmap_zero(used, 256); 3744 for (i = 0; i < nr_ioapics; i++) { 3745 __set_bit(mpc_ioapic_id(i), used); 3746 } 3747 if (!test_bit(id, used)) 3748 return id; 3749 return find_first_zero_bit(used, 256); 3750 } 3751 #endif 3752 3753 static int __init io_apic_get_version(int ioapic) 3754 { 3755 union IO_APIC_reg_01 reg_01; 3756 unsigned long flags; 3757 3758 raw_spin_lock_irqsave(&ioapic_lock, flags); 3759 reg_01.raw = io_apic_read(ioapic, 1); 3760 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3761 3762 return reg_01.bits.version; 3763 } 3764 3765 int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) 3766 { 3767 int ioapic, pin, idx; 3768 3769 if (skip_ioapic_setup) 3770 return -1; 3771 3772 ioapic = mp_find_ioapic(gsi); 3773 if (ioapic < 0) 3774 return -1; 3775 3776 pin = mp_find_ioapic_pin(ioapic, gsi); 3777 if (pin < 0) 3778 return -1; 3779 3780 idx = find_irq_entry(ioapic, pin, mp_INT); 3781 if (idx < 0) 3782 return -1; 3783 3784 *trigger = irq_trigger(idx); 3785 *polarity = irq_polarity(idx); 3786 return 0; 3787 } 3788 3789 /* 3790 * This function currently is only a helper for the i386 smp boot process, where 3791 * we need to reprogram the ioredtbls to cater for the cpus which have come online, 3792 * so the mask in all cases should simply be apic->target_cpus() 3793 */ 3794 #ifdef CONFIG_SMP 3795 void __init setup_ioapic_dest(void) 3796 { 3797 int pin, ioapic, irq, irq_entry; 3798 const struct cpumask *mask; 3799 struct
irq_data *idata; 3800 3801 if (skip_ioapic_setup == 1) 3802 return; 3803 3804 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) 3805 for (pin = 0; pin < ioapics[ioapic].nr_registers; pin++) { 3806 irq_entry = find_irq_entry(ioapic, pin, mp_INT); 3807 if (irq_entry == -1) 3808 continue; 3809 irq = pin_2_irq(irq_entry, ioapic, pin); 3810 3811 if ((ioapic > 0) && (irq > 16)) 3812 continue; 3813 3814 idata = irq_get_irq_data(irq); 3815 3816 /* 3817 * Honour affinities which have been set in early boot 3818 */ 3819 if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata)) 3820 mask = idata->affinity; 3821 else 3822 mask = apic->target_cpus(); 3823 3824 if (intr_remapping_enabled) 3825 ir_ioapic_set_affinity(idata, mask, false); 3826 else 3827 ioapic_set_affinity(idata, mask, false); 3828 } 3829 3830 } 3831 #endif 3832 3833 #define IOAPIC_RESOURCE_NAME_SIZE 11 3834 3835 static struct resource *ioapic_resources; 3836 3837 static struct resource * __init ioapic_setup_resources(int nr_ioapics) 3838 { 3839 unsigned long n; 3840 struct resource *res; 3841 char *mem; 3842 int i; 3843 3844 if (nr_ioapics <= 0) 3845 return NULL; 3846 3847 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); 3848 n *= nr_ioapics; 3849 3850 mem = alloc_bootmem(n); 3851 res = (void *)mem; 3852 3853 mem += sizeof(struct resource) * nr_ioapics; 3854 3855 for (i = 0; i < nr_ioapics; i++) { 3856 res[i].name = mem; 3857 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; 3858 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); 3859 mem += IOAPIC_RESOURCE_NAME_SIZE; 3860 } 3861 3862 ioapic_resources = res; 3863 3864 return res; 3865 } 3866 3867 void __init ioapic_and_gsi_init(void) 3868 { 3869 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; 3870 struct resource *ioapic_res; 3871 int i; 3872 3873 ioapic_res = ioapic_setup_resources(nr_ioapics); 3874 for (i = 0; i < nr_ioapics; i++) { 3875 if (smp_found_config) { 3876 ioapic_phys = mpc_ioapic_addr(i); 3877 #ifdef CONFIG_X86_32 3878 if (!ioapic_phys) { 3879 printk(KERN_ERR 3880 "WARNING: bogus zero IO-APIC " 3881 "address found in MPTABLE, " 3882 "disabling IO/APIC support!\n"); 3883 smp_found_config = 0; 3884 skip_ioapic_setup = 1; 3885 goto fake_ioapic_page; 3886 } 3887 #endif 3888 } else { 3889 #ifdef CONFIG_X86_32 3890 fake_ioapic_page: 3891 #endif 3892 ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 3893 ioapic_phys = __pa(ioapic_phys); 3894 } 3895 set_fixmap_nocache(idx, ioapic_phys); 3896 apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", 3897 __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), 3898 ioapic_phys); 3899 idx++; 3900 3901 ioapic_res->start = ioapic_phys; 3902 ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; 3903 ioapic_res++; 3904 } 3905 3906 probe_nr_irqs_gsi(); 3907 } 3908 3909 void __init ioapic_insert_resources(void) 3910 { 3911 int i; 3912 struct resource *r = ioapic_resources; 3913 3914 if (!r) { 3915 if (nr_ioapics > 0) 3916 printk(KERN_ERR 3917 "IO APIC resources couldn't be allocated.\n"); 3918 return; 3919 } 3920 3921 for (i = 0; i < nr_ioapics; i++) { 3922 insert_resource(&iomem_resource, r); 3923 r++; 3924 } 3925 } 3926 3927 int mp_find_ioapic(u32 gsi) 3928 { 3929 int i = 0; 3930 3931 if (nr_ioapics == 0) 3932 return -1; 3933 3934 /* Find the IOAPIC that manages this GSI. 
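 */

/*
 * A compiled-out sketch of resolving a GSI to an (ioapic, pin) pair
 * with the two lookups in this block, e.g. GSI 19 on a single IO-APIC
 * covering GSIs 0..23 yields ioapic 0, pin 19.
 */
#if 0
static int example_gsi_to_pin(u32 gsi, int *ioapic)
{
	*ioapic = mp_find_ioapic(gsi);
	if (*ioapic < 0)
		return -1;
	return mp_find_ioapic_pin(*ioapic, gsi);
}
#endif

/*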
*/ 3935 for (i = 0; i < nr_ioapics; i++) { 3936 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i); 3937 if ((gsi >= gsi_cfg->gsi_base) 3938 && (gsi <= gsi_cfg->gsi_end)) 3939 return i; 3940 } 3941 3942 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); 3943 return -1; 3944 } 3945 3946 int mp_find_ioapic_pin(int ioapic, u32 gsi) 3947 { 3948 struct mp_ioapic_gsi *gsi_cfg; 3949 3950 if (WARN_ON(ioapic == -1)) 3951 return -1; 3952 3953 gsi_cfg = mp_ioapic_gsi_routing(ioapic); 3954 if (WARN_ON(gsi > gsi_cfg->gsi_end)) 3955 return -1; 3956 3957 return gsi - gsi_cfg->gsi_base; 3958 } 3959 3960 static __init int bad_ioapic(unsigned long address) 3961 { 3962 if (nr_ioapics >= MAX_IO_APICS) { 3963 printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded " 3964 "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics); 3965 return 1; 3966 } 3967 if (!address) { 3968 printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address" 3969 " found in table, skipping!\n"); 3970 return 1; 3971 } 3972 return 0; 3973 } 3974 3975 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) 3976 { 3977 int idx = 0; 3978 int entries; 3979 struct mp_ioapic_gsi *gsi_cfg; 3980 3981 if (bad_ioapic(address)) 3982 return; 3983 3984 idx = nr_ioapics; 3985 3986 ioapics[idx].mp_config.type = MP_IOAPIC; 3987 ioapics[idx].mp_config.flags = MPC_APIC_USABLE; 3988 ioapics[idx].mp_config.apicaddr = address; 3989 3990 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 3991 ioapics[idx].mp_config.apicid = io_apic_unique_id(id); 3992 ioapics[idx].mp_config.apicver = io_apic_get_version(idx); 3993 3994 /* 3995 * Build basic GSI lookup table to facilitate gsi->io_apic lookups 3996 * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 3997 */ 3998 entries = io_apic_get_redir_entries(idx); 3999 gsi_cfg = mp_ioapic_gsi_routing(idx); 4000 gsi_cfg->gsi_base = gsi_base; 4001 gsi_cfg->gsi_end = gsi_base + entries - 1; 4002 4003 /* 4004 * The number of IO-APIC IRQ registers (== #pins): 4005 */ 4006 ioapics[idx].nr_registers = entries; 4007 4008 if (gsi_cfg->gsi_end >= gsi_top) 4009 gsi_top = gsi_cfg->gsi_end + 1; 4010 4011 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " 4012 "GSI %d-%d\n", idx, mpc_ioapic_id(idx), 4013 mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), 4014 gsi_cfg->gsi_base, gsi_cfg->gsi_end); 4015 4016 nr_ioapics++; 4017 } 4018 4019 /* Enable IOAPIC early just for system timer */ 4020 void __init pre_init_apic_IRQ0(void) 4021 { 4022 struct io_apic_irq_attr attr = { 0, 0, 0, 0 }; 4023 4024 printk(KERN_INFO "Early APIC setup for system timer0\n"); 4025 #ifndef CONFIG_SMP 4026 physid_set_mask_of_physid(boot_cpu_physical_apicid, 4027 &phys_cpu_present_map); 4028 #endif 4029 setup_local_APIC(); 4030 4031 io_apic_setup_irq_pin(0, 0, &attr); 4032 irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, 4033 "edge"); 4034 } 4035
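
/*
 * A compiled-out sketch of how a platform registration path would hand
 * an IO-APIC to this file.  The id/address/gsi_base values here are
 * hypothetical; real callers take them from the MP table or the ACPI
 * MADT (0xfec00000 is merely the conventional default base address).
 */
#if 0
static void __init example_register_one_ioapic(void)
{
	mp_register_ioapic(2 /* id */, 0xfec00000 /* address */,
			   0 /* gsi_base */);
}
#endif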