// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Multiprocessor Specification 1.1 and 1.4
 * compliant MP-table parsing routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/i8259.h>
#include <asm/io_apic.h>
#include <asm/acpi.h>
#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/smp.h>

#include <asm/apic.h>
/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

static void __init MP_processor_info(struct mpc_cpu *m)
{
	char *bootup_cpu = "";

	if (!(m->cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}

	if (m->cpuflag & CPU_BOOTPROCESSOR)
		bootup_cpu = " (Bootup-CPU)";

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	generic_processor_info(m->apicid);
}

#ifdef CONFIG_X86_IO_APIC
static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}

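/*
 * Record an MP bus entry: mark the bus as PCI or non-PCI in the
 * mp_bus_not_pci bitmap (and, on EISA-enabled kernels, remember the
 * exact bus type), based on the 6-character bus type string.
 */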
static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}

static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}

static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		    "Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		    mp_irq->irqtype, mp_irq->irqflag & 3,
		    (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		    mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}

#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */

static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		    "Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		    m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		    m->srcbusirq, m->destapic, m->destapiclint);
}

/*
 * Read/parse the MPC
 */
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{

	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;

	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}

static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}

static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
		       1, mpc, mpc->length, 1);
}

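/*
 * Parse the MP configuration table. The local APIC address is registered
 * unless ACPI already provided it. Returns 0 if the table is invalid; the
 * early pass returns 1 right after the LAPIC registration, while the full
 * pass walks all entries (processors, buses, I/O APICs, interrupt sources)
 * and returns the number of processors found.
 */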
static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;

	/* Now process the configuration blocks. */
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
	}

	if (!num_processors)
		pr_err("MPTABLE: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

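/*
 * Read the trigger mode of an ISA IRQ from the chipset's ELCR (edge/level
 * control registers): one bit per IRQ, split across two byte-wide registers
 * starting at PIC_ELCR1. A set bit means the line is level-triggered.
 */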
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = PIC_ELCR1 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 * If true, we have an ISA/PCI system with no IRQ entries
	 * in the MP table. To prevent the PCI interrupts from being set up
	 * incorrectly, we try to use the ELCR. The sanity check to see if
	 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 * never be level sensitive, so we simply see if the ELCR agrees.
	 * If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			fallthrough;
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 * If the ELCR indicates a level-sensitive interrupt, we
			 * copy that information over to the MP table in the
			 * irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i)) {
				intsrc.irqflag = MP_IRQTRIG_LEVEL |
						 MP_IRQPOL_ACTIVE_HIGH;
			} else {
				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
						 MP_IRQPOL_DEFAULT;
			}
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}


static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		fallthrough;
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type = MP_IOAPIC;
	ioapic.apicid = 2;
	ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags = MPC_APIC_USABLE;
	ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif

static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static unsigned long mpf_base;
static bool mpf_found;

static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_memremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_memunmap(mpc, PAGE_SIZE);
	apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}

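/*
 * Map and parse the configuration table pointed to by the floating pointer
 * structure. Returns 0 when a full parse succeeded; -1 on a parse failure
 * (SMP support is then disabled) or when only the early LAPIC-address pass
 * was requested. If the table contains no explicit IRQ entries, a default
 * ISA routing is constructed as a fallback.
 */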
static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);

	/*
	 * Read the physical hardware table. Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_memunmap(mpc, size);
		return -1;
	}
	early_memunmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken. We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}

/*
 * Scan the memory blocks for an SMP configuration block.
 */
static __init void mpparse_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf;

	if (!smp_found_config)
		return;

	if (!mpf_found)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading, aka only have
	 * thread 0 apic id in MPS table
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: error mapping MP table\n");
		return;
	}

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info(" IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info(" Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1) {
		if (early) {
			/* Local APIC has default address */
			register_lapic_address(APIC_DEFAULT_PHYS_BASE);
			goto out;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early))
			goto out;
	} else
		BUG();

	if (!early)
		pr_info("Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
out:
	early_memunmap(mpf, sizeof(*mpf));
}

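/*
 * Parsing entry points: the early pass only needs the local APIC address,
 * the full pass parses the complete table (processors, buses, I/O APICs
 * and interrupt routing entries). Both are thin wrappers around
 * mpparse_get_smp_config().
 */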
void __init mpparse_parse_early_smp_config(void)
{
	mpparse_get_smp_config(true);
}

void __init mpparse_parse_smp_config(void)
{
	mpparse_get_smp_config(false);
}

static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}

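/*
 * Search a physical memory range, in 16-byte steps, for the 16-byte MP
 * floating pointer structure: "_MP_" signature (SMP_MAGIC_IDENT), a length
 * of one 16-byte paragraph, a zero checksum and a spec revision of 1 or 4.
 * When found, record its address and reserve it (plus the configuration
 * table it points to) in memblock.
 */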
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp;
	struct mpf_intel *mpf;
	int ret = 0;

	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
		    base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		bp = early_memremap(base, length);
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_base = base;
			mpf_found = true;

			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
				base, base + sizeof(*mpf) - 1);

			memblock_reserve(base, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			ret = 1;
		}
		early_memunmap(bp, length);

		if (ret)
			break;

		base += 16;
		length -= 16;
	}
	return ret;
}

void __init mpparse_find_mptable(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];

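/*
 * Match a level-triggered, active-low mp_INT entry against the mp_irqs[]
 * array. Returns 0 for entries that don't have that type/flag combination
 * (legacy ISA style), the index of the matching mp_irqs[] entry otherwise
 * (marking it as used), -2 if the match was already claimed, and -1 if no
 * match exists.
 */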
static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
		return 0;

	/* not legacy */

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}

#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_printk(APIC_VERBOSE, "OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_printk(APIC_VERBOSE, "NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * not found (-1), or duplicated (-2) are invalid entries,
		 * we need to use the slot later
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}

static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

static int __init replace_intsrc_all(struct mpc_table *mpc,
				     unsigned long mpc_new_phys,
				     unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (nr_m_spare > 0) {
			apic_printk(APIC_VERBOSE, "*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}

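/*
 * Command line options: "update_mptable" lets the late initcall below
 * rewrite the MP table with the level-triggered, active-low PCI interrupt
 * entries collected in mp_irqs[] (and also turns on pci_routeirq);
 * "alloc_mptable[=size]" additionally reserves a buffer (4K by default) so
 * the updated table can be written to a new location when the original
 * cannot be modified in place.
 */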
int enable_update_mptable;

static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);

static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);

void __init e820__memblock_alloc_reserved_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
}

static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;
	unsigned long size;

	if (!enable_update_mptable)
		return 0;

	if (!mpf_found)
		return 0;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: mpf early_memremap() failed\n");
		return 0;
	}

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1)
		goto do_unmap_mpf;

	if (!mpf->physptr)
		goto do_unmap_mpf;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);
	if (!mpc) {
		pr_err("MPTABLE: mpc early_memremap() failed\n");
		goto do_unmap_mpf;
	}

	if (!smp_check_mpc(mpc, oem, str))
		goto do_unmap_mpc;

	pr_info("mpf: %llx\n", (u64)mpf_base);
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

	if (!mpc_new_phys) {
		unsigned char old, new;
		/* check if we can change the position */
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			goto do_unmap_mpc;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
		if (!mpc_new) {
			pr_err("MPTABLE: new mpc early_memremap() failed\n");
			goto do_unmap_mpc;
		}
		mpf->physptr = mpc_new_phys;
		memcpy(mpc_new, mpc, mpc->length);
		early_memunmap(mpc, size);
		mpc = mpc_new;
		size = mpc_new_length;
		/* check if we can modify that */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
			if (!mpf_new) {
				pr_err("MPTABLE: new mpf early_memremap() failed\n");
				goto do_unmap_mpc;
			}
			pr_info("mpf new: %x\n", 0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			early_memunmap(mpf, sizeof(*mpf));
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * only replace the one with mp_INT and
	 *	MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
	 * already in mp_irqs , stored by ... and mp_config_acpi_gsi,
	 * may need pci=routeirq for all coverage
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

do_unmap_mpc:
	early_memunmap(mpc, size);

do_unmap_mpf:
	early_memunmap(mpf, sizeof(*mpf));

	return 0;
}

late_initcall(update_mp_table);