xref: /linux/arch/x86/kernel/mpparse.c (revision 8c994eff8fcfe8ecb1f1dbebed25b4d7bb75be12)
// SPDX-License-Identifier: GPL-2.0
/*
 *	Intel Multiprocessor Specification 1.1 and 1.4
 *	compliant MP-table parsing routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/i8259.h>
#include <asm/io_apic.h>
#include <asm/acpi.h>
#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/smp.h>

#include <asm/apic.h>
/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

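/*
 * Register one processor entry from the MP table, skipping entries that
 * the BIOS marked as disabled.
 */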
static void __init MP_processor_info(struct mpc_cpu *m)
{
	char *bootup_cpu = "";

	if (!(m->cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}

	if (m->cpuflag & CPU_BOOTPROCESSOR)
		bootup_cpu = " (Bootup-CPU)";

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	generic_processor_info(m->apicid);
}

#ifdef CONFIG_X86_IO_APIC
static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}

static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}

static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}

static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		mp_irq->irqtype, mp_irq->irqflag & 3,
		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}

#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */

static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		m->srcbusirq, m->destapic, m->destapiclint);
}

/*
 * Read/parse the MPC
 */
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{

	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;

	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}

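/* Advance the entry pointer and the running byte count by one entry. */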
static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}

static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
			1, mpc, mpc->length, 1);
}

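/*
 * Parse the MP configuration table: register the local APIC address
 * (unless ACPI already did) and, on the non-early pass, walk every
 * entry (processors, buses, I/O APICs, interrupt sources).
 */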
static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;

	/* Now process the configuration blocks. */
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
	}

	if (!num_processors)
		pr_err("MPTABLE: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

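/* Read the ELCR (edge/level control register) trigger bit for an ISA IRQ. */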
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = PIC_ELCR1 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

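/*
 * Build MP interrupt source entries for the 16 legacy ISA IRQs when the
 * BIOS supplied one of the MPS default configurations, optionally using
 * the ELCR to mark level-triggered (PCI) interrupts.
 */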
static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 *  If true, we have an ISA/PCI system with no IRQ entries
	 *  in the MP table. To prevent the PCI interrupts from being set up
	 *  incorrectly, we try to use the ELCR. The sanity check to see if
	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 *  never be level sensitive, so we simply see if the ELCR agrees.
	 *  If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			fallthrough;
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 *  If the ELCR indicates a level-sensitive interrupt, we
			 *  copy that information over to the MP table in the
			 *  irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i)) {
				intsrc.irqflag = MP_IRQTRIG_LEVEL |
						 MP_IRQPOL_ACTIVE_HIGH;
			} else {
				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
						 MP_IRQPOL_DEFAULT;
			}
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}


static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		fallthrough;
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type	= MP_IOAPIC;
	ioapic.apicid	= 2;
	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags	= MPC_APIC_USABLE;
	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif

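/*
 * Synthesize a complete MP configuration (two CPUs, buses, an I/O APIC
 * and local interrupt entries) for one of the MPS default configurations.
 */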
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static unsigned long mpf_base;
static bool mpf_found;

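/* Temporarily map the MP config table header just to read its length. */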
static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_memremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_memunmap(mpc, PAGE_SIZE);
	apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}

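/*
 * Map and parse the MP configuration table referenced by the floating
 * pointer structure.  If the table has no interrupt source entries, fall
 * back to the default ISA IRQ setup (I/O APIC builds only).
 */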
static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);

	/*
	 * Read the physical hardware table.  Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_memunmap(mpc, size);
		return -1;
	}
	early_memunmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken.  We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init default_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf;

	if (!smp_found_config)
		return;

	if (!mpf_found)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading: the MP table only lists the
	 * APIC ID of thread 0, so prefer ACPI when it provided both the
	 * local APIC and I/O APIC information.
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: error mapping MP table\n");
		return;
	}

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info("    IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info("    Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1) {
		if (early) {
			/* Local APIC has default address */
			register_lapic_address(APIC_DEFAULT_PHYS_BASE);
			goto out;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early))
			goto out;
	} else
		BUG();

	if (!early)
		pr_info("Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
out:
	early_memunmap(mpf, sizeof(*mpf));
}

static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}

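/*
 * Scan a physical memory range, 16 bytes at a time, for the MP floating
 * pointer signature and reserve both the floating pointer structure and
 * the configuration table it points to.
 */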
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp;
	struct mpf_intel *mpf;
	int ret = 0;

	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
		    base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		bp = early_memremap(base, length);
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_base = base;
			mpf_found = true;

			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
				base, base + sizeof(*mpf) - 1);

			memblock_reserve(base, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			ret = 1;
		}
		early_memunmap(bp, length);

		if (ret)
			break;

		base += 16;
		length -= 16;
	}
	return ret;
}

void __init default_find_smp_config(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base RAM..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of BIOS
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended BIOS data area.
	 *
	 * There is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E; calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];

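/*
 * Look up a non-legacy (level-triggered, active-low) interrupt source in
 * mp_irqs[].  Returns the matching index, 0 for legacy entries, -1 if no
 * match exists, or -2 if the slot was already claimed.
 */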
static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
		return 0;

	/* not legacy */

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}

#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_printk(APIC_VERBOSE, "OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_printk(APIC_VERBOSE, "NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * Not found (-1) or duplicated (-2) entries are invalid;
		 * remember this slot so it can be reused later.
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}

static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

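/*
 * Walk the MP table and refresh its interrupt source entries from
 * mp_irqs[]: update matching entries in place, reuse invalid entries as
 * spare slots for the rest (or append when a new table area exists),
 * then recompute the table checksum.
 */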
static int  __init replace_intsrc_all(struct mpc_table *mpc,
					unsigned long mpc_new_phys,
					unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (nr_m_spare > 0) {
			apic_printk(APIC_VERBOSE, "*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}

int enable_update_mptable;

static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);

static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);

void __init e820__memblock_alloc_reserved_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
}

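/*
 * Late initcall for "update_mptable": rewrite the MP table, either in
 * place or in a freshly allocated copy, so its interrupt source entries
 * match what the kernel has recorded in mp_irqs[].
 */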
static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;
	unsigned long size;

	if (!enable_update_mptable)
		return 0;

	if (!mpf_found)
		return 0;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: mpf early_memremap() failed\n");
		return 0;
	}

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1)
		goto do_unmap_mpf;

	if (!mpf->physptr)
		goto do_unmap_mpf;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);
	if (!mpc) {
		pr_err("MPTABLE: mpc early_memremap() failed\n");
		goto do_unmap_mpf;
	}

	if (!smp_check_mpc(mpc, oem, str))
		goto do_unmap_mpc;

	pr_info("mpf: %llx\n", (u64)mpf_base);
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

	if (!mpc_new_phys) {
		unsigned char old, new;
		/* check if we can change the position */
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			goto do_unmap_mpc;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
		if (!mpc_new) {
			pr_err("MPTABLE: new mpc early_memremap() failed\n");
			goto do_unmap_mpc;
		}
		mpf->physptr = mpc_new_phys;
		memcpy(mpc_new, mpc, mpc->length);
		early_memunmap(mpc, size);
		mpc = mpc_new;
		size = mpc_new_length;
		/* check if we can modify that */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
			if (!mpf_new) {
				pr_err("MPTABLE: new mpf early_memremap() failed\n");
				goto do_unmap_mpc;
			}
			pr_info("mpf new: %x\n", 0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			early_memunmap(mpf, sizeof(*mpf));
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * Only replace entries of type mp_INT with
	 * MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW flags that are
	 * already in mp_irqs, stored by ... and mp_config_acpi_gsi;
	 * may need pci=routeirq for full coverage.
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

do_unmap_mpc:
	early_memunmap(mpc, size);

do_unmap_mpf:
	early_memunmap(mpf, sizeof(*mpf));

	return 0;
}

late_initcall(update_mp_table);