xref: /linux/arch/x86/kernel/mpparse.c (revision daa2be74b1b2302004945b2a5e32424e177cc7da)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *	Intel Multiprocessor Specification 1.1 and 1.4
4  *	compliant MP-table parsing routines.
5  *
6  *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
7  *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
8  *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/init.h>
13 #include <linux/delay.h>
14 #include <linux/memblock.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/mc146818rtc.h>
17 #include <linux/bitops.h>
18 #include <linux/acpi.h>
19 #include <linux/smp.h>
20 #include <linux/pci.h>
21 
22 #include <asm/i8259.h>
23 #include <asm/io_apic.h>
24 #include <asm/acpi.h>
25 #include <asm/irqdomain.h>
26 #include <asm/mtrr.h>
27 #include <asm/mpspec.h>
28 #include <asm/proto.h>
29 #include <asm/bios_ebda.h>
30 #include <asm/e820/api.h>
31 #include <asm/setup.h>
32 #include <asm/smp.h>
33 
34 #include <asm/apic.h>
35 
36 static unsigned int num_procs __initdata;
37 
38 /*
39  * Checksum an MP configuration block.
40  */
41 static int __init mpf_checksum(unsigned char *mp, int len)
42 {
43 	int sum = 0;
44 
45 	while (len--)
46 		sum += *mp++;
47 
48 	return sum & 0xFF;
49 }
50 
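/*
 * Register one processor entry from the MP table with the topology core
 * and count the enabled CPUs.
 */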
51 static void __init MP_processor_info(struct mpc_cpu *m)
52 {
53 	char *bootup_cpu = "";
54 
55 	topology_register_apic(m->apicid, CPU_ACPIID_INVALID, m->cpuflag & CPU_ENABLED);
56 	if (!(m->cpuflag & CPU_ENABLED))
57 		return;
58 
59 	if (m->cpuflag & CPU_BOOTPROCESSOR)
60 		bootup_cpu = " (Bootup-CPU)";
61 
62 	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
63 	num_procs++;
64 }
65 
66 #ifdef CONFIG_X86_IO_APIC
67 static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str)
68 {
69 	memcpy(str, m->bustype, 6);
70 	str[6] = 0;
71 	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
72 }
73 
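/*
 * Record one bus entry: flag non-PCI buses in mp_bus_not_pci and, on
 * EISA-enabled kernels, remember the bus type for each bus id.
 */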
74 static void __init MP_bus_info(struct mpc_bus *m)
75 {
76 	char str[7];
77 
78 	mpc_oem_bus_info(m, str);
79 
80 #if MAX_MP_BUSSES < 256
81 	if (m->busid >= MAX_MP_BUSSES) {
82 		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
83 			m->busid, str, MAX_MP_BUSSES - 1);
84 		return;
85 	}
86 #endif
87 
88 	set_bit(m->busid, mp_bus_not_pci);
89 	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
90 #ifdef CONFIG_EISA
91 		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
92 #endif
93 	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
94 		clear_bit(m->busid, mp_bus_not_pci);
95 #ifdef CONFIG_EISA
96 		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
97 	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
98 		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
99 #endif
100 	} else
101 		pr_warn("Unknown bustype %s - ignoring\n", str);
102 }
103 
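/*
 * Register one IO-APIC entry with a legacy interrupt domain, provided the
 * firmware marked it as usable.
 */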
104 static void __init MP_ioapic_info(struct mpc_ioapic *m)
105 {
106 	struct ioapic_domain_cfg cfg = {
107 		.type = IOAPIC_DOMAIN_LEGACY,
108 		.ops = &mp_ioapic_irqdomain_ops,
109 	};
110 
111 	if (m->flags & MPC_APIC_USABLE)
112 		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
113 }
114 
115 static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
116 {
117 	apic_printk(APIC_VERBOSE,
118 		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
119 		mp_irq->irqtype, mp_irq->irqflag & 3,
120 		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
121 		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
122 }
123 
124 #else /* CONFIG_X86_IO_APIC */
125 static inline void __init MP_bus_info(struct mpc_bus *m) {}
126 static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
127 #endif /* CONFIG_X86_IO_APIC */
128 
129 static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
130 {
131 	apic_printk(APIC_VERBOSE,
132 		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
133 		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
134 		m->srcbusirq, m->destapic, m->destapiclint);
135 }
136 
137 /*
138  * Read/parse the MPC
139  */
140 static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
141 {
142 
143 	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
144 		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
145 		       mpc->signature[0], mpc->signature[1],
146 		       mpc->signature[2], mpc->signature[3]);
147 		return 0;
148 	}
149 	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
150 		pr_err("MPTABLE: checksum error!\n");
151 		return 0;
152 	}
153 	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
154 		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
155 		return 0;
156 	}
157 	if (!mpc->lapic) {
158 		pr_err("MPTABLE: null local APIC address!\n");
159 		return 0;
160 	}
161 	memcpy(oem, mpc->oem, 8);
162 	oem[8] = 0;
163 	pr_info("MPTABLE: OEM ID: %s\n", oem);
164 
165 	memcpy(str, mpc->productid, 12);
166 	str[12] = 0;
167 
168 	pr_info("MPTABLE: Product ID: %s\n", str);
169 
170 	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);
171 
172 	return 1;
173 }
174 
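/* Advance the entry pointer and the running byte count past one entry. */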
175 static void skip_entry(unsigned char **ptr, int *count, int size)
176 {
177 	*ptr += size;
178 	*count += size;
179 }
180 
181 static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
182 {
183 	pr_err("Your mptable is wrong, contact your HW vendor!\n");
184 	pr_cont("type %x\n", *mpt);
185 	print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
186 			1, mpc, mpc->length, 1);
187 }
188 
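/*
 * Walk the MP configuration table. The early pass only registers the
 * local APIC address; the full pass enumerates processors, buses,
 * IO-APICs and interrupt sources.
 */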
189 static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
190 {
191 	char str[16];
192 	char oem[10];
193 
194 	int count = sizeof(*mpc);
195 	unsigned char *mpt = ((unsigned char *)mpc) + count;
196 
197 	if (!smp_check_mpc(mpc, oem, str))
198 		return 0;
199 
200 	if (early) {
201 		/* Initialize the lapic mapping */
202 		if (!acpi_lapic)
203 			register_lapic_address(mpc->lapic);
204 		return 1;
205 	}
206 
207 	/* Now process the configuration blocks. */
208 	while (count < mpc->length) {
209 		switch (*mpt) {
210 		case MP_PROCESSOR:
211 			/* ACPI may have already provided this data */
212 			if (!acpi_lapic)
213 				MP_processor_info((struct mpc_cpu *)mpt);
214 			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
215 			break;
216 		case MP_BUS:
217 			MP_bus_info((struct mpc_bus *)mpt);
218 			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
219 			break;
220 		case MP_IOAPIC:
221 			MP_ioapic_info((struct mpc_ioapic *)mpt);
222 			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
223 			break;
224 		case MP_INTSRC:
225 			mp_save_irq((struct mpc_intsrc *)mpt);
226 			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
227 			break;
228 		case MP_LINTSRC:
229 			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
230 			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
231 			break;
232 		default:
233 			/* wrong mptable */
234 			smp_dump_mptable(mpc, mpt);
235 			count = mpc->length;
236 			break;
237 		}
238 	}
239 
240 	if (!num_procs && !acpi_lapic)
241 		pr_err("MPTABLE: no processors registered!\n");
242 	return num_procs || acpi_lapic;
243 }
244 
245 #ifdef CONFIG_X86_IO_APIC
246 
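/*
 * Read the trigger mode of an ISA IRQ from the chipset's ELCR registers:
 * 1 means level triggered, 0 means edge triggered.
 */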
247 static int __init ELCR_trigger(unsigned int irq)
248 {
249 	unsigned int port;
250 
251 	port = PIC_ELCR1 + (irq >> 3);
252 	return (inb(port) >> (irq & 7)) & 1;
253 }
254 
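/*
 * Synthesize interrupt source entries for the 16 legacy ISA IRQs when a
 * default configuration provides none, optionally using the ELCR to pick
 * the trigger mode, and finish with an ExtINT entry for the 8259A.
 */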
255 static void __init construct_default_ioirq_mptable(int mpc_default_type)
256 {
257 	struct mpc_intsrc intsrc;
258 	int i;
259 	int ELCR_fallback = 0;
260 
261 	intsrc.type = MP_INTSRC;
262 	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
263 	intsrc.srcbus = 0;
264 	intsrc.dstapic = mpc_ioapic_id(0);
265 
266 	intsrc.irqtype = mp_INT;
267 
268 	/*
269 	 *  Default configuration type 5 is an ISA/PCI system with no
270 	 *  IRQ entries in the MP table. To prevent the PCI interrupts
271 	 *  from being set up incorrectly, try to use the ELCR. The
272 	 *  sanity check for good ELCR data is simple: IRQ0, 1, 2 and 13
273 	 *  can never be level sensitive, so check that the ELCR agrees.
274 	 *  If it does, assume the ELCR data is valid.
275 	 */
276 	if (mpc_default_type == 5) {
277 		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
278 
279 		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
280 		    ELCR_trigger(13))
281 			pr_err("ELCR contains invalid data... not using ELCR\n");
282 		else {
283 			pr_info("Using ELCR to identify PCI interrupts\n");
284 			ELCR_fallback = 1;
285 		}
286 	}
287 
288 	for (i = 0; i < 16; i++) {
289 		switch (mpc_default_type) {
290 		case 2:
291 			if (i == 0 || i == 13)
292 				continue;	/* IRQ0 & IRQ13 not connected */
293 			fallthrough;
294 		default:
295 			if (i == 2)
296 				continue;	/* IRQ2 is never connected */
297 		}
298 
299 		if (ELCR_fallback) {
300 			/*
301 			 *  If the ELCR indicates a level-sensitive interrupt, we
302 			 *  copy that information over to the MP table in the
303 			 *  irqflag field (level sensitive, active high polarity).
304 			 */
305 			if (ELCR_trigger(i)) {
306 				intsrc.irqflag = MP_IRQTRIG_LEVEL |
307 						 MP_IRQPOL_ACTIVE_HIGH;
308 			} else {
309 				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
310 						 MP_IRQPOL_DEFAULT;
311 			}
312 		}
313 
314 		intsrc.srcbusirq = i;
315 		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
316 		mp_save_irq(&intsrc);
317 	}
318 
319 	intsrc.irqtype = mp_ExtINT;
320 	intsrc.srcbusirq = 0;
321 	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
322 	mp_save_irq(&intsrc);
323 }
324 
325 
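/*
 * Build the bus, IO-APIC and IRQ entries implied by one of the MP
 * default configurations, which carry no explicit configuration table.
 */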
326 static void __init construct_ioapic_table(int mpc_default_type)
327 {
328 	struct mpc_ioapic ioapic;
329 	struct mpc_bus bus;
330 
331 	bus.type = MP_BUS;
332 	bus.busid = 0;
333 	switch (mpc_default_type) {
334 	default:
335 		pr_err("???\nUnknown standard configuration %d\n",
336 		       mpc_default_type);
337 		fallthrough;
338 	case 1:
339 	case 5:
340 		memcpy(bus.bustype, "ISA   ", 6);
341 		break;
342 	case 2:
343 	case 6:
344 	case 3:
345 		memcpy(bus.bustype, "EISA  ", 6);
346 		break;
347 	}
348 	MP_bus_info(&bus);
349 	if (mpc_default_type > 4) {
350 		bus.busid = 1;
351 		memcpy(bus.bustype, "PCI   ", 6);
352 		MP_bus_info(&bus);
353 	}
354 
355 	ioapic.type	= MP_IOAPIC;
356 	ioapic.apicid	= 2;
357 	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
358 	ioapic.flags	= MPC_APIC_USABLE;
359 	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
360 	MP_ioapic_info(&ioapic);
361 
362 	/*
363 	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
364 	 */
365 	construct_default_ioirq_mptable(mpc_default_type);
366 }
367 #else
368 static inline void __init construct_ioapic_table(int mpc_default_type) { }
369 #endif
370 
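/*
 * Construct a complete synthetic MP configuration for a default
 * configuration type: two CPUs modelled on the boot CPU, the implied
 * buses and IO-APIC, and the ExtINT/NMI local interrupt entries.
 */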
371 static inline void __init construct_default_ISA_mptable(int mpc_default_type)
372 {
373 	struct mpc_cpu processor;
374 	struct mpc_lintsrc lintsrc;
375 	int linttypes[2] = { mp_ExtINT, mp_NMI };
376 	int i;
377 
378 	/*
379 	 * 2 CPUs, numbered 0 & 1.
380 	 */
381 	processor.type = MP_PROCESSOR;
382 	/* Either an integrated APIC or a discrete 82489DX. */
383 	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
384 	processor.cpuflag = CPU_ENABLED;
385 	processor.cpufeature = (boot_cpu_data.x86 << 8) |
386 	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
387 	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
388 	processor.reserved[0] = 0;
389 	processor.reserved[1] = 0;
390 	for (i = 0; i < 2; i++) {
391 		processor.apicid = i;
392 		MP_processor_info(&processor);
393 	}
394 
395 	construct_ioapic_table(mpc_default_type);
396 
397 	lintsrc.type = MP_LINTSRC;
398 	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
399 	lintsrc.srcbusid = 0;
400 	lintsrc.srcbusirq = 0;
401 	lintsrc.destapic = MP_APIC_ALL;
402 	for (i = 0; i < 2; i++) {
403 		lintsrc.irqtype = linttypes[i];
404 		lintsrc.destapiclint = i;
405 		MP_lintsrc_info(&lintsrc);
406 	}
407 }
408 
409 static unsigned long mpf_base;
410 static bool mpf_found;
411 
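/* Map the MP configuration table header just long enough to read its length. */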
412 static unsigned long __init get_mpc_size(unsigned long physptr)
413 {
414 	struct mpc_table *mpc;
415 	unsigned long size;
416 
417 	mpc = early_memremap(physptr, PAGE_SIZE);
418 	size = mpc->length;
419 	early_memunmap(mpc, PAGE_SIZE);
420 	apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);
421 
422 	return size;
423 }
424 
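/*
 * Map and parse the MP configuration table referenced by the floating
 * pointer structure. If the table carries no interrupt entries, fall back
 * to a default ISA-style IRQ setup.
 */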
425 static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
426 {
427 	struct mpc_table *mpc;
428 	unsigned long size;
429 
430 	size = get_mpc_size(mpf->physptr);
431 	mpc = early_memremap(mpf->physptr, size);
432 
433 	/*
434 	 * Read the physical hardware table.  Anything here will
435 	 * override the defaults.
436 	 */
437 	if (!smp_read_mpc(mpc, early)) {
438 #ifdef CONFIG_X86_LOCAL_APIC
439 		smp_found_config = 0;
440 #endif
441 		pr_err("BIOS bug, MP table errors detected!...\n");
442 		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
443 		early_memunmap(mpc, size);
444 		return -1;
445 	}
446 	early_memunmap(mpc, size);
447 
448 	if (early)
449 		return -1;
450 
451 #ifdef CONFIG_X86_IO_APIC
452 	/*
453 	 * If there are no explicit MP IRQ entries, then we are
454 	 * broken.  We set up most of the low 16 IO-APIC pins to
455 	 * ISA defaults and hope it will work.
456 	 */
457 	if (!mp_irq_entries) {
458 		struct mpc_bus bus;
459 
460 		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
461 
462 		bus.type = MP_BUS;
463 		bus.busid = 0;
464 		memcpy(bus.bustype, "ISA   ", 6);
465 		MP_bus_info(&bus);
466 
467 		construct_default_ioirq_mptable(0);
468 	}
469 #endif
470 
471 	return 0;
472 }
473 
474 /*
475  * Parse the MP configuration that was found earlier and register the
476  * enumerated CPUs, buses, IO-APICs and interrupt routing entries.
477  */
477 static __init void mpparse_get_smp_config(unsigned int early)
478 {
479 	struct mpf_intel *mpf;
480 
481 	if (!smp_found_config)
482 		return;
483 
484 	if (!mpf_found)
485 		return;
486 
487 	if (acpi_lapic && early)
488 		return;
489 
490 	/*
491 	 * MPS doesn't support hyperthreading: the MP table only lists
492 	 * the APIC ID of thread 0 of each core.
493 	 */
494 	if (acpi_lapic && acpi_ioapic)
495 		return;
496 
497 	mpf = early_memremap(mpf_base, sizeof(*mpf));
498 	if (!mpf) {
499 		pr_err("MPTABLE: error mapping MP table\n");
500 		return;
501 	}
502 
503 	pr_info("Intel MultiProcessor Specification v1.%d\n",
504 		mpf->specification);
505 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
506 	if (mpf->feature2 & (1 << 7)) {
507 		pr_info("    IMCR and PIC compatibility mode.\n");
508 		pic_mode = 1;
509 	} else {
510 		pr_info("    Virtual Wire compatibility mode.\n");
511 		pic_mode = 0;
512 	}
513 #endif
514 	/*
515 	 * Now see if we need to read further.
516 	 */
517 	if (mpf->feature1) {
518 		if (early) {
519 			/* Local APIC has default address */
520 			register_lapic_address(APIC_DEFAULT_PHYS_BASE);
521 			goto out;
522 		}
523 
524 		pr_info("Default MP configuration #%d\n", mpf->feature1);
525 		construct_default_ISA_mptable(mpf->feature1);
526 
527 	} else if (mpf->physptr) {
528 		if (check_physptr(mpf, early))
529 			goto out;
530 	} else
531 		BUG();
532 
533 	if (!early && !acpi_lapic)
534 		pr_info("Processors: %d\n", num_procs);
535 	/*
536 	 * Only use the first configuration found.
537 	 */
538 out:
539 	early_memunmap(mpf, sizeof(*mpf));
540 }
541 
542 void __init mpparse_parse_early_smp_config(void)
543 {
544 	mpparse_get_smp_config(true);
545 }
546 
547 void __init mpparse_parse_smp_config(void)
548 {
549 	mpparse_get_smp_config(false);
550 }
551 
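/* Reserve the memory occupied by the MP configuration table. */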
552 static void __init smp_reserve_memory(struct mpf_intel *mpf)
553 {
554 	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
555 }
556 
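/*
 * Scan a physical memory range in 16-byte steps for a valid MP floating
 * pointer structure; when one is found, record its location and reserve
 * both it and the configuration table it points to.
 */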
557 static int __init smp_scan_config(unsigned long base, unsigned long length)
558 {
559 	unsigned int *bp;
560 	struct mpf_intel *mpf;
561 	int ret = 0;
562 
563 	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
564 		    base, base + length - 1);
565 	BUILD_BUG_ON(sizeof(*mpf) != 16);
566 
567 	while (length > 0) {
568 		bp = early_memremap(base, length);
569 		mpf = (struct mpf_intel *)bp;
570 		if ((*bp == SMP_MAGIC_IDENT) &&
571 		    (mpf->length == 1) &&
572 		    !mpf_checksum((unsigned char *)bp, 16) &&
573 		    ((mpf->specification == 1)
574 		     || (mpf->specification == 4))) {
575 #ifdef CONFIG_X86_LOCAL_APIC
576 			smp_found_config = 1;
577 #endif
578 			mpf_base = base;
579 			mpf_found = true;
580 
581 			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
582 				base, base + sizeof(*mpf) - 1);
583 
584 			memblock_reserve(base, sizeof(*mpf));
585 			if (mpf->physptr)
586 				smp_reserve_memory(mpf);
587 
588 			ret = 1;
589 		}
590 		early_memunmap(bp, length);
591 
592 		if (ret)
593 			break;
594 
595 		base += 16;
596 		length -= 16;
597 	}
598 	return ret;
599 }
600 
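/*
 * Search the conventional locations - low memory, the top of base RAM,
 * the BIOS area and finally the EBDA - for the MP floating pointer
 * structure.
 */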
601 void __init mpparse_find_mptable(void)
602 {
603 	unsigned int address;
604 
605 	/*
606 	 * FIXME: Linux assumes you have 640K of base RAM...
607 	 * this continues the error...
608 	 *
609 	 * 1) Scan the bottom 1K for a signature
610 	 * 2) Scan the top 1K of base RAM
611 	 * 3) Scan the 64K of BIOS
612 	 */
613 	if (smp_scan_config(0x0, 0x400) ||
614 	    smp_scan_config(639 * 0x400, 0x400) ||
615 	    smp_scan_config(0xF0000, 0x10000))
616 		return;
617 	/*
618 	 * If it is an SMP machine we should know by now, unless the
619 	 * configuration is in an EISA bus machine with an
620 	 * extended BIOS data area (EBDA).
621 	 *
622 	 * There is a real-mode segmented pointer to the 4K EBDA area
623 	 * at 0x40E; calculate the address and scan the area here.
624 	 *
625 	 * NOTE! There are Linux loaders that will corrupt the EBDA
626 	 * area, and as such this kind of SMP config may be less
627 	 * trustworthy, simply because the SMP table may have been
628 	 * stomped on during early boot. These loaders are buggy and
629 	 * should be fixed.
630 	 *
631 	 * The MP 1.4 spec states to scan only the first 1K of the 4K EBDA.
632 	 */
633 
634 	address = get_bios_ebda();
635 	if (address)
636 		smp_scan_config(address, 0x400);
637 }
638 
639 #ifdef CONFIG_X86_IO_APIC
640 static u8 __initdata irq_used[MAX_IRQ_SOURCES];
641 
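/*
 * Look up a level-triggered, active-low mp_INT entry in mp_irqs. Returns
 * the matching index, 0 for legacy (non level/active-low) entries, -1 if
 * there is no match and -2 if the match was already claimed.
 */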
642 static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
643 {
644 	int i;
645 
646 	if (m->irqtype != mp_INT)
647 		return 0;
648 
649 	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
650 		return 0;
651 
652 	/* not legacy */
653 
654 	for (i = 0; i < mp_irq_entries; i++) {
655 		if (mp_irqs[i].irqtype != mp_INT)
656 			continue;
657 
658 		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
659 					   MP_IRQPOL_ACTIVE_LOW))
660 			continue;
661 
662 		if (mp_irqs[i].srcbus != m->srcbus)
663 			continue;
664 		if (mp_irqs[i].srcbusirq != m->srcbusirq)
665 			continue;
666 		if (irq_used[i]) {
667 			/* already claimed */
668 			return -2;
669 		}
670 		irq_used[i] = 1;
671 		return i;
672 	}
673 
674 	/* not found */
675 	return -1;
676 }
677 
678 #define SPARE_SLOT_NUM 20
679 
680 static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
681 
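/*
 * Compare an existing MP table interrupt entry against mp_irqs: refresh
 * matching entries in place and remember unmatched ones as spare slots
 * that replace_intsrc_all() may overwrite later.
 */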
682 static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
683 {
684 	int i;
685 
686 	apic_printk(APIC_VERBOSE, "OLD ");
687 	print_mp_irq_info(m);
688 
689 	i = get_MP_intsrc_index(m);
690 	if (i > 0) {
691 		memcpy(m, &mp_irqs[i], sizeof(*m));
692 		apic_printk(APIC_VERBOSE, "NEW ");
693 		print_mp_irq_info(&mp_irqs[i]);
694 		return;
695 	}
696 	if (!i) {
697 		/* legacy, do nothing */
698 		return;
699 	}
700 	if (*nr_m_spare < SPARE_SLOT_NUM) {
701 		/*
702 		 * Not found (-1) or already claimed (-2): no usable match in
703 		 * mp_irqs, so remember this slot for reuse later.
704 		 */
705 		m_spare[*nr_m_spare] = m;
706 		*nr_m_spare += 1;
707 	}
708 }
709 
710 static int __init
711 check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
712 {
713 	if (!mpc_new_phys || count <= mpc_new_length) {
714 		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
715 		return -1;
716 	}
717 
718 	return 0;
719 }
720 #else /* CONFIG_X86_IO_APIC */
721 static
722 inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
723 #endif /* CONFIG_X86_IO_APIC */
724 
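/*
 * Rewrite the interrupt source entries of the MP table from mp_irqs,
 * reusing spare slots first and appending new entries when a relocated
 * table with room to grow is available, then fix up the checksum.
 */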
725 static int  __init replace_intsrc_all(struct mpc_table *mpc,
726 					unsigned long mpc_new_phys,
727 					unsigned long mpc_new_length)
728 {
729 #ifdef CONFIG_X86_IO_APIC
730 	int i;
731 #endif
732 	int count = sizeof(*mpc);
733 	int nr_m_spare = 0;
734 	unsigned char *mpt = ((unsigned char *)mpc) + count;
735 
736 	pr_info("mpc_length %x\n", mpc->length);
737 	while (count < mpc->length) {
738 		switch (*mpt) {
739 		case MP_PROCESSOR:
740 			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
741 			break;
742 		case MP_BUS:
743 			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
744 			break;
745 		case MP_IOAPIC:
746 			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
747 			break;
748 		case MP_INTSRC:
749 			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
750 			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
751 			break;
752 		case MP_LINTSRC:
753 			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
754 			break;
755 		default:
756 			/* wrong mptable */
757 			smp_dump_mptable(mpc, mpt);
758 			goto out;
759 		}
760 	}
761 
762 #ifdef CONFIG_X86_IO_APIC
763 	for (i = 0; i < mp_irq_entries; i++) {
764 		if (irq_used[i])
765 			continue;
766 
767 		if (mp_irqs[i].irqtype != mp_INT)
768 			continue;
769 
770 		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
771 					   MP_IRQPOL_ACTIVE_LOW))
772 			continue;
773 
774 		if (nr_m_spare > 0) {
775 			apic_printk(APIC_VERBOSE, "*NEW* found\n");
776 			nr_m_spare--;
777 			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
778 			m_spare[nr_m_spare] = NULL;
779 		} else {
780 			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
781 			count += sizeof(struct mpc_intsrc);
782 			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
783 				goto out;
784 			memcpy(m, &mp_irqs[i], sizeof(*m));
785 			mpc->length = count;
786 			mpt += sizeof(struct mpc_intsrc);
787 		}
788 		print_mp_irq_info(&mp_irqs[i]);
789 	}
790 #endif
791 out:
792 	/* update checksum */
793 	mpc->checksum = 0;
794 	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);
795 
796 	return 0;
797 }
798 
799 int enable_update_mptable;
800 
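/*
 * "update_mptable" boot parameter: rewrite the MP table from mp_irqs at
 * late boot (see update_mp_table() below) and request routing of all
 * PCI IRQs.
 */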
801 static int __init update_mptable_setup(char *str)
802 {
803 	enable_update_mptable = 1;
804 #ifdef CONFIG_PCI
805 	pci_routeirq = 1;
806 #endif
807 	return 0;
808 }
809 early_param("update_mptable", update_mptable_setup);
810 
811 static unsigned long __initdata mpc_new_phys;
812 static unsigned long mpc_new_length __initdata = 4096;
813 
814 /* alloc_mptable or alloc_mptable=4k */
815 static int __initdata alloc_mptable;
816 static int __init parse_alloc_mptable_opt(char *p)
817 {
818 	enable_update_mptable = 1;
819 #ifdef CONFIG_PCI
820 	pci_routeirq = 1;
821 #endif
822 	alloc_mptable = 1;
823 	if (!p)
824 		return 0;
825 	mpc_new_length = memparse(p, &p);
826 	return 0;
827 }
828 early_param("alloc_mptable", parse_alloc_mptable_opt);
829 
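/* Reserve memory for a relocated MP table when "alloc_mptable" was given. */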
830 void __init e820__memblock_alloc_reserved_mpc_new(void)
831 {
832 	if (enable_update_mptable && alloc_mptable)
833 		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
834 }
835 
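/*
 * Late initcall backing the "update_mptable" option: re-validate the MP
 * table and rewrite its interrupt entries from mp_irqs, either in place
 * or in the buffer reserved via "alloc_mptable".
 */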
836 static int __init update_mp_table(void)
837 {
838 	char str[16];
839 	char oem[10];
840 	struct mpf_intel *mpf;
841 	struct mpc_table *mpc, *mpc_new;
842 	unsigned long size;
843 
844 	if (!enable_update_mptable)
845 		return 0;
846 
847 	if (!mpf_found)
848 		return 0;
849 
850 	mpf = early_memremap(mpf_base, sizeof(*mpf));
851 	if (!mpf) {
852 		pr_err("MPTABLE: mpf early_memremap() failed\n");
853 		return 0;
854 	}
855 
856 	/*
857 	 * Now see if we need to go further.
858 	 */
859 	if (mpf->feature1)
860 		goto do_unmap_mpf;
861 
862 	if (!mpf->physptr)
863 		goto do_unmap_mpf;
864 
865 	size = get_mpc_size(mpf->physptr);
866 	mpc = early_memremap(mpf->physptr, size);
867 	if (!mpc) {
868 		pr_err("MPTABLE: mpc early_memremap() failed\n");
869 		goto do_unmap_mpf;
870 	}
871 
872 	if (!smp_check_mpc(mpc, oem, str))
873 		goto do_unmap_mpc;
874 
875 	pr_info("mpf: %llx\n", (u64)mpf_base);
876 	pr_info("physptr: %x\n", mpf->physptr);
877 
878 	if (mpc_new_phys && mpc->length > mpc_new_length) {
879 		mpc_new_phys = 0;
880 		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
881 			mpc_new_length);
882 	}
883 
884 	if (!mpc_new_phys) {
885 		unsigned char old, new;
886 		/* check whether the MP table memory is actually writable in place */
887 		mpc->checksum = 0;
888 		old = mpf_checksum((unsigned char *)mpc, mpc->length);
889 		mpc->checksum = 0xff;
890 		new = mpf_checksum((unsigned char *)mpc, mpc->length);
891 		if (old == new) {
892 			pr_info("mpc is readonly, please try alloc_mptable instead\n");
893 			goto do_unmap_mpc;
894 		}
895 		pr_info("use in-position replacing\n");
896 	} else {
897 		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
898 		if (!mpc_new) {
899 			pr_err("MPTABLE: new mpc early_memremap() failed\n");
900 			goto do_unmap_mpc;
901 		}
902 		mpf->physptr = mpc_new_phys;
903 		memcpy(mpc_new, mpc, mpc->length);
904 		early_memunmap(mpc, size);
905 		mpc = mpc_new;
906 		size = mpc_new_length;
907 		/* check if we can modify that */
908 		if (mpc_new_phys - mpf->physptr) {
909 			struct mpf_intel *mpf_new;
910 			/* steal 16 bytes from [0, 1k) */
911 			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
912 			if (!mpf_new) {
913 				pr_err("MPTABLE: new mpf early_memremap() failed\n");
914 				goto do_unmap_mpc;
915 			}
916 			pr_info("mpf new: %x\n", 0x400 - 16);
917 			memcpy(mpf_new, mpf, 16);
918 			early_memunmap(mpf, sizeof(*mpf));
919 			mpf = mpf_new;
920 			mpf->physptr = mpc_new_phys;
921 		}
922 		mpf->checksum = 0;
923 		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
924 		pr_info("physptr new: %x\n", mpf->physptr);
925 	}
926 
927 	/*
928 	 * Only replace entries of type mp_INT with
929 	 * MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW that are already
930 	 * in mp_irqs, stored by ... and mp_config_acpi_gsi;
931 	 * pci=routeirq may be needed for full coverage.
932 	 */
933 	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);
934 
935 do_unmap_mpc:
936 	early_memunmap(mpc, size);
937 
938 do_unmap_mpf:
939 	early_memunmap(mpf, sizeof(*mpf));
940 
941 	return 0;
942 }
943 
944 late_initcall(update_mp_table);
945