// SPDX-License-Identifier: GPL-2.0
/*
 *	Intel Multiprocessor Specification 1.1 and 1.4
 *	compliant MP-table parsing routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/smp.h>

#include <asm/apic.h>
/*
 * Checksum an MP configuration block.
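 *
 * Per the MP specification, all bytes of a valid structure sum to zero
 * (mod 256), so a return value of 0 from this helper means the checksum
 * is good.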
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

int __init default_mpc_apic_id(struct mpc_cpu *m)
{
	return m->apicid;
}

static void __init MP_processor_info(struct mpc_cpu *m)
{
	int apicid;
	char *bootup_cpu = "";

	if (!(m->cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}

	apicid = x86_init.mpparse.mpc_apic_id(m);

	if (m->cpuflag & CPU_BOOTPROCESSOR) {
		bootup_cpu = " (Bootup-CPU)";
		boot_cpu_physical_apicid = m->apicid;
	}

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	generic_processor_info(apicid, m->apicver);
}

#ifdef CONFIG_X86_IO_APIC
void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}

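/*
 * Record the bus type: every bus starts out flagged as non-PCI in
 * mp_bus_not_pci; the bit is cleared again when the entry turns out to
 * describe a PCI bus.
 */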
static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	x86_init.mpparse.mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		if (x86_init.mpparse.mpc_oem_pci_bus)
			x86_init.mpparse.mpc_oem_pci_bus(m);

		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}

static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}

static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		mp_irq->irqtype, mp_irq->irqflag & 3,
		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}

#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */

static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		m->srcbusirq, m->destapic, m->destapiclint);
}

/*
 * Read/parse the MPC
 */
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{

	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;

	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}

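/* Advance the parse pointer and the running byte count past one entry. */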
static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}

static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
			1, mpc, mpc->length, 1);
}

void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }

static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;

	if (mpc->oemptr)
		x86_init.mpparse.smp_read_mpc_oem(mpc);

	/*
	 *      Now process the configuration blocks.
	 */
	x86_init.mpparse.mpc_record(0);

	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
		x86_init.mpparse.mpc_record(1);
	}

	if (!num_processors)
		pr_err("MPTABLE: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

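/*
 * The ELCR (Edge/Level Control Registers) live at I/O ports 0x4d0/0x4d1,
 * one bit per ISA IRQ; a set bit means the IRQ is level-triggered.
 */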
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = 0x4d0 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

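/*
 * Build the default ISA IRQ routing entries: each ISA IRQ is wired 1:1 to
 * the matching I/O APIC pin (IRQ0 goes to INTIN2, IRQ2 is skipped),
 * followed by an ExtINT entry routing the 8259A to INTIN0.
 */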
static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 *  If true, we have an ISA/PCI system with no IRQ entries
	 *  in the MP table. To prevent the PCI interrupts from being set up
	 *  incorrectly, we try to use the ELCR. The sanity check to see if
	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 *  never be level sensitive, so we simply see if the ELCR agrees.
	 *  If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			/* fall through */
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 *  If the ELCR indicates a level-sensitive interrupt, we
			 *  copy that information over to the MP table in the
			 *  irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i)) {
				intsrc.irqflag = MP_IRQTRIG_LEVEL |
						 MP_IRQPOL_ACTIVE_HIGH;
			} else {
				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
						 MP_IRQPOL_DEFAULT;
			}
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}


static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		/* fall through */
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type	= MP_IOAPIC;
	ioapic.apicid	= 2;
	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags	= MPC_APIC_USABLE;
	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif

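/*
 * Synthesize a complete MP configuration from one of the MPS "default
 * configurations" (the feature1 byte of the floating pointer structure),
 * used when the BIOS provides no configuration table at all.
 */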
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static unsigned long mpf_base;
static bool mpf_found;

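/* Temporarily map the table header just to read the table's total length. */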
static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_memremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_memunmap(mpc, PAGE_SIZE);
	apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}

static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);

	/*
	 * Read the physical hardware table.  Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_memunmap(mpc, size);
		return -1;
	}
	early_memunmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken.  We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init default_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf;

	if (!smp_found_config)
		return;

	if (!mpf_found)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading: the MPS table only
	 * lists the thread 0 APIC IDs.
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: error mapping MP table\n");
		return;
	}

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info("    IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info("    Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1) {
		if (early) {
			/*
			 * local APIC has default address
			 */
			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
			goto out;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early))
			goto out;
	} else
		BUG();

	if (!early)
		pr_info("Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
out:
	early_memunmap(mpf, sizeof(*mpf));
}

static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}

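/*
 * Scan a physical memory range in 16-byte steps for the MP floating
 * pointer structure, which starts with the ASCII signature "_MP_" and is
 * exactly one 16-byte paragraph long.
 */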
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp;
	struct mpf_intel *mpf;
	int ret = 0;

	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
		    base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		bp = early_memremap(base, length);
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_base = base;
			mpf_found = true;

			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
				base, base + sizeof(*mpf) - 1);

			memblock_reserve(base, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			ret = 1;
		}
		early_memunmap(bp, length);

		if (ret)
			break;

		base += 16;
		length -= 16;
	}
	return ret;
}

void __init default_find_smp_config(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];

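/*
 * Look up a level-triggered, active-low mp_INT entry in mp_irqs.  Returns
 * the matching index, 0 for entries that are not level/low PCI interrupts,
 * -1 if nothing matches, or -2 if the matching slot was already claimed.
 */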
static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
		return 0;

	/* not legacy */

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}

#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_printk(APIC_VERBOSE, "OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_printk(APIC_VERBOSE, "NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * not found (-1), or duplicated (-2) are invalid entries,
		 * we need to use the slot later
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}

static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

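/*
 * Walk the configuration table and overwrite its mp_INT entries with the
 * current contents of mp_irqs; entries with no existing slot are written
 * into collected spare slots or appended to the table when space permits.
 */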
static int  __init replace_intsrc_all(struct mpc_table *mpc,
					unsigned long mpc_new_phys,
					unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (nr_m_spare > 0) {
			apic_printk(APIC_VERBOSE, "*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
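	/* (a valid table's bytes must again sum to zero mod 256) */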
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}

int enable_update_mptable;

static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);

static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);

void __init e820__memblock_alloc_reserved_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
}

static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;
	unsigned long size;

	if (!enable_update_mptable)
		return 0;

	if (!mpf_found)
		return 0;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: mpf early_memremap() failed\n");
		return 0;
	}

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1)
		goto do_unmap_mpf;

	if (!mpf->physptr)
		goto do_unmap_mpf;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);
	if (!mpc) {
		pr_err("MPTABLE: mpc early_memremap() failed\n");
		goto do_unmap_mpf;
	}

	if (!smp_check_mpc(mpc, oem, str))
		goto do_unmap_mpc;

	pr_info("mpf: %llx\n", (u64)mpf_base);
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

	if (!mpc_new_phys) {
		unsigned char old, new;
		/* check if we can change the position */
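		/*
		 * Writability probe: store two different values in the
		 * checksum byte and recompute the sum.  If both runs give
		 * the same result, the writes did not stick and the table
		 * is in read-only memory.
		 */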
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			goto do_unmap_mpc;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
		if (!mpc_new) {
			pr_err("MPTABLE: new mpc early_memremap() failed\n");
			goto do_unmap_mpc;
		}
		mpf->physptr = mpc_new_phys;
		memcpy(mpc_new, mpc, mpc->length);
		early_memunmap(mpc, size);
		mpc = mpc_new;
		size = mpc_new_length;
		/* check if we can modify that */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
			if (!mpf_new) {
				pr_err("MPTABLE: new mpf early_memremap() failed\n");
				goto do_unmap_mpc;
			}
			pr_info("mpf new: %x\n", 0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			early_memunmap(mpf, sizeof(*mpf));
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * Only replace entries of type mp_INT with
	 * MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW that are already in
	 * mp_irqs, stored by ... and mp_config_acpi_gsi;
	 * pci=routeirq may be needed for full coverage.
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

do_unmap_mpc:
	early_memunmap(mpc, size);

do_unmap_mpf:
	early_memunmap(mpf, sizeof(*mpf));

	return 0;
}

late_initcall(update_mp_table);