// SPDX-License-Identifier: GPL-2.0
/*
 *	Intel Multiprocessor Specification 1.1 and 1.4
 *	compliant MP-table parsing routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/i8259.h>
#include <asm/io_apic.h>
#include <asm/acpi.h>
#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/smp.h>

#include <asm/apic.h>
/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

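/*
 * Register one MP-table processor entry: count disabled CPUs, remember
 * the boot CPU's APIC ID and hand the entry to the generic APIC code.
 */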
static void __init MP_processor_info(struct mpc_cpu *m)
{
	int apicid;
	char *bootup_cpu = "";

	if (!(m->cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}

	apicid = m->apicid;

	if (m->cpuflag & CPU_BOOTPROCESSOR) {
		bootup_cpu = " (Bootup-CPU)";
		boot_cpu_physical_apicid = m->apicid;
	}

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	generic_processor_info(apicid, m->apicver);
}

#ifdef CONFIG_X86_IO_APIC
static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}

static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}

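/* Register a usable I/O APIC entry with the legacy interrupt domain. */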
static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}

static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		mp_irq->irqtype, mp_irq->irqflag & 3,
		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}

#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */

static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		m->srcbusirq, m->destapic, m->destapiclint);
}

/*
 * Read/parse the MPC
 */
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{

	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;

	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}

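/* Advance the entry pointer and the running byte count past one MP-table entry. */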
static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}

static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
			1, mpc, mpc->length, 1);
}

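/*
 * Parse the MP configuration table: validate the header, register the
 * local APIC address and then walk the variable-length entry list,
 * dispatching on each entry type.  When 'early' is set, only the local
 * APIC address is recorded.
 */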
static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;

	/* Now process the configuration blocks. */
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
	}

	if (!num_processors)
		pr_err("MPTABLE: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

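/*
 * Read the 8259A Edge/Level Control Register bit for an ISA IRQ;
 * a set bit means the line is level triggered.
 */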
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = PIC_ELCR1 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

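/*
 * Build the default ISA interrupt entries (IRQ0-15 routed to the I/O APIC,
 * plus one ExtINT entry for the 8259A) prescribed by the MP spec when the
 * table carries no explicit IRQ information.
 */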
static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 *  If true, we have an ISA/PCI system with no IRQ entries
	 *  in the MP table. To prevent the PCI interrupts from being set up
	 *  incorrectly, we try to use the ELCR. The sanity check to see if
	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 *  never be level sensitive, so we simply see if the ELCR agrees.
	 *  If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			fallthrough;
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 *  If the ELCR indicates a level-sensitive interrupt, we
			 *  copy that information over to the MP table in the
			 *  irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i)) {
				intsrc.irqflag = MP_IRQTRIG_LEVEL |
						 MP_IRQPOL_ACTIVE_HIGH;
			} else {
				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
						 MP_IRQPOL_DEFAULT;
			}
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}


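/*
 * Synthesize the bus and I/O APIC entries for one of the MP-spec default
 * configurations: an ISA or EISA bus, a PCI bus for types above 4, and a
 * single I/O APIC at the default physical address.
 */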
static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		fallthrough;
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type	= MP_IOAPIC;
	ioapic.apicid	= 2;
	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags	= MPC_APIC_USABLE;
	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif

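/*
 * Build a complete default configuration as defined by the MP spec:
 * local APIC at the default address, two CPUs, the default buses and
 * I/O APIC, and the two local interrupt (ExtINT/NMI) entries.
 */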
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

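/* Physical address of the MP floating pointer structure, if one was found. */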
static unsigned long mpf_base;
static bool mpf_found;

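/* Temporarily map the MP configuration table header to read its length. */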
static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_memremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_memunmap(mpc, PAGE_SIZE);
	apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}

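/*
 * Map and parse the MP configuration table referenced by the floating
 * pointer structure.  On parse failure SMP support is disabled; if the
 * table contains no interrupt entries, fall back to the default ISA
 * interrupt routing.
 */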
static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);

	/*
	 * Read the physical hardware table.  Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_memunmap(mpc, size);
		return -1;
	}
	early_memunmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken.  We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init default_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf;

	if (!smp_found_config)
		return;

	if (!mpf_found)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading, aka only have
	 * thread 0 apic id in MPS table
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: error mapping MP table\n");
		return;
	}

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info("    IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info("    Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1) {
		if (early) {
			/*
			 * local APIC has default address
			 */
			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
			goto out;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early))
			goto out;
	} else
		BUG();

	if (!early)
		pr_info("Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
out:
	early_memunmap(mpf, sizeof(*mpf));
}

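/* Keep the MP configuration table out of the early memory allocator. */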
static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}

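/*
 * Scan a physical memory range in 16-byte steps for the MP floating
 * pointer structure: "_MP_" signature, length of one paragraph, valid
 * checksum and a known spec revision (1.1 or 1.4).
 */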
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp;
	struct mpf_intel *mpf;
	int ret = 0;

	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
		    base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		bp = early_memremap(base, length);
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_base = base;
			mpf_found = true;

			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
				base, base + sizeof(*mpf) - 1);

			memblock_reserve(base, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			ret = 1;
		}
		early_memunmap(bp, length);

		if (ret)
			break;

		base += 16;
		length -= 16;
	}
	return ret;
}

void __init default_find_smp_config(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];

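/*
 * Find the mp_irqs[] slot that corresponds to an MP-table interrupt
 * entry.  Only level-triggered, active-low mp_INT entries are matched;
 * everything else is treated as legacy.  Returns the matching index,
 * 0 for legacy entries, -1 if no match and -2 if the slot was already
 * claimed.
 */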
static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
		return 0;

	/* not legacy */

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}

#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

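/*
 * Refresh one MP-table interrupt entry from the kernel's mp_irqs[] view.
 * Entries with no usable counterpart are remembered as spare slots that
 * replace_intsrc_all() can reuse later.
 */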
static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_printk(APIC_VERBOSE, "OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_printk(APIC_VERBOSE, "NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * not found (-1), or duplicated (-2) are invalid entries,
		 * we need to use the slot later
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}

static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

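/*
 * Walk the MP configuration table and bring its interrupt entries in
 * line with mp_irqs[]: stale entries are rewritten in place, and any
 * remaining mp_irqs[] entries are written into spare slots or appended
 * if the table has room.  The table checksum is recomputed at the end.
 */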
static int  __init replace_intsrc_all(struct mpc_table *mpc,
					unsigned long mpc_new_phys,
					unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (nr_m_spare > 0) {
			apic_printk(APIC_VERBOSE, "*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}

int enable_update_mptable;

static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);

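/* Optional replacement buffer for an enlarged MP table ("alloc_mptable="). */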
static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);

void __init e820__memblock_alloc_reserved_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
}

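/*
 * Late initcall: when "update_mptable" is given on the command line,
 * rewrite the firmware-provided MP table (in place, or in the buffer
 * allocated via "alloc_mptable=") so that its interrupt entries match
 * what the kernel actually uses.
 */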
static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;
	unsigned long size;

	if (!enable_update_mptable)
		return 0;

	if (!mpf_found)
		return 0;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: mpf early_memremap() failed\n");
		return 0;
	}

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1)
		goto do_unmap_mpf;

	if (!mpf->physptr)
		goto do_unmap_mpf;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);
	if (!mpc) {
		pr_err("MPTABLE: mpc early_memremap() failed\n");
		goto do_unmap_mpf;
	}

	if (!smp_check_mpc(mpc, oem, str))
		goto do_unmap_mpc;

	pr_info("mpf: %llx\n", (u64)mpf_base);
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

	if (!mpc_new_phys) {
		unsigned char old, new;
		/* check if we can change the position */
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			goto do_unmap_mpc;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
		if (!mpc_new) {
			pr_err("MPTABLE: new mpc early_memremap() failed\n");
			goto do_unmap_mpc;
		}
		mpf->physptr = mpc_new_phys;
		memcpy(mpc_new, mpc, mpc->length);
		early_memunmap(mpc, size);
		mpc = mpc_new;
		size = mpc_new_length;
		/* check if we can modify that */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
			if (!mpf_new) {
				pr_err("MPTABLE: new mpf early_memremap() failed\n");
				goto do_unmap_mpc;
			}
			pr_info("mpf new: %x\n", 0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			early_memunmap(mpf, sizeof(*mpf));
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * only replace the one with mp_INT and
	 *	 MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
	 * already in mp_irqs , stored by ... and mp_config_acpi_gsi,
	 * may need pci=routeirq for all coverage
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

do_unmap_mpc:
	early_memunmap(mpc, size);

do_unmap_mpf:
	early_memunmap(mpf, sizeof(*mpf));

	return 0;
}

late_initcall(update_mp_table);