1 /*
2  * Copyright (c) 1996, by Steve Passe
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. The name of the developer may NOT be used to endorse or promote products
11  *    derived from this software without specific prior written permission.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include "opt_smp.h"
29 #include "opt_cpu.h"
30 #include "opt_user_ldt.h"
31 
32 #ifdef SMP
33 #include <machine/smptests.h>
34 #else
35 #error "SMP must be defined when compiling this file"
36 #endif
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/proc.h>
42 #include <sys/sysctl.h>
43 #include <sys/malloc.h>
44 #include <sys/memrange.h>
45 #ifdef BETTER_CLOCK
46 #include <sys/dkstat.h>
47 #endif
48 #include <sys/cons.h>	/* cngetc() */
49 
50 #include <vm/vm.h>
51 #include <vm/vm_param.h>
52 #include <vm/pmap.h>
53 #include <vm/vm_kern.h>
54 #include <vm/vm_extern.h>
55 #ifdef BETTER_CLOCK
56 #include <sys/lock.h>
57 #include <vm/vm_map.h>
58 #include <sys/user.h>
59 #ifdef GPROF
60 #include <sys/gmon.h>
61 #endif
62 #endif
63 
64 #include <machine/smp.h>
65 #include <machine/apic.h>
66 #include <machine/atomic.h>
67 #include <machine/cpufunc.h>
68 #include <machine/mpapic.h>
69 #include <machine/psl.h>
70 #include <machine/segments.h>
71 #include <machine/smptests.h>	/** TEST_DEFAULT_CONFIG, TEST_TEST1 */
72 #include <machine/tss.h>
73 #include <machine/specialreg.h>
74 #include <machine/globaldata.h>
75 
76 #if defined(APIC_IO)
77 #include <machine/md_var.h>		/* setidt() */
78 #include <i386/isa/icu.h>		/* IPIs */
79 #include <i386/isa/intr_machdep.h>	/* IPIs */
80 #endif	/* APIC_IO */
81 
82 #if defined(TEST_DEFAULT_CONFIG)
83 #define MPFPS_MPFB1	TEST_DEFAULT_CONFIG
84 #else
85 #define MPFPS_MPFB1	mpfps->mpfb1
86 #endif  /* TEST_DEFAULT_CONFIG */
87 
88 #define WARMBOOT_TARGET		0
89 #define WARMBOOT_OFF		(KERNBASE + 0x0467)
90 #define WARMBOOT_SEG		(KERNBASE + 0x0469)
91 
92 #ifdef PC98
93 #define BIOS_BASE		(0xe8000)
94 #define BIOS_SIZE		(0x18000)
95 #else
96 #define BIOS_BASE		(0xf0000)
97 #define BIOS_SIZE		(0x10000)
98 #endif
99 #define BIOS_COUNT		(BIOS_SIZE/4)
100 
101 #define CMOS_REG		(0x70)
102 #define CMOS_DATA		(0x71)
103 #define BIOS_RESET		(0x0f)
104 #define BIOS_WARM		(0x0a)
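/*
 * CMOS shutdown-status / warm-boot vector plumbing used by start_all_aps():
 * the BIOS_RESET CMOS register is set to BIOS_WARM and the real-mode vector
 * at 0x467 (offset) / 0x469 (segment) is pointed at the AP trampoline, so a
 * processor that restarts through the BIOS warm-reset path jumps directly to
 * our boot code instead of running POST.
 */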
105 
106 #define PROCENTRY_FLAG_EN	0x01
107 #define PROCENTRY_FLAG_BP	0x02
108 #define IOAPICENTRY_FLAG_EN	0x01
109 
110 
111 /* MP Floating Pointer Structure */
112 typedef struct MPFPS {
113 	char    signature[4];
114 	void   *pap;
115 	u_char  length;
116 	u_char  spec_rev;
117 	u_char  checksum;
118 	u_char  mpfb1;
119 	u_char  mpfb2;
120 	u_char  mpfb3;
121 	u_char  mpfb4;
122 	u_char  mpfb5;
123 }      *mpfps_t;
124 
125 /* MP Configuration Table Header */
126 typedef struct MPCTH {
127 	char    signature[4];
128 	u_short base_table_length;
129 	u_char  spec_rev;
130 	u_char  checksum;
131 	u_char  oem_id[8];
132 	u_char  product_id[12];
133 	void   *oem_table_pointer;
134 	u_short oem_table_size;
135 	u_short entry_count;
136 	void   *apic_address;
137 	u_short extended_table_length;
138 	u_char  extended_table_checksum;
139 	u_char  reserved;
140 }      *mpcth_t;
141 
142 
143 typedef struct PROCENTRY {
144 	u_char  type;
145 	u_char  apic_id;
146 	u_char  apic_version;
147 	u_char  cpu_flags;
148 	u_long  cpu_signature;
149 	u_long  feature_flags;
150 	u_long  reserved1;
151 	u_long  reserved2;
152 }      *proc_entry_ptr;
153 
154 typedef struct BUSENTRY {
155 	u_char  type;
156 	u_char  bus_id;
157 	char    bus_type[6];
158 }      *bus_entry_ptr;
159 
160 typedef struct IOAPICENTRY {
161 	u_char  type;
162 	u_char  apic_id;
163 	u_char  apic_version;
164 	u_char  apic_flags;
165 	void   *apic_address;
166 }      *io_apic_entry_ptr;
167 
168 typedef struct INTENTRY {
169 	u_char  type;
170 	u_char  int_type;
171 	u_short int_flags;
172 	u_char  src_bus_id;
173 	u_char  src_bus_irq;
174 	u_char  dst_apic_id;
175 	u_char  dst_apic_int;
176 }      *int_entry_ptr;
177 
178 /* descriptions of MP basetable entries */
179 typedef struct BASETABLE_ENTRY {
180 	u_char  type;
181 	u_char  length;
182 	char    name[16];
183 }       basetable_entry;
184 
185 /*
186  * this code MUST be enabled both here and in mpboot.s.
187  * it follows the very early stages of AP boot by placing values in CMOS ram.
188  * it will NORMALLY never be needed, hence the primitive method of enabling it.
189  *
190 #define CHECK_POINTS
191  */
192 
193 #if defined(CHECK_POINTS) && !defined(PC98)
194 #define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
195 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
196 
197 #define CHECK_INIT(D);				\
198 	CHECK_WRITE(0x34, (D));			\
199 	CHECK_WRITE(0x35, (D));			\
200 	CHECK_WRITE(0x36, (D));			\
201 	CHECK_WRITE(0x37, (D));			\
202 	CHECK_WRITE(0x38, (D));			\
203 	CHECK_WRITE(0x39, (D));
204 
205 #define CHECK_PRINT(S);				\
206 	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
207 	   (S),					\
208 	   CHECK_READ(0x34),			\
209 	   CHECK_READ(0x35),			\
210 	   CHECK_READ(0x36),			\
211 	   CHECK_READ(0x37),			\
212 	   CHECK_READ(0x38),			\
213 	   CHECK_READ(0x39));
214 
215 #else				/* CHECK_POINTS */
216 
217 #define CHECK_INIT(D)
218 #define CHECK_PRINT(S)
219 
220 #endif				/* CHECK_POINTS */
221 
222 /*
223  * Values to send to the POST hardware.
224  */
225 #define MP_BOOTADDRESS_POST	0x10
226 #define MP_PROBE_POST		0x11
227 #define MPTABLE_PASS1_POST	0x12
228 
229 #define MP_START_POST		0x13
230 #define MP_ENABLE_POST		0x14
231 #define MPTABLE_PASS2_POST	0x15
232 
233 #define START_ALL_APS_POST	0x16
234 #define INSTALL_AP_TRAMP_POST	0x17
235 #define START_AP_POST		0x18
236 
237 #define MP_ANNOUNCE_POST	0x19
238 
239 
240 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
241 int	current_postcode;
242 
243 /** XXX FIXME: what system files declare these??? */
244 extern struct region_descriptor r_gdt, r_idt;
245 
246 int	bsp_apic_ready = 0;	/* flags usability of the BSP APIC */
247 int	mp_ncpus;		/* # of CPUs, including BSP */
248 int	mp_naps;		/* # of Application processors */
249 int	mp_nbusses;		/* # of busses */
250 int	mp_napics;		/* # of IO APICs */
251 int	boot_cpu_id;		/* designated BSP */
252 vm_offset_t cpu_apic_address;
253 vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
254 extern	int nkpt;
255 
256 u_int32_t cpu_apic_versions[NCPU];
257 u_int32_t io_apic_versions[NAPIC];
258 
259 #ifdef APIC_INTR_DIAGNOSTIC
260 int apic_itrace_enter[32];
261 int apic_itrace_tryisrlock[32];
262 int apic_itrace_gotisrlock[32];
263 int apic_itrace_active[32];
264 int apic_itrace_masked[32];
265 int apic_itrace_noisrlock[32];
266 int apic_itrace_masked2[32];
267 int apic_itrace_unmask[32];
268 int apic_itrace_noforward[32];
269 int apic_itrace_leave[32];
270 int apic_itrace_enter2[32];
271 int apic_itrace_doreti[32];
272 int apic_itrace_splz[32];
273 int apic_itrace_eoi[32];
274 #ifdef APIC_INTR_DIAGNOSTIC_IRQ
275 unsigned short apic_itrace_debugbuffer[32768];
276 int apic_itrace_debugbuffer_idx;
277 struct simplelock apic_itrace_debuglock;
278 #endif
279 #endif
280 
281 #ifdef APIC_INTR_REORDER
282 struct {
283 	volatile int *location;
284 	int bit;
285 } apic_isrbit_location[32];
286 #endif
287 
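/*
 * Per-IRQ routing information filled in by assign_apic_irq(): which logical
 * IO APIC and intpin deliver the IRQ, the APIC's register window address and
 * the index of the pin's redirection table entry.  ioapic == -1 marks a
 * free slot.
 */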
288 struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];
289 
290 /*
291  * APIC ID logical/physical mapping structures.
292  * We oversize these to simplify boot-time config.
293  */
294 int     cpu_num_to_apic_id[NAPICID];
295 int     io_num_to_apic_id[NAPICID];
296 int     apic_id_to_logical[NAPICID];
297 
298 
299 /* Bitmap of all available CPUs */
300 u_int	all_cpus;
301 
302 /* AP uses this during bootstrap.  Do not staticize.  */
303 char *bootSTK;
304 static int bootAP;
305 
306 /* Hotwire a 0->4MB V==P mapping */
307 extern pt_entry_t *KPTphys;
308 
309 /* SMP page table page */
310 extern pt_entry_t *SMPpt;
311 
312 struct pcb stoppcbs[NCPU];
313 
314 int smp_started;		/* has the system started? */
315 
316 /*
317  * Local data and functions.
318  */
319 
320 static int	mp_capable;
321 static u_int	boot_address;
322 static u_int	base_memory;
323 
324 static int	picmode;		/* 0: virtual wire mode, 1: PIC mode */
325 static mpfps_t	mpfps;
326 static int	search_for_sig(u_int32_t target, int count);
327 static void	mp_enable(u_int boot_addr);
328 
329 static int	mptable_pass1(void);
330 static int	mptable_pass2(void);
331 static void	default_mp_table(int type);
332 static void	fix_mp_table(void);
333 static void	setup_apic_irq_mapping(void);
334 static void	init_locks(void);
335 static int	start_all_aps(u_int boot_addr);
336 static void	install_ap_tramp(u_int boot_addr);
337 static int	start_ap(int logicalCpu, u_int boot_addr);
338 static int	apic_int_is_bus_type(int intr, int bus_type);
339 
340 /*
341  * Calculate usable address in base memory for AP trampoline code.
342  */
343 u_int
344 mp_bootaddress(u_int basemem)
345 {
346 	POSTCODE(MP_BOOTADDRESS_POST);
347 
348 	base_memory = basemem * 1024;	/* convert to bytes */
349 
350 	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
351 	if ((base_memory - boot_address) < bootMP_size)
352 		boot_address -= 4096;	/* not enough, lower by 4k */
353 
354 	return boot_address;
355 }
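/*
 * Example (illustrative): with basemem == 639 (KB), base_memory is 0x9fc00,
 * which rounds down to a boot_address of 0x9f000; if the trampoline were
 * larger than the remaining 0xc00 bytes, boot_address would drop one more
 * page to 0x9e000.
 */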
356 
357 
358 /*
359  * Look for an Intel MP spec table (ie, SMP capable hardware).
360  */
361 int
362 mp_probe(void)
363 {
364 	int     x;
365 	u_long  segment;
366 	u_int32_t target;
367 
368 	POSTCODE(MP_PROBE_POST);
369 
370 	/* see if EBDA exists */
371 	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
372 		/* search first 1K of EBDA */
373 		target = (u_int32_t) (segment << 4);
374 		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
375 			goto found;
376 	} else {
377 		/* last 1K of base memory, effective 'top of base' passed in */
378 		target = (u_int32_t) (base_memory - 0x400);
379 		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
380 			goto found;
381 	}
382 
383 	/* search the BIOS */
384 	target = (u_int32_t) BIOS_BASE;
385 	if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
386 		goto found;
387 
388 	/* nothing found */
389 	mpfps = (mpfps_t)0;
390 	mp_capable = 0;
391 	return 0;
392 
393 found:
394 	/* calculate needed resources */
395 	mpfps = (mpfps_t)x;
396 	if (mptable_pass1())
397 		panic("you must reconfigure your kernel");
398 
399 	/* flag fact that we are running multiple processors */
400 	mp_capable = 1;
401 	return 1;
402 }
403 
404 
405 /*
406  * Startup the SMP processors.
407  */
408 void
409 mp_start(void)
410 {
411 	POSTCODE(MP_START_POST);
412 
413 	/* look for MP capable motherboard */
414 	if (mp_capable)
415 		mp_enable(boot_address);
416 	else
417 		panic("MP hardware not found!");
418 }
419 
420 
421 /*
422  * Print various information about the SMP system hardware and setup.
423  */
424 void
425 mp_announce(void)
426 {
427 	int     x;
428 
429 	POSTCODE(MP_ANNOUNCE_POST);
430 
431 	printf("FreeBSD/SMP: Multiprocessor motherboard\n");
432 	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
433 	printf(", version: 0x%08x", cpu_apic_versions[0]);
434 	printf(", at 0x%08x\n", cpu_apic_address);
435 	for (x = 1; x <= mp_naps; ++x) {
436 		printf(" cpu%d (AP):  apic id: %2d", x, CPU_TO_ID(x));
437 		printf(", version: 0x%08x", cpu_apic_versions[x]);
438 		printf(", at 0x%08x\n", cpu_apic_address);
439 	}
440 
441 #if defined(APIC_IO)
442 	for (x = 0; x < mp_napics; ++x) {
443 		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
444 		printf(", version: 0x%08x", io_apic_versions[x]);
445 		printf(", at 0x%08x\n", io_apic_address[x]);
446 	}
447 #else
448 	printf(" Warning: APIC I/O disabled\n");
449 #endif	/* APIC_IO */
450 }
451 
452 /*
453  * AP CPUs call this to sync up protected mode.
454  */
455 void
456 init_secondary(void)
457 {
458 	int	gsel_tss;
459 	int	x, myid = bootAP;
460 
461 	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
462 	gdt_segs[GPROC0_SEL].ssd_base =
463 		(int) &SMP_prvspace[myid].globaldata.gd_common_tss;
464 	SMP_prvspace[myid].globaldata.gd_prvspace = &SMP_prvspace[myid];
465 
466 	for (x = 0; x < NGDT; x++) {
467 		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
468 	}
469 
470 	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
471 	r_gdt.rd_base = (int) &gdt[myid * NGDT];
472 	lgdt(&r_gdt);			/* does magic intra-segment return */
473 
474 	lidt(&r_idt);
475 
476 	lldt(_default_ldt);
477 #ifdef USER_LDT
478 	currentldt = _default_ldt;
479 #endif
480 
481 	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
482 	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
483 	common_tss.tss_esp0 = 0;	/* not used until after switch */
484 	common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
485 	common_tss.tss_ioopt = (sizeof common_tss) << 16;
486 	tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
487 	common_tssd = *tss_gdt;
488 	ltr(gsel_tss);
489 
490 	pmap_set_opt();
491 }
492 
493 
494 #if defined(APIC_IO)
495 /*
496  * Final configuration of the BSP's local APIC:
497  *  - disable 'pic mode'.
498  *  - disable 'virtual wire mode'.
499  *  - enable NMI.
500  */
501 void
502 bsp_apic_configure(void)
503 {
504 	u_char		byte;
505 	u_int32_t	temp;
506 
507 	/* leave 'pic mode' if necessary */
508 	if (picmode) {
509 		outb(0x22, 0x70);	/* select IMCR */
510 		byte = inb(0x23);	/* current contents */
511 		byte |= 0x01;		/* mask external INTR */
512 		outb(0x23, byte);	/* disconnect 8259s/NMI */
513 	}
514 
515 	/* mask lint0 (the 8259 'virtual wire' connection) */
516 	temp = lapic.lvt_lint0;
517 	temp |= APIC_LVT_M;		/* set the mask */
518 	lapic.lvt_lint0 = temp;
519 
520         /* setup lint1 to handle NMI */
521         temp = lapic.lvt_lint1;
522         temp &= ~APIC_LVT_M;		/* clear the mask */
523         lapic.lvt_lint1 = temp;
524 
525 	if (bootverbose)
526 		apic_dump("bsp_apic_configure()");
527 }
528 #endif  /* APIC_IO */
529 
530 
531 /*******************************************************************
532  * local functions and data
533  */
534 
535 /*
536  * start the SMP system
537  */
538 static void
539 mp_enable(u_int boot_addr)
540 {
541 	int     x;
542 #if defined(APIC_IO)
543 	int     apic;
544 	u_int   ux;
545 #endif	/* APIC_IO */
546 
547 	POSTCODE(MP_ENABLE_POST);
548 
549 	/* turn on 4MB of V == P addressing so we can get to MP table */
550 	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
551 	invltlb();
552 
553 	/* examine the MP table for needed info, uses physical addresses */
554 	x = mptable_pass2();
555 
556 	*(int *)PTD = 0;
557 	invltlb();
558 
559 	/* can't process default configs till the CPU APIC is pmapped */
560 	if (x)
561 		default_mp_table(x);
562 
563 	/* post scan cleanup */
564 	fix_mp_table();
565 	setup_apic_irq_mapping();
566 
567 #if defined(APIC_IO)
568 
569 	/* fill the LOGICAL io_apic_versions table */
570 	for (apic = 0; apic < mp_napics; ++apic) {
571 		ux = io_apic_read(apic, IOAPIC_VER);
572 		io_apic_versions[apic] = ux;
573 		io_apic_set_id(apic, IO_TO_ID(apic));
574 	}
575 
576 	/* program each IO APIC in the system */
577 	for (apic = 0; apic < mp_napics; ++apic)
578 		if (io_apic_setup(apic) < 0)
579 			panic("IO APIC setup failure");
580 
581 	/* install a 'Spurious INTerrupt' vector */
582 	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
583 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
584 
585 	/* install an inter-CPU IPI for TLB invalidation */
586 	setidt(XINVLTLB_OFFSET, Xinvltlb,
587 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
588 
589 #ifdef BETTER_CLOCK
590 	/* install an inter-CPU IPI for reading processor state */
591 	setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
592 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
593 #endif
594 
595 	/* install an inter-CPU IPI for all-CPU rendezvous */
596 	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
597 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
598 
599 	/* install an inter-CPU IPI for forcing an additional software trap */
600 	setidt(XCPUAST_OFFSET, Xcpuast,
601 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
602 
603 	/* install an inter-CPU IPI for interrupt forwarding */
604 	setidt(XFORWARD_IRQ_OFFSET, Xforward_irq,
605 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
606 
607 	/* install an inter-CPU IPI for CPU stop/restart */
608 	setidt(XCPUSTOP_OFFSET, Xcpustop,
609 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
610 
611 #if defined(TEST_TEST1)
612 	/* install a "fake hardware INTerrupt" vector */
613 	setidt(XTEST1_OFFSET, Xtest1,
614 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
615 #endif  /** TEST_TEST1 */
616 
617 #endif	/* APIC_IO */
618 
619 	/* initialize all SMP locks */
620 	init_locks();
621 
622 	/* start each Application Processor */
623 	start_all_aps(boot_addr);
624 }
625 
626 
627 /*
628  * look for the MP spec signature
629  */
630 
631 /* string defined by the Intel MP Spec as identifying the MP table */
632 #define MP_SIG		0x5f504d5f	/* _MP_ */
633 #define NEXT(X)		((X) += 4)
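/*
 * Per the MP spec the floating pointer structure starts on a 16-byte
 * boundary, so NEXT() advances the 32-bit word index by 4 (16 bytes) per
 * probe; 'count' below is the size of the searched region in 32-bit words.
 */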
634 static int
635 search_for_sig(u_int32_t target, int count)
636 {
637 	int     x;
638 	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
639 
640 	for (x = 0; x < count; NEXT(x))
641 		if (addr[x] == MP_SIG)
642 			/* make array index a byte index */
643 			return (target + (x * sizeof(u_int32_t)));
644 
645 	return -1;
646 }
647 
648 
649 static basetable_entry basetable_entry_types[] =
650 {
651 	{0, 20, "Processor"},
652 	{1, 8, "Bus"},
653 	{2, 8, "I/O APIC"},
654 	{3, 8, "I/O INT"},
655 	{4, 8, "Local INT"}
656 };
657 
658 typedef struct BUSDATA {
659 	u_char  bus_id;
660 	enum busTypes bus_type;
661 }       bus_datum;
662 
663 typedef struct INTDATA {
664 	u_char  int_type;
665 	u_short int_flags;
666 	u_char  src_bus_id;
667 	u_char  src_bus_irq;
668 	u_char  dst_apic_id;
669 	u_char  dst_apic_int;
670 	u_char	int_vector;
671 }       io_int, local_int;
672 
673 typedef struct BUSTYPENAME {
674 	u_char  type;
675 	char    name[7];
676 }       bus_type_name;
677 
678 static bus_type_name bus_type_table[] =
679 {
680 	{CBUS, "CBUS"},
681 	{CBUSII, "CBUSII"},
682 	{EISA, "EISA"},
683 	{MCA, "MCA"},
684 	{UNKNOWN_BUSTYPE, "---"},
685 	{ISA, "ISA"},
686 	{MCA, "MCA"},
687 	{UNKNOWN_BUSTYPE, "---"},
688 	{UNKNOWN_BUSTYPE, "---"},
689 	{UNKNOWN_BUSTYPE, "---"},
690 	{UNKNOWN_BUSTYPE, "---"},
691 	{UNKNOWN_BUSTYPE, "---"},
692 	{PCI, "PCI"},
693 	{UNKNOWN_BUSTYPE, "---"},
694 	{UNKNOWN_BUSTYPE, "---"},
695 	{UNKNOWN_BUSTYPE, "---"},
696 	{UNKNOWN_BUSTYPE, "---"},
697 	{XPRESS, "XPRESS"},
698 	{UNKNOWN_BUSTYPE, "---"}
699 };
700 /* from MP spec v1.4, table 5-1 */
701 static int default_data[7][5] =
702 {
703 /*   nbus, id0, type0, id1, type1 */
704 	{1, 0, ISA, 255, 255},
705 	{1, 0, EISA, 255, 255},
706 	{1, 0, EISA, 255, 255},
707 	{1, 0, MCA, 255, 255},
708 	{2, 0, ISA, 1, PCI},
709 	{2, 0, EISA, 1, PCI},
710 	{2, 0, MCA, 1, PCI}
711 };
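/*
 * Example: default config type 5 ("ISA + PCI, integrated APIC") selects row
 * {2, 0, ISA, 1, PCI}: two busses, bus id 0 is ISA and bus id 1 is PCI.
 * A 255 in the id/type columns means there is no second bus.
 */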
712 
713 
714 /* the bus data */
715 static bus_datum bus_data[NBUS];
716 
717 /* the IO INT data, one entry per possible APIC INTerrupt */
718 static io_int  io_apic_ints[NINTR];
719 
720 static int nintrs;
721 
722 static int processor_entry	__P((proc_entry_ptr entry, int cpu));
723 static int bus_entry		__P((bus_entry_ptr entry, int bus));
724 static int io_apic_entry	__P((io_apic_entry_ptr entry, int apic));
725 static int int_entry		__P((int_entry_ptr entry, int intr));
726 static int lookup_bus_type	__P((char *name));
727 
728 
729 /*
730  * 1st pass on motherboard's Intel MP specification table.
731  *
732  * initializes:
733  *	mp_ncpus = 1
734  *
735  * determines:
736  *	cpu_apic_address (common to all CPUs)
737  *	io_apic_address[N]
738  *	mp_naps
739  *	mp_nbusses
740  *	mp_napics
741  *	nintrs
742  */
743 static int
744 mptable_pass1(void)
745 {
746 	int	x;
747 	mpcth_t	cth;
748 	int	totalSize;
749 	void*	position;
750 	int	count;
751 	int	type;
752 	int	mustpanic;
753 
754 	POSTCODE(MPTABLE_PASS1_POST);
755 
756 	mustpanic = 0;
757 
758 	/* clear various tables */
759 	for (x = 0; x < NAPICID; ++x) {
760 		io_apic_address[x] = ~0;	/* IO APIC address table */
761 	}
762 
763 	/* init everything to empty */
764 	mp_naps = 0;
765 	mp_nbusses = 0;
766 	mp_napics = 0;
767 	nintrs = 0;
768 
769 	/* check for use of 'default' configuration */
770 	if (MPFPS_MPFB1 != 0) {
771 		/* use default addresses */
772 		cpu_apic_address = DEFAULT_APIC_BASE;
773 		io_apic_address[0] = DEFAULT_IO_APIC_BASE;
774 
775 		/* fill in with defaults */
776 		mp_naps = 2;		/* includes BSP */
777 		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
778 #if defined(APIC_IO)
779 		mp_napics = 1;
780 		nintrs = 16;
781 #endif	/* APIC_IO */
782 	}
783 	else {
784 		if ((cth = mpfps->pap) == 0)
785 			panic("MP Configuration Table Header MISSING!");
786 
787 		cpu_apic_address = (vm_offset_t) cth->apic_address;
788 
789 		/* walk the table, recording info of interest */
790 		totalSize = cth->base_table_length - sizeof(struct MPCTH);
791 		position = (u_char *) cth + sizeof(struct MPCTH);
792 		count = cth->entry_count;
793 
794 		while (count--) {
795 			switch (type = *(u_char *) position) {
796 			case 0: /* processor_entry */
797 				if (((proc_entry_ptr)position)->cpu_flags
798 					& PROCENTRY_FLAG_EN)
799 					++mp_naps;
800 				break;
801 			case 1: /* bus_entry */
802 				++mp_nbusses;
803 				break;
804 			case 2: /* io_apic_entry */
805 				if (((io_apic_entry_ptr)position)->apic_flags
806 					& IOAPICENTRY_FLAG_EN)
807 					io_apic_address[mp_napics++] =
808 					    (vm_offset_t)((io_apic_entry_ptr)
809 						position)->apic_address;
810 				break;
811 			case 3: /* int_entry */
812 				++nintrs;
813 				break;
814 			case 4:	/* local int entry */
815 				break;
816 			default:
817 				panic("mpfps Base Table HOSED!");
818 				/* NOTREACHED */
819 			}
820 
821 			totalSize -= basetable_entry_types[type].length;
822 			position = (u_char *) position + basetable_entry_types[type].length;
823 		}
824 	}
825 
826 	/* qualify the numbers */
827 	if (mp_naps > NCPU) {
828 		printf("Warning: only using %d of %d available CPUs!\n",
829 			NCPU, mp_naps);
830 		mp_naps = NCPU;
831 	}
832 	if (mp_nbusses > NBUS) {
833 		printf("found %d busses, increase NBUS\n", mp_nbusses);
834 		mustpanic = 1;
835 	}
836 	if (mp_napics > NAPIC) {
837 		printf("found %d apics, increase NAPIC\n", mp_napics);
838 		mustpanic = 1;
839 	}
840 	if (nintrs > NINTR) {
841 		printf("found %d intrs, increase NINTR\n", nintrs);
842 		mustpanic = 1;
843 	}
844 
845 	/*
846 	 * Count the BSP.
847 	 * This is also used as a counter while starting the APs.
848 	 */
849 	mp_ncpus = 1;
850 
851 	--mp_naps;	/* subtract the BSP */
852 
853 	return mustpanic;
854 }
855 
856 
857 /*
858  * 2nd pass on motherboard's Intel MP specification table.
859  *
860  * sets:
861  *	boot_cpu_id
862  *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
863  *	CPU_TO_ID(N), logical CPU to APIC ID table
864  *	IO_TO_ID(N), logical IO to APIC ID table
865  *	bus_data[N]
866  *	io_apic_ints[N]
867  */
868 static int
869 mptable_pass2(void)
870 {
871 	int     x;
872 	mpcth_t cth;
873 	int     totalSize;
874 	void*   position;
875 	int     count;
876 	int     type;
877 	int     apic, bus, cpu, intr;
878 
879 	POSTCODE(MPTABLE_PASS2_POST);
880 
881 	/* clear various tables */
882 	for (x = 0; x < NAPICID; ++x) {
883 		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
884 		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
885 		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
886 	}
887 
888 	/* clear bus data table */
889 	for (x = 0; x < NBUS; ++x)
890 		bus_data[x].bus_id = 0xff;
891 
892 	/* clear IO APIC INT table */
893 	for (x = 0; x < NINTR; ++x) {
894 		io_apic_ints[x].int_type = 0xff;
895 		io_apic_ints[x].int_vector = 0xff;
896 	}
897 
898 	/* setup the cpu/apic mapping arrays */
899 	boot_cpu_id = -1;
900 
901 	/* record whether PIC or virtual-wire mode */
902 	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;
903 
904 	/* check for use of 'default' configuration */
905 	if (MPFPS_MPFB1 != 0)
906 		return MPFPS_MPFB1;	/* return default configuration type */
907 
908 	if ((cth = mpfps->pap) == 0)
909 		panic("MP Configuration Table Header MISSING!");
910 
911 	/* walk the table, recording info of interest */
912 	totalSize = cth->base_table_length - sizeof(struct MPCTH);
913 	position = (u_char *) cth + sizeof(struct MPCTH);
914 	count = cth->entry_count;
915 	apic = bus = intr = 0;
916 	cpu = 1;				/* pre-count the BSP */
917 
918 	while (count--) {
919 		switch (type = *(u_char *) position) {
920 		case 0:
921 			if (processor_entry(position, cpu))
922 				++cpu;
923 			break;
924 		case 1:
925 			if (bus_entry(position, bus))
926 				++bus;
927 			break;
928 		case 2:
929 			if (io_apic_entry(position, apic))
930 				++apic;
931 			break;
932 		case 3:
933 			if (int_entry(position, intr))
934 				++intr;
935 			break;
936 		case 4:
937 			/* int_entry(position); */
938 			break;
939 		default:
940 			panic("mpfps Base Table HOSED!");
941 			/* NOTREACHED */
942 		}
943 
944 		totalSize -= basetable_entry_types[type].length;
945 		position = (u_char *) position + basetable_entry_types[type].length;
946 	}
947 
948 	if (boot_cpu_id == -1)
949 		panic("NO BSP found!");
950 
951 	/* report the fact that it's NOT a default configuration */
952 	return 0;
953 }
954 
955 
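/*
 * Record that 'irq' is delivered by intpin 'intpin' of logical IO APIC
 * 'apic': fill in the int_to_apicintpin[] slot and stamp the vector into
 * every still-unassigned (0xff) io_apic_ints[] entry of INT or ExtInt type
 * that targets this APIC/pin.
 */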
956 void
957 assign_apic_irq(int apic, int intpin, int irq)
958 {
959 	int x;
960 
961 	if (int_to_apicintpin[irq].ioapic != -1)
962 		panic("assign_apic_irq: inconsistent table");
963 
964 	int_to_apicintpin[irq].ioapic = apic;
965 	int_to_apicintpin[irq].int_pin = intpin;
966 	int_to_apicintpin[irq].apic_address = ioapic[apic];
967 	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
968 
969 	for (x = 0; x < nintrs; x++) {
970 		if ((io_apic_ints[x].int_type == 0 ||
971 		     io_apic_ints[x].int_type == 3) &&
972 		    io_apic_ints[x].int_vector == 0xff &&
973 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
974 		    io_apic_ints[x].dst_apic_int == intpin)
975 			io_apic_ints[x].int_vector = irq;
976 	}
977 }
978 
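/*
 * Undo assign_apic_irq(): release the int_to_apicintpin[] slot for 'irq' and
 * mark the matching io_apic_ints[] entries as unassigned (0xff) again.
 */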
979 void
980 revoke_apic_irq(int irq)
981 {
982 	int x;
983 	int oldapic;
984 	int oldintpin;
985 
986 	if (int_to_apicintpin[irq].ioapic == -1)
987 		panic("revoke_apic_irq: inconsistent table");
988 
989 	oldapic = int_to_apicintpin[irq].ioapic;
990 	oldintpin = int_to_apicintpin[irq].int_pin;
991 
992 	int_to_apicintpin[irq].ioapic = -1;
993 	int_to_apicintpin[irq].int_pin = 0;
994 	int_to_apicintpin[irq].apic_address = NULL;
995 	int_to_apicintpin[irq].redirindex = 0;
996 
997 	for (x = 0; x < nintrs; x++) {
998 		if ((io_apic_ints[x].int_type == 0 ||
999 		     io_apic_ints[x].int_type == 3) &&
1000 		    io_apic_ints[x].int_vector != 0xff &&
1001 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1002 		    io_apic_ints[x].dst_apic_int == oldintpin)
1003 			io_apic_ints[x].int_vector = 0xff;
1004 	}
1005 }
1006 
1007 
1008 
1009 static void
1010 swap_apic_id(int apic, int oldid, int newid)
1011 {
1012 	int x;
1013 	int oapic;
1014 
1015 
1016 	if (oldid == newid)
1017 		return;			/* Nothing to do */
1018 
1019 	printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1020 	       apic, oldid, newid);
1021 
1022 	/* Swap physical APIC IDs in interrupt entries */
1023 	for (x = 0; x < nintrs; x++) {
1024 		if (io_apic_ints[x].dst_apic_id == oldid)
1025 			io_apic_ints[x].dst_apic_id = newid;
1026 		else if (io_apic_ints[x].dst_apic_id == newid)
1027 			io_apic_ints[x].dst_apic_id = oldid;
1028 	}
1029 
1030 	/* Swap physical APIC IDs in IO_TO_ID mappings */
1031 	for (oapic = 0; oapic < mp_napics; oapic++)
1032 		if (IO_TO_ID(oapic) == newid)
1033 			break;
1034 
1035 	if (oapic < mp_napics) {
1036 		printf("Changing APIC ID for IO APIC #%d from "
1037 		       "%d to %d in MP table\n",
1038 		       oapic, newid, oldid);
1039 		IO_TO_ID(oapic) = oldid;
1040 	}
1041 	IO_TO_ID(apic) = newid;
1042 }
1043 
1044 
1045 static void
1046 fix_id_to_io_mapping(void)
1047 {
1048 	int x;
1049 
1050 	for (x = 0; x < NAPICID; x++)
1051 		ID_TO_IO(x) = -1;
1052 
1053 	for (x = 0; x <= mp_naps; x++)
1054 		if (CPU_TO_ID(x) < NAPICID)
1055 			ID_TO_IO(CPU_TO_ID(x)) = x;
1056 
1057 	for (x = 0; x < mp_napics; x++)
1058 		if (IO_TO_ID(x) < NAPICID)
1059 			ID_TO_IO(IO_TO_ID(x)) = x;
1060 }
1061 
1062 
1063 static int
1064 first_free_apic_id(void)
1065 {
1066 	int freeid, x;
1067 
1068 	for (freeid = 0; freeid < NAPICID; freeid++) {
1069 		for (x = 0; x <= mp_naps; x++)
1070 			if (CPU_TO_ID(x) == freeid)
1071 				break;
1072 		if (x <= mp_naps)
1073 			continue;
1074 		for (x = 0; x < mp_napics; x++)
1075 			if (IO_TO_ID(x) == freeid)
1076 				break;
1077 		if (x < mp_napics)
1078 			continue;
1079 		return freeid;
1080 	}
1081 	return freeid;
1082 }
1083 
1084 
1085 static int
1086 io_apic_id_acceptable(int apic, int id)
1087 {
1088 	int cpu;		/* Logical CPU number */
1089 	int oapic;		/* Logical IO APIC number for other IO APIC */
1090 
1091 	if (id >= NAPICID)
1092 		return 0;	/* Out of range */
1093 
1094 	for (cpu = 0; cpu <= mp_naps; cpu++)
1095 		if (CPU_TO_ID(cpu) == id)
1096 			return 0;	/* Conflict with CPU */
1097 
1098 	for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1099 		if (IO_TO_ID(oapic) == id)
1100 			return 0;	/* Conflict with other APIC */
1101 
1102 	return 1;		/* ID is acceptable for IO APIC */
1103 }
1104 
1105 
1106 /*
1107  * fix up problems found while parsing the Intel MP specification table
1108  */
1109 static void
1110 fix_mp_table(void)
1111 {
1112 	int	x;
1113 	int	id;
1114 	int	bus_0 = 0;	/* Stop GCC warning */
1115 	int	bus_pci = 0;	/* Stop GCC warning */
1116 	int	num_pci_bus;
1117 	int	apic;		/* IO APIC unit number */
1118 	int     freeid;		/* Free physical APIC ID */
1119 	int	physid;		/* Current physical IO APIC ID */
1120 
1121 	/*
1122 	 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1123 	 * did it wrong.  The MP spec says that when more than 1 PCI bus
1124 	 * exists the BIOS must begin with bus entries for the PCI bus and use
1125 	 * actual PCI bus numbering.  This implies that when only 1 PCI bus
1126 	 * exists the BIOS can choose to ignore this ordering, and indeed many
1127 	 * MP motherboards do ignore it.  This causes a problem when the PCI
1128 	 * sub-system makes requests of the MP sub-system based on PCI bus
1129 	 * numbers.	So here we look for the situation and renumber the
1130 	 * busses and associated INTs in an effort to "make it right".
1131 	 */
1132 
1133 	/* find bus 0, PCI bus, count the number of PCI busses */
1134 	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1135 		if (bus_data[x].bus_id == 0) {
1136 			bus_0 = x;
1137 		}
1138 		if (bus_data[x].bus_type == PCI) {
1139 			++num_pci_bus;
1140 			bus_pci = x;
1141 		}
1142 	}
1143 	/*
1144 	 * bus_0 == slot of bus with ID of 0
1145 	 * bus_pci == slot of last PCI bus encountered
1146 	 */
1147 
1148 	/* check the 1 PCI bus case for sanity */
1149 	/* if it is number 0 all is well */
1150 	if (num_pci_bus == 1 &&
1151 	    bus_data[bus_pci].bus_id != 0) {
1152 
1153 		/* mis-numbered, swap with whichever bus uses slot 0 */
1154 
1155 		/* swap the bus entry types */
1156 		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1157 		bus_data[bus_0].bus_type = PCI;
1158 
1159 		/* swap each relevant INTerrupt entry */
1160 		id = bus_data[bus_pci].bus_id;
1161 		for (x = 0; x < nintrs; ++x) {
1162 			if (io_apic_ints[x].src_bus_id == id) {
1163 				io_apic_ints[x].src_bus_id = 0;
1164 			}
1165 			else if (io_apic_ints[x].src_bus_id == 0) {
1166 				io_apic_ints[x].src_bus_id = id;
1167 			}
1168 		}
1169 	}
1170 
1171 	/* Assign IO APIC IDs.
1172 	 *
1173 	 * First try the existing ID. If a conflict is detected, try
1174 	 * the ID in the MP table.  If a conflict is still detected, find
1175 	 * a free id.
1176 	 *
1177 	 * We cannot use the ID_TO_IO table before all conflicts have been
1178 	 * resolved and the table has been corrected.
1179 	 */
1180 	for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1181 
1182 		/* First try to use the value set by the BIOS */
1183 		physid = io_apic_get_id(apic);
1184 		if (io_apic_id_acceptable(apic, physid)) {
1185 			if (IO_TO_ID(apic) != physid)
1186 				swap_apic_id(apic, IO_TO_ID(apic), physid);
1187 			continue;
1188 		}
1189 
1190 		/* Then check if the value in the MP table is acceptable */
1191 		if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1192 			continue;
1193 
1194 		/* Last resort, find a free APIC ID and use it */
1195 		freeid = first_free_apic_id();
1196 		if (freeid >= NAPICID)
1197 			panic("No free physical APIC IDs found");
1198 
1199 		if (io_apic_id_acceptable(apic, freeid)) {
1200 			swap_apic_id(apic, IO_TO_ID(apic), freeid);
1201 			continue;
1202 		}
1203 		panic("Free physical APIC ID not usable");
1204 	}
1205 	fix_id_to_io_mapping();
1206 }
1207 
1208 
1209 /* Assign low level interrupt handlers */
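/*
 * Three passes over io_apic_ints[]: ISA/EISA interrupts first get the IRQ
 * number of their source bus IRQ, then unassigned pins on IO APIC #0 get an
 * IRQ equal to their intpin number, and finally everything left is packed
 * into the lowest free int_to_apicintpin[] slots.
 */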
1210 static void
1211 setup_apic_irq_mapping(void)
1212 {
1213 	int	x;
1214 	int	int_vector;
1215 
1216 	/* Clear array */
1217 	for (x = 0; x < APIC_INTMAPSIZE; x++) {
1218 		int_to_apicintpin[x].ioapic = -1;
1219 		int_to_apicintpin[x].int_pin = 0;
1220 		int_to_apicintpin[x].apic_address = NULL;
1221 		int_to_apicintpin[x].redirindex = 0;
1222 	}
1223 
1224 	/* First assign ISA/EISA interrupts */
1225 	for (x = 0; x < nintrs; x++) {
1226 		int_vector = io_apic_ints[x].src_bus_irq;
1227 		if (int_vector < APIC_INTMAPSIZE &&
1228 		    io_apic_ints[x].int_vector == 0xff &&
1229 		    int_to_apicintpin[int_vector].ioapic == -1 &&
1230 		    (apic_int_is_bus_type(x, ISA) ||
1231 		     apic_int_is_bus_type(x, EISA)) &&
1232 		    io_apic_ints[x].int_type == 0) {
1233 			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1234 					io_apic_ints[x].dst_apic_int,
1235 					int_vector);
1236 		}
1237 	}
1238 
1239 	/* Assign interrupts on first 24 intpins on IOAPIC #0 */
1240 	for (x = 0; x < nintrs; x++) {
1241 		int_vector = io_apic_ints[x].dst_apic_int;
1242 		if (int_vector < APIC_INTMAPSIZE &&
1243 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1244 		    io_apic_ints[x].int_vector == 0xff &&
1245 		    int_to_apicintpin[int_vector].ioapic == -1 &&
1246 		    (io_apic_ints[x].int_type == 0 ||
1247 		     io_apic_ints[x].int_type == 3)) {
1248 			assign_apic_irq(0,
1249 					io_apic_ints[x].dst_apic_int,
1250 					int_vector);
1251 		}
1252 	}
1253 	/*
1254 	 * Assign interrupts for remaining intpins.
1255 	 * Skip IOAPIC #0 intpin 0 if the type is ExtInt, since this indicates
1256 	 * that an entry for ISA/EISA irq 0 exists, and a fallback to mixed mode
1257 	 * due to 8254 interrupts not being delivered can reuse that low level
1258 	 * interrupt handler.
1259 	 */
1260 	int_vector = 0;
1261 	while (int_vector < APIC_INTMAPSIZE &&
1262 	       int_to_apicintpin[int_vector].ioapic != -1)
1263 		int_vector++;
1264 	for (x = 0; x < nintrs && int_vector < APIC_INTMAPSIZE; x++) {
1265 		if ((io_apic_ints[x].int_type == 0 ||
1266 		     (io_apic_ints[x].int_type == 3 &&
1267 		      (io_apic_ints[x].dst_apic_id != IO_TO_ID(0) ||
1268 		       io_apic_ints[x].dst_apic_int != 0))) &&
1269 		    io_apic_ints[x].int_vector == 0xff) {
1270 			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1271 					io_apic_ints[x].dst_apic_int,
1272 					int_vector);
1273 			int_vector++;
1274 			while (int_vector < APIC_INTMAPSIZE &&
1275 			       int_to_apicintpin[int_vector].ioapic != -1)
1276 				int_vector++;
1277 		}
1278 	}
1279 }
1280 
1281 
1282 static int
1283 processor_entry(proc_entry_ptr entry, int cpu)
1284 {
1285 	/* check for usability */
1286 	if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1287 		return 0;
1288 
1289 	if(entry->apic_id >= NAPICID)
1290 		panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1291 	/* check for BSP flag */
1292 	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1293 		boot_cpu_id = entry->apic_id;
1294 		CPU_TO_ID(0) = entry->apic_id;
1295 		ID_TO_CPU(entry->apic_id) = 0;
1296 		return 0;	/* it's already been counted */
1297 	}
1298 
1299 	/* add another AP to list, if less than max number of CPUs */
1300 	else if (cpu < NCPU) {
1301 		CPU_TO_ID(cpu) = entry->apic_id;
1302 		ID_TO_CPU(entry->apic_id) = cpu;
1303 		return 1;
1304 	}
1305 
1306 	return 0;
1307 }
1308 
1309 
1310 static int
1311 bus_entry(bus_entry_ptr entry, int bus)
1312 {
1313 	int     x;
1314 	char    c, name[8];
1315 
1316 	/* encode the name into an index */
1317 	for (x = 0; x < 6; ++x) {
1318 		if ((c = entry->bus_type[x]) == ' ')
1319 			break;
1320 		name[x] = c;
1321 	}
1322 	name[x] = '\0';
1323 
1324 	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1325 		panic("unknown bus type: '%s'", name);
1326 
1327 	bus_data[bus].bus_id = entry->bus_id;
1328 	bus_data[bus].bus_type = x;
1329 
1330 	return 1;
1331 }
1332 
1333 
1334 static int
1335 io_apic_entry(io_apic_entry_ptr entry, int apic)
1336 {
1337 	if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1338 		return 0;
1339 
1340 	IO_TO_ID(apic) = entry->apic_id;
1341 	if (entry->apic_id < NAPICID)
1342 		ID_TO_IO(entry->apic_id) = apic;
1343 
1344 	return 1;
1345 }
1346 
1347 
1348 static int
1349 lookup_bus_type(char *name)
1350 {
1351 	int     x;
1352 
1353 	for (x = 0; x < MAX_BUSTYPE; ++x)
1354 		if (strcmp(bus_type_table[x].name, name) == 0)
1355 			return bus_type_table[x].type;
1356 
1357 	return UNKNOWN_BUSTYPE;
1358 }
1359 
1360 
1361 static int
1362 int_entry(int_entry_ptr entry, int intr)
1363 {
1364 	int apic;
1365 
1366 	io_apic_ints[intr].int_type = entry->int_type;
1367 	io_apic_ints[intr].int_flags = entry->int_flags;
1368 	io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1369 	io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1370 	if (entry->dst_apic_id == 255) {
1371 		/* This signal goes to all IO APICs.  Select an IO APIC
1372 		   with a sufficient number of interrupt pins */
1373 		for (apic = 0; apic < mp_napics; apic++)
1374 			if (((io_apic_read(apic, IOAPIC_VER) &
1375 			      IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1376 			    entry->dst_apic_int)
1377 				break;
1378 		if (apic < mp_napics)
1379 			io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1380 		else
1381 			io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1382 	} else
1383 		io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1384 	io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1385 
1386 	return 1;
1387 }
1388 
1389 
1390 static int
1391 apic_int_is_bus_type(int intr, int bus_type)
1392 {
1393 	int     bus;
1394 
1395 	for (bus = 0; bus < mp_nbusses; ++bus)
1396 		if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1397 		    && ((int) bus_data[bus].bus_type == bus_type))
1398 			return 1;
1399 
1400 	return 0;
1401 }
1402 
1403 
1404 /*
1405  * Given a traditional ISA INT mask, return an APIC mask.
1406  */
1407 u_int
1408 isa_apic_mask(u_int isa_mask)
1409 {
1410 	int isa_irq;
1411 	int apic_pin;
1412 
1413 #if defined(SKIP_IRQ15_REDIRECT)
1414 	if (isa_mask == (1 << 15)) {
1415 		printf("skipping ISA IRQ15 redirect\n");
1416 		return isa_mask;
1417 	}
1418 #endif  /* SKIP_IRQ15_REDIRECT */
1419 
1420 	isa_irq = ffs(isa_mask);		/* find its bit position */
1421 	if (isa_irq == 0)			/* doesn't exist */
1422 		return 0;
1423 	--isa_irq;				/* make it zero based */
1424 
1425 	apic_pin = isa_apic_irq(isa_irq);	/* look for APIC connection */
1426 	if (apic_pin == -1)
1427 		return 0;
1428 
1429 	return (1 << apic_pin);			/* convert pin# to a mask */
1430 }
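/*
 * Example (illustrative): isa_apic_mask(1 << 14) finds the APIC interrupt
 * that ISA IRQ 14 was routed to and returns a mask with only that bit set,
 * or 0 if IRQ 14 does not appear in the MP table.
 */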
1431 
1432 
1433 /*
1434  * Determine which APIC pin an ISA/EISA INT is attached to.
1435  */
1436 #define INTTYPE(I)	(io_apic_ints[(I)].int_type)
1437 #define INTPIN(I)	(io_apic_ints[(I)].dst_apic_int)
1438 #define INTIRQ(I)	(io_apic_ints[(I)].int_vector)
1439 #define INTAPIC(I)	(ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1440 
1441 #define SRCBUSIRQ(I)	(io_apic_ints[(I)].src_bus_irq)
1442 int
1443 isa_apic_irq(int isa_irq)
1444 {
1445 	int     intr;
1446 
1447 	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
1448 		if (INTTYPE(intr) == 0) {		/* standard INT */
1449 			if (SRCBUSIRQ(intr) == isa_irq) {
1450 				if (apic_int_is_bus_type(intr, ISA) ||
1451 			            apic_int_is_bus_type(intr, EISA))
1452 					return INTIRQ(intr);	/* found */
1453 			}
1454 		}
1455 	}
1456 	return -1;					/* NOT found */
1457 }
1458 
1459 
1460 /*
1461  * Determine which APIC pin a PCI INT is attached to.
1462  */
1463 #define SRCBUSID(I)	(io_apic_ints[(I)].src_bus_id)
1464 #define SRCBUSDEVICE(I)	((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1465 #define SRCBUSLINE(I)	(io_apic_ints[(I)].src_bus_irq & 0x03)
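/*
 * For PCI source busses the MP spec packs the interrupt source into
 * src_bus_irq: bits 0-1 are the INT line (INTA#..INTD# == 0..3) and bits 2-6
 * are the device number, e.g. device 9 INTA# is (9 << 2) | 0 == 0x24.
 * pci_apic_irq() below takes pciInt 1-based (1 == INTA#), hence the decrement.
 */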
1466 int
1467 pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1468 {
1469 	int     intr;
1470 
1471 	--pciInt;					/* zero based */
1472 
1473 	for (intr = 0; intr < nintrs; ++intr)		/* check each record */
1474 		if ((INTTYPE(intr) == 0)		/* standard INT */
1475 		    && (SRCBUSID(intr) == pciBus)
1476 		    && (SRCBUSDEVICE(intr) == pciDevice)
1477 		    && (SRCBUSLINE(intr) == pciInt))	/* a candidate IRQ */
1478 			if (apic_int_is_bus_type(intr, PCI))
1479 				return INTIRQ(intr);	/* exact match */
1480 
1481 	return -1;					/* NOT found */
1482 }
1483 
1484 int
1485 next_apic_irq(int irq)
1486 {
1487 	int intr, ointr;
1488 	int bus, bustype;
1489 
1490 	bus = 0;
1491 	bustype = 0;
1492 	for (intr = 0; intr < nintrs; intr++) {
1493 		if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1494 			continue;
1495 		bus = SRCBUSID(intr);
1496 		bustype = apic_bus_type(bus);
1497 		if (bustype != ISA &&
1498 		    bustype != EISA &&
1499 		    bustype != PCI)
1500 			continue;
1501 		break;
1502 	}
1503 	if (intr >= nintrs) {
1504 		return -1;
1505 	}
1506 	for (ointr = intr + 1; ointr < nintrs; ointr++) {
1507 		if (INTTYPE(ointr) != 0)
1508 			continue;
1509 		if (bus != SRCBUSID(ointr))
1510 			continue;
1511 		if (bustype == PCI) {
1512 			if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1513 				continue;
1514 			if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1515 				continue;
1516 		}
1517 		if (bustype == ISA || bustype == EISA) {
1518 			if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1519 				continue;
1520 		}
1521 		if (INTPIN(intr) == INTPIN(ointr))
1522 			continue;
1523 		break;
1524 	}
1525 	if (ointr >= nintrs) {
1526 		return -1;
1527 	}
1528 	return INTIRQ(ointr);
1529 }
1530 #undef SRCBUSLINE
1531 #undef SRCBUSDEVICE
1532 #undef SRCBUSID
1533 #undef SRCBUSIRQ
1534 
1535 #undef INTPIN
1536 #undef INTIRQ
1537 #undef INTAPIC
1538 #undef INTTYPE
1539 
1540 
1541 /*
1542  * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1543  *
1544  * XXX FIXME:
1545  *  Exactly what this means is unclear at this point.  It is a solution
1546  *  for motherboards that redirect the MBIRQ0 pin.  Generically a motherboard
1547  *  could route any of the ISA INTs to upper (>15) IRQ values.  But most would
1548  *  NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1549  *  option.
1550  */
1551 int
1552 undirect_isa_irq(int rirq)
1553 {
1554 #if defined(READY)
1555 	if (bootverbose)
1556 	    printf("Freeing redirected ISA irq %d.\n", rirq);
1557 	/** FIXME: tickle the MB redirector chip */
1558 	return ???;
1559 #else
1560 	if (bootverbose)
1561 	    printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1562 	return 0;
1563 #endif  /* READY */
1564 }
1565 
1566 
1567 /*
1568  * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1569  */
1570 int
1571 undirect_pci_irq(int rirq)
1572 {
1573 #if defined(READY)
1574 	if (bootverbose)
1575 		printf("Freeing redirected PCI irq %d.\n", rirq);
1576 
1577 	/** FIXME: tickle the MB redirector chip */
1578 	return ???;
1579 #else
1580 	if (bootverbose)
1581 		printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1582 		       rirq);
1583 	return 0;
1584 #endif  /* READY */
1585 }
1586 
1587 
1588 /*
1589  * given a bus ID, return:
1590  *  the bus type if found
1591  *  -1 if NOT found
1592  */
1593 int
1594 apic_bus_type(int id)
1595 {
1596 	int     x;
1597 
1598 	for (x = 0; x < mp_nbusses; ++x)
1599 		if (bus_data[x].bus_id == id)
1600 			return bus_data[x].bus_type;
1601 
1602 	return -1;
1603 }
1604 
1605 
1606 /*
1607  * given a LOGICAL APIC# and pin#, return:
1608  *  the associated src bus ID if found
1609  *  -1 if NOT found
1610  */
1611 int
1612 apic_src_bus_id(int apic, int pin)
1613 {
1614 	int     x;
1615 
1616 	/* search each of the possible INTerrupt sources */
1617 	for (x = 0; x < nintrs; ++x)
1618 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1619 		    (pin == io_apic_ints[x].dst_apic_int))
1620 			return (io_apic_ints[x].src_bus_id);
1621 
1622 	return -1;		/* NOT found */
1623 }
1624 
1625 
1626 /*
1627  * given a LOGICAL APIC# and pin#, return:
1628  *  the associated src bus IRQ if found
1629  *  -1 if NOT found
1630  */
1631 int
1632 apic_src_bus_irq(int apic, int pin)
1633 {
1634 	int     x;
1635 
1636 	for (x = 0; x < nintrs; x++)
1637 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1638 		    (pin == io_apic_ints[x].dst_apic_int))
1639 			return (io_apic_ints[x].src_bus_irq);
1640 
1641 	return -1;		/* NOT found */
1642 }
1643 
1644 
1645 /*
1646  * given a LOGICAL APIC# and pin#, return:
1647  *  the associated INTerrupt type if found
1648  *  -1 if NOT found
1649  */
1650 int
1651 apic_int_type(int apic, int pin)
1652 {
1653 	int     x;
1654 
1655 	/* search each of the possible INTerrupt sources */
1656 	for (x = 0; x < nintrs; ++x)
1657 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1658 		    (pin == io_apic_ints[x].dst_apic_int))
1659 			return (io_apic_ints[x].int_type);
1660 
1661 	return -1;		/* NOT found */
1662 }
1663 
1664 int
1665 apic_irq(int apic, int pin)
1666 {
1667 	int x;
1668 	int res;
1669 
1670 	for (x = 0; x < nintrs; ++x)
1671 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1672 		    (pin == io_apic_ints[x].dst_apic_int)) {
1673 			res = io_apic_ints[x].int_vector;
1674 			if (res == 0xff)
1675 				return -1;
1676 			if (apic != int_to_apicintpin[res].ioapic)
1677 				panic("apic_irq: inconsistent table");
1678 			if (pin != int_to_apicintpin[res].int_pin)
1679 				panic("apic_irq inconsistent table (2)");
1680 			return res;
1681 		}
1682 	return -1;
1683 }
1684 
1685 
1686 /*
1687  * given a LOGICAL APIC# and pin#, return:
1688  *  the associated trigger mode if found
1689  *  -1 if NOT found
1690  */
1691 int
1692 apic_trigger(int apic, int pin)
1693 {
1694 	int     x;
1695 
1696 	/* search each of the possible INTerrupt sources */
1697 	for (x = 0; x < nintrs; ++x)
1698 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1699 		    (pin == io_apic_ints[x].dst_apic_int))
1700 			return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1701 
1702 	return -1;		/* NOT found */
1703 }
1704 
1705 
1706 /*
1707  * given a LOGICAL APIC# and pin#, return:
1708  *  the associated 'active' level if found
1709  *  -1 if NOT found
1710  */
1711 int
1712 apic_polarity(int apic, int pin)
1713 {
1714 	int     x;
1715 
1716 	/* search each of the possible INTerrupt sources */
1717 	for (x = 0; x < nintrs; ++x)
1718 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1719 		    (pin == io_apic_ints[x].dst_apic_int))
1720 			return (io_apic_ints[x].int_flags & 0x03);
1721 
1722 	return -1;		/* NOT found */
1723 }
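/*
 * int_flags layout per the MP spec: bits 0-1 are polarity (00 = conforms to
 * bus, 01 = active high, 11 = active low) and bits 2-3 are trigger mode
 * (00 = conforms to bus, 01 = edge, 11 = level); apic_polarity() and
 * apic_trigger() above simply extract those fields.
 */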
1724 
1725 
1726 /*
1727  * set data according to MP defaults
1728  * FIXME: probably not complete yet...
1729  */
1730 static void
1731 default_mp_table(int type)
1732 {
1733 	int     ap_cpu_id;
1734 #if defined(APIC_IO)
1735 	int     io_apic_id;
1736 	int     pin;
1737 #endif	/* APIC_IO */
1738 
1739 #if 0
1740 	printf("  MP default config type: %d\n", type);
1741 	switch (type) {
1742 	case 1:
1743 		printf("   bus: ISA, APIC: 82489DX\n");
1744 		break;
1745 	case 2:
1746 		printf("   bus: EISA, APIC: 82489DX\n");
1747 		break;
1748 	case 3:
1749 		printf("   bus: EISA, APIC: 82489DX\n");
1750 		break;
1751 	case 4:
1752 		printf("   bus: MCA, APIC: 82489DX\n");
1753 		break;
1754 	case 5:
1755 		printf("   bus: ISA+PCI, APIC: Integrated\n");
1756 		break;
1757 	case 6:
1758 		printf("   bus: EISA+PCI, APIC: Integrated\n");
1759 		break;
1760 	case 7:
1761 		printf("   bus: MCA+PCI, APIC: Integrated\n");
1762 		break;
1763 	default:
1764 		printf("   future type\n");
1765 		break;
1766 		/* NOTREACHED */
1767 	}
1768 #endif	/* 0 */
1769 
1770 	boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
1771 	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1772 
1773 	/* BSP */
1774 	CPU_TO_ID(0) = boot_cpu_id;
1775 	ID_TO_CPU(boot_cpu_id) = 0;
1776 
1777 	/* one and only AP */
1778 	CPU_TO_ID(1) = ap_cpu_id;
1779 	ID_TO_CPU(ap_cpu_id) = 1;
1780 
1781 #if defined(APIC_IO)
1782 	/* one and only IO APIC */
1783 	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1784 
1785 	/*
1786 	 * sanity check, refer to MP spec section 3.6.6, last paragraph
1787 	 * necessary as some hardware isn't properly setting up the IO APIC
1788 	 */
1789 #if defined(REALLY_ANAL_IOAPICID_VALUE)
1790 	if (io_apic_id != 2) {
1791 #else
1792 	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1793 #endif	/* REALLY_ANAL_IOAPICID_VALUE */
1794 		io_apic_set_id(0, 2);
1795 		io_apic_id = 2;
1796 	}
1797 	IO_TO_ID(0) = io_apic_id;
1798 	ID_TO_IO(io_apic_id) = 0;
1799 #endif	/* APIC_IO */
1800 
1801 	/* fill out bus entries */
1802 	switch (type) {
1803 	case 1:
1804 	case 2:
1805 	case 3:
1806 	case 4:
1807 	case 5:
1808 	case 6:
1809 	case 7:
1810 		bus_data[0].bus_id = default_data[type - 1][1];
1811 		bus_data[0].bus_type = default_data[type - 1][2];
1812 		bus_data[1].bus_id = default_data[type - 1][3];
1813 		bus_data[1].bus_type = default_data[type - 1][4];
1814 		break;
1815 
1816 	/* case 4: case 7:		   MCA NOT supported */
1817 	default:		/* illegal/reserved */
1818 		panic("BAD default MP config: %d", type);
1819 		/* NOTREACHED */
1820 	}
1821 
1822 #if defined(APIC_IO)
1823 	/* general cases from MP v1.4, table 5-2 */
1824 	for (pin = 0; pin < 16; ++pin) {
1825 		io_apic_ints[pin].int_type = 0;
1826 		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
1827 		io_apic_ints[pin].src_bus_id = 0;
1828 		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
1829 		io_apic_ints[pin].dst_apic_id = io_apic_id;
1830 		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
1831 	}
1832 
1833 	/* special cases from MP v1.4, table 5-2 */
1834 	if (type == 2) {
1835 		io_apic_ints[2].int_type = 0xff;	/* N/C */
1836 		io_apic_ints[13].int_type = 0xff;	/* N/C */
1837 #if !defined(APIC_MIXED_MODE)
1838 		/** FIXME: ??? */
1839 		panic("sorry, can't support type 2 default yet");
1840 #endif	/* APIC_MIXED_MODE */
1841 	}
1842 	else
1843 		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */
1844 
1845 	if (type == 7)
1846 		io_apic_ints[0].int_type = 0xff;	/* N/C */
1847 	else
1848 		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
1849 #endif	/* APIC_IO */
1850 }
1851 
1852 
1853 /*
1854  * initialize all the SMP locks
1855  */
1856 
1857 /* critical region around IO APIC, apic_imen */
1858 struct simplelock	imen_lock;
1859 
1860 /* critical region around splxx(), cpl, cml, cil, ipending */
1861 struct simplelock	cpl_lock;
1862 
1863 /* Make FAST_INTR() routines sequential */
1864 struct simplelock	fast_intr_lock;
1865 
1866 /* critical region around INTR() routines */
1867 struct simplelock	intr_lock;
1868 
1869 /* lock regions protected in UP kernel via cli/sti */
1870 struct simplelock	mpintr_lock;
1871 
1872 /* lock region used by kernel profiling */
1873 struct simplelock	mcount_lock;
1874 
1875 #ifdef USE_COMLOCK
1876 /* locks com (tty) data/hardware accesses: a FASTINTR() */
1877 struct simplelock	com_lock;
1878 #endif /* USE_COMLOCK */
1879 
1880 #ifdef USE_CLOCKLOCK
1881 /* lock regions around the clock hardware */
1882 struct simplelock	clock_lock;
1883 #endif /* USE_CLOCKLOCK */
1884 
1885 /* lock around the MP rendezvous */
1886 static struct simplelock smp_rv_lock;
1887 
1888 static void
1889 init_locks(void)
1890 {
1891 	/*
1892 	 * Get the initial mp_lock with a count of 1 for the BSP.
1893 	 * This uses a LOGICAL cpu ID, ie BSP == 0.
1894 	 */
1895 	mp_lock = 0x00000001;
1896 
1897 #if 0
1898 	/* ISR uses its own "giant lock" */
1899 	isr_lock = FREE_LOCK;
1900 #endif
1901 
1902 #if defined(APIC_INTR_DIAGNOSTIC) && defined(APIC_INTR_DIAGNOSTIC_IRQ)
1903 	s_lock_init((struct simplelock*)&apic_itrace_debuglock);
1904 #endif
1905 
1906 	s_lock_init((struct simplelock*)&mpintr_lock);
1907 
1908 	s_lock_init((struct simplelock*)&mcount_lock);
1909 
1910 	s_lock_init((struct simplelock*)&fast_intr_lock);
1911 	s_lock_init((struct simplelock*)&intr_lock);
1912 	s_lock_init((struct simplelock*)&imen_lock);
1913 	s_lock_init((struct simplelock*)&cpl_lock);
1914 	s_lock_init(&smp_rv_lock);
1915 
1916 #ifdef USE_COMLOCK
1917 	s_lock_init((struct simplelock*)&com_lock);
1918 #endif /* USE_COMLOCK */
1919 #ifdef USE_CLOCKLOCK
1920 	s_lock_init((struct simplelock*)&clock_lock);
1921 #endif /* USE_CLOCKLOCK */
1922 }
1923 
1924 
1925 /* Wait for all APs to be fully initialized */
1926 extern int wait_ap(unsigned int);
1927 
1928 /*
1929  * start each AP in our list
1930  */
1931 static int
1932 start_all_aps(u_int boot_addr)
1933 {
1934 	int     x, i, pg;
1935 	u_char  mpbiosreason;
1936 	u_long  mpbioswarmvec;
1937 	struct globaldata *gd;
1938 	char *stack;
1939 
1940 	POSTCODE(START_ALL_APS_POST);
1941 
1942 	/* initialize BSP's local APIC */
1943 	apic_initialize();
1944 	bsp_apic_ready = 1;
1945 
1946 	/* install the AP 1st level boot code */
1947 	install_ap_tramp(boot_addr);
1948 
1949 
1950 	/* save the current value of the warm-start vector */
1951 	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
1952 #ifndef PC98
1953 	outb(CMOS_REG, BIOS_RESET);
1954 	mpbiosreason = inb(CMOS_DATA);
1955 #endif
1956 
1957 	/* record BSP in CPU map */
1958 	all_cpus = 1;
1959 
1960 	/* set up 0 -> 4MB P==V mapping for AP boot */
1961 	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
1962 	invltlb();
1963 
1964 	/* start each AP */
1965 	for (x = 1; x <= mp_naps; ++x) {
1966 
1967 		/* This is a bit verbose; it will go away soon.  */
1968 
1969 		/* first page of AP's private space */
1970 		pg = x * i386_btop(sizeof(struct privatespace));
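		/*
		 * Layout of the per-CPU private pages mapped below, relative
		 * to SMPpt[pg]: +0 globaldata, +1..+3 prv_CMAP1..3,
		 * +4 prv_PMAP1, +5..+5+UPAGES-1 the idle stack.
		 */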
1971 
1972 		/* allocate a new private data page */
1973 		gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
1974 
1975 		/* wire it into the private page table page */
1976 		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
1977 
1978 		/* allocate and set up an idle stack data page */
1979 		stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
1980 		for (i = 0; i < UPAGES; i++)
1981 			SMPpt[pg + 5 + i] = (pt_entry_t)
1982 			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1983 
1984 		SMPpt[pg + 1] = 0;		/* *prv_CMAP1 */
1985 		SMPpt[pg + 2] = 0;		/* *prv_CMAP2 */
1986 		SMPpt[pg + 3] = 0;		/* *prv_CMAP3 */
1987 		SMPpt[pg + 4] = 0;		/* *prv_PMAP1 */
1988 
1989 		/* prime data page for it to use */
1990 		gd->gd_cpuid = x;
1991 		gd->gd_cpu_lockid = x << 24;
1992 		gd->gd_prv_CMAP1 = &SMPpt[pg + 1];
1993 		gd->gd_prv_CMAP2 = &SMPpt[pg + 2];
1994 		gd->gd_prv_CMAP3 = &SMPpt[pg + 3];
1995 		gd->gd_prv_PMAP1 = &SMPpt[pg + 4];
1996 		gd->gd_prv_CADDR1 = SMP_prvspace[x].CPAGE1;
1997 		gd->gd_prv_CADDR2 = SMP_prvspace[x].CPAGE2;
1998 		gd->gd_prv_CADDR3 = SMP_prvspace[x].CPAGE3;
1999 		gd->gd_prv_PADDR1 = (unsigned *)SMP_prvspace[x].PPAGE1;
2000 
2001 		/* setup a vector to our boot code */
2002 		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
2003 		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
2004 #ifndef PC98
2005 		outb(CMOS_REG, BIOS_RESET);
2006 		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
2007 #endif
2008 
2009 		bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
2010 		bootAP = x;
2011 
2012 		/* attempt to start the Application Processor */
2013 		CHECK_INIT(99);	/* setup checkpoints */
2014 		if (!start_ap(x, boot_addr)) {
2015 			printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2016 			CHECK_PRINT("trace");	/* show checkpoints */
2017 			/* better panic as the AP may be running loose */
2018 			printf("panic y/n? [y] ");
2019 			if (cngetc() != 'n')
2020 				panic("bye-bye");
2021 		}
2022 		CHECK_PRINT("trace");		/* show checkpoints */
2023 
2024 		/* record its version info */
2025 		cpu_apic_versions[x] = cpu_apic_versions[0];
2026 
2027 		all_cpus |= (1 << x);		/* record AP in CPU map */
2028 	}
2029 
2030 	/* build our map of 'other' CPUs */
2031 	other_cpus = all_cpus & ~(1 << cpuid);
2032 
2033 	/* fill in our (BSP) APIC version */
2034 	cpu_apic_versions[0] = lapic.version;
2035 
2036 	/* restore the warmstart vector */
2037 	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
2038 #ifndef PC98
2039 	outb(CMOS_REG, BIOS_RESET);
2040 	outb(CMOS_DATA, mpbiosreason);
2041 #endif
2042 
2043 	/*
2044 	 * Set up the idle context for the BSP.  Similar to above except
2045 	 * that some was done by locore, some by pmap.c, and some is implicit
2046 	 * because the BSP is cpu#0 and the page is initially zero, and also
2047 	 * because we can refer to variables by name on the BSP.
2048 	 */
2049 
2050 	/* Allocate and setup BSP idle stack */
2051 	stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
2052 	for (i = 0; i < UPAGES; i++)
2053 		SMPpt[5 + i] = (pt_entry_t)
2054 		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
2055 
2056 	*(int *)PTD = 0;
2057 	pmap_set_opt();
2058 
2059 	/* number of APs actually started */
2060 	return mp_ncpus - 1;
2061 }
2062 
2063 
2064 /*
2065  * load the 1st level AP boot code into base memory.
2066  */
2067 
2068 /* targets for relocation */
2069 extern void bigJump(void);
2070 extern void bootCodeSeg(void);
2071 extern void bootDataSeg(void);
2072 extern void MPentry(void);
2073 extern u_int MP_GDT;
2074 extern u_int mp_gdtbase;
2075 
2076 static void
2077 install_ap_tramp(u_int boot_addr)
2078 {
2079 	int     x;
2080 	int     size = *(int *) ((u_long) & bootMP_size);
2081 	u_char *src = (u_char *) ((u_long) bootMP);
2082 	u_char *dst = (u_char *) boot_addr + KERNBASE;
2083 	u_int   boot_base = (u_int) bootMP;
2084 	u_int8_t *dst8;
2085 	u_int16_t *dst16;
2086 	u_int32_t *dst32;
2087 
2088 	POSTCODE(INSTALL_AP_TRAMP_POST);
2089 
2090 	for (x = 0; x < size; ++x)
2091 		*dst++ = *src++;
2092 
2093 	/*
2094 	 * modify addresses in code we just moved to basemem. unfortunately we
2095 	 * need fairly detailed info about mpboot.s for this to work.  changes
2096 	 * to mpboot.s might require changes here.
2097 	 */
2098 
2099 	/* boot code is located in KERNEL space */
2100 	dst = (u_char *) boot_addr + KERNBASE;
2101 
2102 	/* modify the lgdt arg */
2103 	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2104 	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2105 
2106 	/* modify the ljmp target for MPentry() */
2107 	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2108 	*dst32 = ((u_int) MPentry - KERNBASE);
2109 
2110 	/* modify the target for boot code segment */
2111 	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2112 	dst8 = (u_int8_t *) (dst16 + 1);
2113 	*dst16 = (u_int) boot_addr & 0xffff;
2114 	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2115 
2116 	/* modify the target for boot data segment */
2117 	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2118 	dst8 = (u_int8_t *) (dst16 + 1);
2119 	*dst16 = (u_int) boot_addr & 0xffff;
2120 	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2121 }
2122 
2123 
2124 /*
2125  * this function starts the AP (application processor) given by the
2126  * logical CPU number 'logical_cpu' (its physical APIC ID is looked up
2127  * via CPU_TO_ID()).  It does quite a "song and dance" to accomplish
2128  * this, which is necessary because of the nuances of the different
2129  * hardware we might encounter.  It ain't pretty, but it seems to work.
2130  */
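/*
 * (The sequence below is essentially the INIT / STARTUP / STARTUP wake-up
 *  procedure recommended by the Intel MultiProcessor Specification.)
 */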
2131 static int
2132 start_ap(int logical_cpu, u_int boot_addr)
2133 {
2134 	int     physical_cpu;
2135 	int     vector;
2136 	int     cpus;
2137 	u_long  icr_lo, icr_hi;
2138 
2139 	POSTCODE(START_AP_POST);
2140 
2141 	/* get the PHYSICAL APIC ID# */
2142 	physical_cpu = CPU_TO_ID(logical_cpu);
2143 
2144 	/* calculate the vector */
2145 	vector = (boot_addr >> 12) & 0xff;
2146 
2147 	/* used as a watchpoint to signal AP startup */
2148 	cpus = mp_ncpus;
2149 
2150 	/*
2151 	 * first we do an INIT/RESET IPI.  This INIT IPI might be run,
2152 	 * resetting and running the target CPU, OR it might be latched (P5
2153 	 * bug), leaving the CPU waiting for a STARTUP IPI, OR it might be
2154 	 * ignored.
2155 	 */
2156 
2157 	/* setup the address for the target AP */
2158 	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
2159 	icr_hi |= (physical_cpu << 24);
2160 	lapic.icr_hi = icr_hi;
2161 
2162 	/* do an INIT IPI: assert RESET */
2163 	icr_lo = lapic.icr_lo & 0xfff00000;
2164 	lapic.icr_lo = icr_lo | 0x0000c500;
2165 
2166 	/* wait for pending status end */
2167 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2168 		 /* spin */ ;
2169 
2170 	/* do an INIT IPI: deassert RESET */
2171 	lapic.icr_lo = icr_lo | 0x00008500;
2172 
2173 	/* wait for pending status end */
2174 	u_sleep(10000);		/* wait ~10mS */
2175 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2176 		 /* spin */ ;
2177 
2178 	/*
2179 	 * next we do a STARTUP IPI: the previous INIT IPI might still be
2180 	 * latched (P5 bug), in which case this 1st STARTUP would terminate
2181 	 * immediately and the previously started INIT IPI would continue, OR
2182 	 * the previous INIT IPI has already run and this STARTUP IPI will
2183 	 * run, OR the previous INIT IPI was ignored and this STARTUP IPI
2184 	 * will run.
2185 	 */
2186 
2187 	/* do a STARTUP IPI */
2188 	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2189 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2190 		 /* spin */ ;
2191 	u_sleep(200);		/* wait ~200uS */
2192 
2193 	/*
2194 	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
2195 	 * the previous STARTUP IPI was cancelled by a latched INIT IPI, OR
2196 	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
2197 	 * recognized after hardware RESET or INIT IPI.
2198 	 */
2199 
2200 	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2201 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2202 		 /* spin */ ;
2203 	u_sleep(200);		/* wait ~200uS */
2204 
2205 	/* wait for it to start */
2206 	set_apic_timer(5000000);	/* == 5 seconds */
2207 	while (read_apic_timer())
2208 		if (mp_ncpus > cpus)
2209 			return 1;	/* return SUCCESS */
2210 
2211 	return 0;		/* return FAILURE */
2212 }
2213 
2214 
2215 /*
2216  * Flush the TLB on all other CPU's
2217  *
2218  * XXX: Needs to handshake and wait for completion before proceeding.
2219  */
2220 void
2221 smp_invltlb(void)
2222 {
2223 #if defined(APIC_IO)
2224 	if (smp_started && invltlb_ok)
2225 		all_but_self_ipi(XINVLTLB_OFFSET);
2226 #endif  /* APIC_IO */
2227 }
2228 
2229 void
2230 invlpg(u_int addr)
2231 {
2232 	__asm   __volatile("invlpg (%0)"::"r"(addr):"memory");
2233 
2234 	/* send a message to the other CPUs */
2235 	smp_invltlb();
2236 }
2237 
2238 void
2239 invltlb(void)
2240 {
2241 	u_long  temp;
2242 
2243 	/*
2244 	 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
2245 	 * inlined.
2246 	 */
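	/* Reloading %cr3 with its current value flushes the non-global TLB. */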
2247 	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");
2248 
2249 	/* send a message to the other CPUs */
2250 	smp_invltlb();
2251 }
2252 
2253 
2254 /*
2255  * When called the executing CPU will send an IPI to all other CPUs
2256  *  requesting that they halt execution.
2257  *
2258  * Usually (but not necessarily) called with 'other_cpus' as its arg.
2259  *
2260  *  - Signals all CPUs in map to stop.
2261  *  - Waits for each to stop.
2262  *
2263  * Returns:
2264  *  -1: error
2265  *   0: NA
2266  *   1: ok
2267  *
2268  * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
2269  *            from executing at same time.
2270  */
2271 int
2272 stop_cpus(u_int map)
2273 {
2274 	if (!smp_started)
2275 		return 0;
2276 
2277 	/* send the Xcpustop IPI to all CPUs in map */
2278 	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2279 
2280 	while ((stopped_cpus & map) != map)
2281 		/* spin */ ;
2282 
2283 	return 1;
2284 }
2285 
2286 
2287 /*
2288  * Called by a CPU to restart stopped CPUs.
2289  *
2290  * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2291  *
2292  *  - Signals all CPUs in map to restart.
2293  *  - Waits for each to restart.
2294  *
2295  * Returns:
2296  *  -1: error
2297  *   0: NA
2298  *   1: ok
2299  */
2300 int
2301 restart_cpus(u_int map)
2302 {
2303 	if (!smp_started)
2304 		return 0;
2305 
2306 	started_cpus = map;		/* signal other cpus to restart */
2307 
2308 	while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
2309 		/* spin */ ;
2310 
2311 	return 1;
2312 }
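
/*
 * Typical (hypothetical) pairing of the two routines above, e.g. around a
 * section of work that must run on a single CPU:
 *
 *	stop_cpus(other_cpus);
 *	... single-CPU work ...
 *	restart_cpus(stopped_cpus);
 */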
2313 
2314 int smp_active = 0;	/* are the APs allowed to run? */
2315 SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
2316 
2317 /* XXX maybe should be hw.ncpu */
2318 static int smp_cpus = 1;	/* how many cpu's running */
2319 SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
2320 
2321 int invltlb_ok = 0;	/* throttle smp_invltlb() till safe */
2322 SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
2323 
2324 /* Warning: Do not staticize.  Used from swtch.s */
2325 int do_page_zero_idle = 1; /* bzero pages for fun and profit in idleloop */
2326 SYSCTL_INT(_machdep, OID_AUTO, do_page_zero_idle, CTLFLAG_RW,
2327 	   &do_page_zero_idle, 0, "");
2328 
2329 /* Is forwarding of an interrupt to the CPU holding the ISR lock enabled? */
2330 int forward_irq_enabled = 1;
2331 SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW,
2332 	   &forward_irq_enabled, 0, "");
2333 
2334 /* Enable forwarding of a signal to a process running on a different CPU */
2335 static int forward_signal_enabled = 1;
2336 SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
2337 	   &forward_signal_enabled, 0, "");
2338 
2339 /* Enable forwarding of roundrobin to all other cpus */
2340 static int forward_roundrobin_enabled = 1;
2341 SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
2342 	   &forward_roundrobin_enabled, 0, "");
2343 
2344 /*
2345  * This is called once the rest of the system is up and running and we're
2346  * ready to let the APs out of the pen.
2347  */
2348 void ap_init(void);
2349 
2350 void
2351 ap_init()
2352 {
2353 	u_int	apic_id;
2354 
2355 	/* BSP may have changed PTD while we're waiting for the lock */
2356 	cpu_invltlb();
2357 
2358 	smp_cpus++;
2359 
2360 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2361 	lidt(&r_idt);
2362 #endif
2363 
2364 	/* Build our map of 'other' CPUs. */
2365 	other_cpus = all_cpus & ~(1 << cpuid);
2366 
2367 	printf("SMP: AP CPU #%d Launched!\n", cpuid);
2368 
2369 	/* set up CPU registers and state */
2370 	cpu_setregs();
2371 
2372 	/* set up FPU state on the AP */
2373 	npxinit(__INITIAL_NPXCW__);
2374 
2375 	/* A quick check from sanity claus */
2376 	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2377 	if (cpuid != apic_id) {
2378 		printf("SMP: cpuid = %d\n", cpuid);
2379 		printf("SMP: apic_id = %d\n", apic_id);
2380 		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2381 		panic("cpuid mismatch! boom!!");
2382 	}
2383 
2384 	/* Init local apic for irq's */
2385 	apic_initialize();
2386 
2387 	/* Set memory range attributes for this CPU to match the BSP */
2388 	mem_range_AP_init();
2389 
2390 	/*
2391 	 * Activate smp_invltlb, although strictly speaking, this isn't
2392 	 * quite correct yet.  We should have a bitfield for cpus willing
2393 	 * to accept TLB flush IPI's or something and sync them.
2394 	 */
2395 	if (smp_cpus == mp_ncpus) {
2396 		invltlb_ok = 1;
2397 		smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
2398 		smp_active = 1;	 /* historic */
2399 	}
2400 }
2401 
2402 #ifdef BETTER_CLOCK
2403 
2404 #define CHECKSTATE_USER	0
2405 #define CHECKSTATE_SYS	1
2406 #define CHECKSTATE_INTR	2
2407 
2408 /* Do not staticize.  Used from apic_vector.s */
2409 struct proc*	checkstate_curproc[NCPU];
2410 int		checkstate_cpustate[NCPU];
2411 u_long		checkstate_pc[NCPU];
2412 
2413 extern long	cp_time[CPUSTATES];
2414 
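/*
 * Map a user PC to an (even) index into the process's profiling buffer:
 * the offset from pr_off is scaled by pr_scale, a 16.16 fixed-point
 * fraction, mirroring the calculation done by addupc_intr().
 */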
2415 #define PC_TO_INDEX(pc, prof)				\
2416         ((int)(((u_quad_t)((pc) - (prof)->pr_off) *	\
2417             (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
2418 
2419 static void
2420 addupc_intr_forwarded(struct proc *p, int id, int *astmap)
2421 {
2422 	int i;
2423 	struct uprof *prof;
2424 	u_long pc;
2425 
2426 	pc = checkstate_pc[id];
2427 	prof = &p->p_stats->p_prof;
2428 	if (pc >= prof->pr_off &&
2429 	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
2430 		if ((p->p_flag & P_OWEUPC) == 0) {
2431 			prof->pr_addr = pc;
2432 			prof->pr_ticks = 1;
2433 			p->p_flag |= P_OWEUPC;
2434 		}
2435 		*astmap |= (1 << id);
2436 	}
2437 }
2438 
2439 static void
2440 forwarded_statclock(int id, int pscnt, int *astmap)
2441 {
2442 	struct pstats *pstats;
2443 	long rss;
2444 	struct rusage *ru;
2445 	struct vmspace *vm;
2446 	int cpustate;
2447 	struct proc *p;
2448 #ifdef GPROF
2449 	register struct gmonparam *g;
2450 	int i;
2451 #endif
2452 
2453 	p = checkstate_curproc[id];
2454 	cpustate = checkstate_cpustate[id];
2455 
2456 	switch (cpustate) {
2457 	case CHECKSTATE_USER:
2458 		if (p->p_flag & P_PROFIL)
2459 			addupc_intr_forwarded(p, id, astmap);
2460 		if (pscnt > 1)
2461 			return;
2462 		p->p_uticks++;
2463 		if (p->p_nice > NZERO)
2464 			cp_time[CP_NICE]++;
2465 		else
2466 			cp_time[CP_USER]++;
2467 		break;
2468 	case CHECKSTATE_SYS:
2469 #ifdef GPROF
2470 		/*
2471 		 * Kernel statistics are just like addupc_intr, only easier.
2472 		 */
2473 		g = &_gmonparam;
2474 		if (g->state == GMON_PROF_ON) {
2475 			i = checkstate_pc[id] - g->lowpc;
2476 			if (i < g->textsize) {
2477 				i /= HISTFRACTION * sizeof(*g->kcount);
2478 				g->kcount[i]++;
2479 			}
2480 		}
2481 #endif
2482 		if (pscnt > 1)
2483 			return;
2484 
2485 		if (!p)
2486 			cp_time[CP_IDLE]++;
2487 		else {
2488 			p->p_sticks++;
2489 			cp_time[CP_SYS]++;
2490 		}
2491 		break;
2492 	case CHECKSTATE_INTR:
2493 	default:
2494 #ifdef GPROF
2495 		/*
2496 		 * Kernel statistics are just like addupc_intr, only easier.
2497 		 */
2498 		g = &_gmonparam;
2499 		if (g->state == GMON_PROF_ON) {
2500 			i = checkstate_pc[id] - g->lowpc;
2501 			if (i < g->textsize) {
2502 				i /= HISTFRACTION * sizeof(*g->kcount);
2503 				g->kcount[i]++;
2504 			}
2505 		}
2506 #endif
2507 		if (pscnt > 1)
2508 			return;
2509 		if (p)
2510 			p->p_iticks++;
2511 		cp_time[CP_INTR]++;
2512 	}
2513 	if (p != NULL) {
2514 		schedclock(p);
2515 
2516 		/* Update resource usage integrals and maximums. */
2517 		if ((pstats = p->p_stats) != NULL &&
2518 		    (ru = &pstats->p_ru) != NULL &&
2519 		    (vm = p->p_vmspace) != NULL) {
2520 			ru->ru_ixrss += pgtok(vm->vm_tsize);
2521 			ru->ru_idrss += pgtok(vm->vm_dsize);
2522 			ru->ru_isrss += pgtok(vm->vm_ssize);
2523 			rss = pgtok(vmspace_resident_count(vm));
2524 			if (ru->ru_maxrss < rss)
2525 				ru->ru_maxrss = rss;
2526 		}
2527 	}
2528 }
2529 
2530 void
2531 forward_statclock(int pscnt)
2532 {
2533 	int map;
2534 	int id;
2535 	int i;
2536 
2537 	/* Kludge. We don't yet have separate locks for the interrupts
2538 	 * and the kernel. This means that we cannot let the other processors
2539 	 * handle complex interrupts while inhibiting them from entering
2540 	 * the kernel in a non-interrupt context.
2541 	 *
2542 	 * What we can do, without changing the locking mechanisms yet,
2543 	 * is let the other processors handle a very simple interrupt
2544 	 * (which determines the processor states), and do the main
2545 	 * work ourselves.
2546 	 */
2547 
2548 	if (!smp_started || !invltlb_ok || cold || panicstr)
2549 		return;
2550 
2551 	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2552 
2553 	map = other_cpus & ~stopped_cpus ;
2554 	checkstate_probed_cpus = 0;
2555 	if (map != 0)
2556 		selected_apic_ipi(map,
2557 				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2558 
2559 	i = 0;
2560 	while (checkstate_probed_cpus != map) {
2561 		/* spin */
2562 		i++;
2563 		if (i == 100000) {
2564 #ifdef BETTER_CLOCK_DIAGNOSTIC
2565 			printf("forward_statclock: checkstate %x\n",
2566 			       checkstate_probed_cpus);
2567 #endif
2568 			break;
2569 		}
2570 	}
2571 
2572 	/*
2573 	 * Step 2: walk through other processors' processes, update ticks and
2574 	 * profiling info.
2575 	 */
2576 
2577 	map = 0;
2578 	for (id = 0; id < mp_ncpus; id++) {
2579 		if (id == cpuid)
2580 			continue;
2581 		if (((1 << id) & checkstate_probed_cpus) == 0)
2582 			continue;
2583 		forwarded_statclock(id, pscnt, &map);
2584 	}
2585 	if (map != 0) {
2586 		checkstate_need_ast |= map;
2587 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2588 		i = 0;
2589 		while ((checkstate_need_ast & map) != 0) {
2590 			/* spin */
2591 			i++;
2592 			if (i > 100000) {
2593 #ifdef BETTER_CLOCK_DIAGNOSTIC
2594 				printf("forward_statclock: dropped ast 0x%x\n",
2595 				       checkstate_need_ast & map);
2596 #endif
2597 				break;
2598 			}
2599 		}
2600 	}
2601 }
2602 
2603 void
2604 forward_hardclock(int pscnt)
2605 {
2606 	int map;
2607 	int id;
2608 	struct proc *p;
2609 	struct pstats *pstats;
2610 	int i;
2611 
2612 	/* Kludge. We don't yet have separate locks for the interrupts
2613 	 * and the kernel. This means that we cannot let the other processors
2614 	 * handle complex interrupts while inhibiting them from entering
2615 	 * the kernel in a non-interrupt context.
2616 	 *
2617 	 * What we can do, without changing the locking mechanisms yet,
2618 	 * is let the other processors handle a very simple interrupt
2619 	 * (which determines the processor states), and do the main
2620 	 * work ourselves.
2621 	 */
2622 
2623 	if (!smp_started || !invltlb_ok || cold || panicstr)
2624 		return;
2625 
2626 	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2627 
2628 	map = other_cpus & ~stopped_cpus ;
2629 	checkstate_probed_cpus = 0;
2630 	if (map != 0)
2631 		selected_apic_ipi(map,
2632 				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2633 
2634 	i = 0;
2635 	while (checkstate_probed_cpus != map) {
2636 		/* spin */
2637 		i++;
2638 		if (i == 100000) {
2639 #ifdef BETTER_CLOCK_DIAGNOSTIC
2640 			printf("forward_hardclock: checkstate %x\n",
2641 			       checkstate_probed_cpus);
2642 #endif
2643 			break;
2644 		}
2645 	}
2646 
2647 	/*
2648 	 * Step 2: walk through other processors' processes, update virtual
2649 	 * timer and profiling timer. If stathz == 0, also update ticks and
2650 	 * profiling info.
2651 	 */
2652 
2653 	map = 0;
2654 	for (id = 0; id < mp_ncpus; id++) {
2655 		if (id == cpuid)
2656 			continue;
2657 		if (((1 << id) & checkstate_probed_cpus) == 0)
2658 			continue;
2659 		p = checkstate_curproc[id];
2660 		if (p) {
2661 			pstats = p->p_stats;
2662 			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
2663 			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
2664 			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
2665 				psignal(p, SIGVTALRM);
2666 				map |= (1 << id);
2667 			}
2668 			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
2669 			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
2670 				psignal(p, SIGPROF);
2671 				map |= (1 << id);
2672 			}
2673 		}
2674 		if (stathz == 0) {
2675 			forwarded_statclock( id, pscnt, &map);
2676 		}
2677 	}
2678 	if (map != 0) {
2679 		checkstate_need_ast |= map;
2680 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2681 		i = 0;
2682 		while ((checkstate_need_ast & map) != 0) {
2683 			/* spin */
2684 			i++;
2685 			if (i > 100000) {
2686 #ifdef BETTER_CLOCK_DIAGNOSTIC
2687 				printf("forward_hardclock: dropped ast 0x%x\n",
2688 				       checkstate_need_ast & map);
2689 #endif
2690 				break;
2691 			}
2692 		}
2693 	}
2694 }
2695 
2696 #endif /* BETTER_CLOCK */
2697 
2698 void
2699 forward_signal(struct proc *p)
2700 {
2701 	int map;
2702 	int id;
2703 	int i;
2704 
2705 	/* Kludge. We don't yet have separate locks for the interrupts
2706 	 * and the kernel. This means that we cannot let the other processors
2707 	 * handle complex interrupts while inhibiting them from entering
2708 	 * the kernel in a non-interrupt context.
2709 	 *
2710 	 * What we can do, without changing the locking mechanisms yet,
2711 	 * is let the other processors handle a very simple interrupt
2712 	 * (which determines the processor states), and do the main
2713 	 * work ourselves.
2714 	 */
2715 
2716 	if (!smp_started || !invltlb_ok || cold || panicstr)
2717 		return;
2718 	if (!forward_signal_enabled)
2719 		return;
2720 	while (1) {
2721 		if (p->p_stat != SRUN)
2722 			return;
2723 		id = p->p_oncpu;
2724 		if (id == 0xff)
2725 			return;
2726 		map = (1<<id);
2727 		checkstate_need_ast |= map;
2728 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2729 		i = 0;
2730 		while ((checkstate_need_ast & map) != 0) {
2731 			/* spin */
2732 			i++;
2733 			if (i > 100000) {
2734 #if 0
2735 				printf("forward_signal: dropped ast 0x%x\n",
2736 				       checkstate_need_ast & map);
2737 #endif
2738 				break;
2739 			}
2740 		}
2741 		if (id == p->p_oncpu)
2742 			return;
2743 	}
2744 }
2745 
2746 void
2747 forward_roundrobin(void)
2748 {
2749 	u_int map;
2750 	int i;
2751 
2752 	if (!smp_started || !invltlb_ok || cold || panicstr)
2753 		return;
2754 	if (!forward_roundrobin_enabled)
2755 		return;
2756 	resched_cpus |= other_cpus;
2757 	map = other_cpus & ~stopped_cpus ;
2758 #if 1
2759 	selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2760 #else
2761 	(void) all_but_self_ipi(XCPUAST_OFFSET);
2762 #endif
2763 	i = 0;
2764 	while ((checkstate_need_ast & map) != 0) {
2765 		/* spin */
2766 		i++;
2767 		if (i > 100000) {
2768 #if 0
2769 			printf("forward_roundrobin: dropped ast 0x%x\n",
2770 			       checkstate_need_ast & map);
2771 #endif
2772 			break;
2773 		}
2774 	}
2775 }
2776 
2777 
2778 #ifdef APIC_INTR_REORDER
2779 /*
2780  *	Maintain mapping from softintr vector to isr bit in local apic.
2781  */
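/*
 * (Each 32-bit ISR register covers 32 vectors, and consecutive ISR registers
 *  sit 16 bytes, i.e. 4 u_ints, apart in the local APIC register map, hence
 *  the "&lapic.isr0 + ((vector>>5)<<2)" calculation below.)
 */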
2782 void
2783 set_lapic_isrloc(int intr, int vector)
2784 {
2785 	if (intr < 0 || intr > 32)
2786 		panic("set_apic_isrloc: bad intr argument: %d",intr);
2787 	if (vector < ICU_OFFSET || vector > 255)
2788 		panic("set_apic_isrloc: bad vector argument: %d",vector);
2789 	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
2790 	apic_isrbit_location[intr].bit = (1<<(vector & 31));
2791 }
2792 #endif
2793 
2794 /*
2795  * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
2796  * (if specified), rendezvous, execute the action function (if specified),
2797  * rendezvous again, execute the teardown function (if specified), and then
2798  * resume.
2799  *
2800  * Note that the supplied external functions _must_ be reentrant and aware
2801  * that they are running in parallel and in an unknown lock context.
2802  */
2803 static void (*smp_rv_setup_func)(void *arg);
2804 static void (*smp_rv_action_func)(void *arg);
2805 static void (*smp_rv_teardown_func)(void *arg);
2806 static void *smp_rv_func_arg;
2807 static volatile int smp_rv_waiters[2];
2808 
2809 void
2810 smp_rendezvous_action(void)
2811 {
2812 	/* setup function */
2813 	if (smp_rv_setup_func != NULL)
2814 		smp_rv_setup_func(smp_rv_func_arg);
2815 	/* spin on entry rendezvous */
2816 	atomic_add_int(&smp_rv_waiters[0], 1);
2817 	while (smp_rv_waiters[0] < mp_ncpus)
2818 		;
2819 	/* action function */
2820 	if (smp_rv_action_func != NULL)
2821 		smp_rv_action_func(smp_rv_func_arg);
2822 	/* spin on exit rendezvous */
2823 	atomic_add_int(&smp_rv_waiters[1], 1);
2824 	while (smp_rv_waiters[1] < mp_ncpus)
2825 		;
2826 	/* teardown function */
2827 	if (smp_rv_teardown_func != NULL)
2828 		smp_rv_teardown_func(smp_rv_func_arg);
2829 }
2830 
2831 void
2832 smp_rendezvous(void (* setup_func)(void *),
2833 	       void (* action_func)(void *),
2834 	       void (* teardown_func)(void *),
2835 	       void *arg)
2836 {
2837 	u_int	efl;
2838 
2839 	/* obtain rendezvous lock */
2840 	s_lock(&smp_rv_lock);		/* XXX sleep here? NOWAIT flag? */
2841 
2842 	/* set static function pointers */
2843 	smp_rv_setup_func = setup_func;
2844 	smp_rv_action_func = action_func;
2845 	smp_rv_teardown_func = teardown_func;
2846 	smp_rv_func_arg = arg;
2847 	smp_rv_waiters[0] = 0;
2848 	smp_rv_waiters[1] = 0;
2849 
2850 	/* disable interrupts on this CPU, save interrupt status */
2851 	efl = read_eflags();
2852 	write_eflags(efl & ~PSL_I);
2853 
2854 	/* signal other processors, which will enter the IPI with interrupts off */
2855 	all_but_self_ipi(XRENDEZVOUS_OFFSET);
2856 
2857 	/* call executor function */
2858 	smp_rendezvous_action();
2859 
2860 	/* restore interrupt flag */
2861 	write_eflags(efl);
2862 
2863 	/* release lock */
2864 	s_unlock(&smp_rv_lock);
2865 }
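
/*
 * Example (hypothetical) use of smp_rendezvous(): have each CPU invalidate
 * its own TLB at the same point, with no setup or teardown step:
 *
 *	static void
 *	rendezvous_flush_tlb(void *dummy)
 *	{
 *		cpu_invltlb();
 *	}
 *
 *	smp_rendezvous(NULL, rendezvous_flush_tlb, NULL, NULL);
 */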
2866