xref: /freebsd/sys/kern/subr_smp.c (revision ee41f1b1cf5e3d4f586cb85b46123b416275862c)
1 /*
2  * Copyright (c) 1996, by Steve Passe
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. The name of the developer may NOT be used to endorse or promote products
11  *    derived from this software without specific prior written permission.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include "opt_cpu.h"
29 #include "opt_user_ldt.h"
30 
31 #ifdef SMP
32 #include <machine/smptests.h>
33 #else
34 #error
35 #endif
36 
37 #include <sys/param.h>
38 #include <sys/bus.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/proc.h>
42 #include <sys/sysctl.h>
43 #include <sys/malloc.h>
44 #include <sys/memrange.h>
45 #include <sys/mutex.h>
46 #ifdef BETTER_CLOCK
47 #include <sys/dkstat.h>
48 #endif
49 #include <sys/cons.h>	/* cngetc() */
50 
51 #include <vm/vm.h>
52 #include <vm/vm_param.h>
53 #include <vm/pmap.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vm_extern.h>
56 #ifdef BETTER_CLOCK
57 #include <sys/lock.h>
58 #include <vm/vm_map.h>
59 #include <sys/user.h>
60 #ifdef GPROF
61 #include <sys/gmon.h>
62 #endif
63 #endif
64 
65 #include <machine/smp.h>
66 #include <machine/apic.h>
67 #include <machine/atomic.h>
68 #include <machine/cpufunc.h>
69 #include <machine/mpapic.h>
70 #include <machine/psl.h>
71 #include <machine/segments.h>
72 #include <machine/smptests.h>	/** TEST_DEFAULT_CONFIG, TEST_TEST1 */
73 #include <machine/tss.h>
74 #include <machine/specialreg.h>
75 #include <machine/globaldata.h>
76 
77 #if defined(APIC_IO)
78 #include <machine/md_var.h>		/* setidt() */
79 #include <i386/isa/icu.h>		/* IPIs */
80 #include <i386/isa/intr_machdep.h>	/* IPIs */
81 #endif	/* APIC_IO */
82 
83 #if defined(TEST_DEFAULT_CONFIG)
84 #define MPFPS_MPFB1	TEST_DEFAULT_CONFIG
85 #else
86 #define MPFPS_MPFB1	mpfps->mpfb1
87 #endif  /* TEST_DEFAULT_CONFIG */
88 
89 #define WARMBOOT_TARGET		0
90 #define WARMBOOT_OFF		(KERNBASE + 0x0467)
91 #define WARMBOOT_SEG		(KERNBASE + 0x0469)
92 
93 #ifdef PC98
94 #define BIOS_BASE		(0xe8000)
95 #define BIOS_SIZE		(0x18000)
96 #else
97 #define BIOS_BASE		(0xf0000)
98 #define BIOS_SIZE		(0x10000)
99 #endif
100 #define BIOS_COUNT		(BIOS_SIZE/4)
101 
102 #define CMOS_REG		(0x70)
103 #define CMOS_DATA		(0x71)
104 #define BIOS_RESET		(0x0f)
105 #define BIOS_WARM		(0x0a)
106 
107 #define PROCENTRY_FLAG_EN	0x01
108 #define PROCENTRY_FLAG_BP	0x02
109 #define IOAPICENTRY_FLAG_EN	0x01
110 
111 
112 /* MP Floating Pointer Structure */
113 typedef struct MPFPS {
114 	char    signature[4];
115 	void   *pap;
116 	u_char  length;
117 	u_char  spec_rev;
118 	u_char  checksum;
119 	u_char  mpfb1;
120 	u_char  mpfb2;
121 	u_char  mpfb3;
122 	u_char  mpfb4;
123 	u_char  mpfb5;
124 }      *mpfps_t;
125 
126 /* MP Configuration Table Header */
127 typedef struct MPCTH {
128 	char    signature[4];
129 	u_short base_table_length;
130 	u_char  spec_rev;
131 	u_char  checksum;
132 	u_char  oem_id[8];
133 	u_char  product_id[12];
134 	void   *oem_table_pointer;
135 	u_short oem_table_size;
136 	u_short entry_count;
137 	void   *apic_address;
138 	u_short extended_table_length;
139 	u_char  extended_table_checksum;
140 	u_char  reserved;
141 }      *mpcth_t;
142 
143 
144 typedef struct PROCENTRY {
145 	u_char  type;
146 	u_char  apic_id;
147 	u_char  apic_version;
148 	u_char  cpu_flags;
149 	u_long  cpu_signature;
150 	u_long  feature_flags;
151 	u_long  reserved1;
152 	u_long  reserved2;
153 }      *proc_entry_ptr;
154 
155 typedef struct BUSENTRY {
156 	u_char  type;
157 	u_char  bus_id;
158 	char    bus_type[6];
159 }      *bus_entry_ptr;
160 
161 typedef struct IOAPICENTRY {
162 	u_char  type;
163 	u_char  apic_id;
164 	u_char  apic_version;
165 	u_char  apic_flags;
166 	void   *apic_address;
167 }      *io_apic_entry_ptr;
168 
169 typedef struct INTENTRY {
170 	u_char  type;
171 	u_char  int_type;
172 	u_short int_flags;
173 	u_char  src_bus_id;
174 	u_char  src_bus_irq;
175 	u_char  dst_apic_id;
176 	u_char  dst_apic_int;
177 }      *int_entry_ptr;
178 
179 /* descriptions of MP basetable entries */
180 typedef struct BASETABLE_ENTRY {
181 	u_char  type;
182 	u_char  length;
183 	char    name[16];
184 }       basetable_entry;
185 
186 /*
187  * this code MUST be enabled here and in mpboot.s.
188  * it follows the very early stages of AP boot by placing values in CMOS ram.
189  * it NORMALLY will never be needed and thus the primitive method for enabling.
190  *
191 #define CHECK_POINTS
192  */
193 
194 #if defined(CHECK_POINTS) && !defined(PC98)
195 #define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
196 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
197 
198 #define CHECK_INIT(D);				\
199 	CHECK_WRITE(0x34, (D));			\
200 	CHECK_WRITE(0x35, (D));			\
201 	CHECK_WRITE(0x36, (D));			\
202 	CHECK_WRITE(0x37, (D));			\
203 	CHECK_WRITE(0x38, (D));			\
204 	CHECK_WRITE(0x39, (D));
205 
206 #define CHECK_PRINT(S);				\
207 	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
208 	   (S),					\
209 	   CHECK_READ(0x34),			\
210 	   CHECK_READ(0x35),			\
211 	   CHECK_READ(0x36),			\
212 	   CHECK_READ(0x37),			\
213 	   CHECK_READ(0x38),			\
214 	   CHECK_READ(0x39));
215 
216 #else				/* CHECK_POINTS */
217 
218 #define CHECK_INIT(D)
219 #define CHECK_PRINT(S)
220 
221 #endif				/* CHECK_POINTS */
222 
223 /*
224  * Values to send to the POST hardware.
225  */
226 #define MP_BOOTADDRESS_POST	0x10
227 #define MP_PROBE_POST		0x11
228 #define MPTABLE_PASS1_POST	0x12
229 
230 #define MP_START_POST		0x13
231 #define MP_ENABLE_POST		0x14
232 #define MPTABLE_PASS2_POST	0x15
233 
234 #define START_ALL_APS_POST	0x16
235 #define INSTALL_AP_TRAMP_POST	0x17
236 #define START_AP_POST		0x18
237 
238 #define MP_ANNOUNCE_POST	0x19
239 
240 /* used to hold the AP's until we are ready to release them */
241 struct mtx			ap_boot_mtx;
242 
243 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
244 int	current_postcode;
245 
246 /** XXX FIXME: what system files declare these??? */
247 extern struct region_descriptor r_gdt, r_idt;
248 
249 int	bsp_apic_ready = 0;	/* flags useability of BSP apic */
250 int	mp_ncpus;		/* # of CPUs, including BSP */
251 int	mp_naps;		/* # of Applications processors */
252 int	mp_nbusses;		/* # of busses */
253 int	mp_napics;		/* # of IO APICs */
254 int	boot_cpu_id;		/* designated BSP */
255 vm_offset_t cpu_apic_address;
256 vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
257 extern	int nkpt;
258 
259 u_int32_t cpu_apic_versions[MAXCPU];
260 u_int32_t *io_apic_versions;
261 
262 #ifdef APIC_INTR_REORDER
263 struct {
264 	volatile int *location;
265 	int bit;
266 } apic_isrbit_location[32];
267 #endif
268 
269 struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];
270 
271 /*
272  * APIC ID logical/physical mapping structures.
273  * We oversize these to simplify boot-time config.
274  */
275 int     cpu_num_to_apic_id[NAPICID];
276 int     io_num_to_apic_id[NAPICID];
277 int     apic_id_to_logical[NAPICID];
278 
279 
280 /* Bitmap of all available CPUs */
281 u_int	all_cpus;
282 
283 /* AP uses this during bootstrap.  Do not staticize.  */
284 char *bootSTK;
285 static int bootAP;
286 
287 /* Hotwire a 0->4MB V==P mapping */
288 extern pt_entry_t *KPTphys;
289 
290 /* SMP page table page */
291 extern pt_entry_t *SMPpt;
292 
293 struct pcb stoppcbs[MAXCPU];
294 
295 int smp_started;		/* has the system started? */
296 int smp_active = 0;		/* are the APs allowed to run? */
297 SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
298 
299 /* XXX maybe should be hw.ncpu */
300 static int smp_cpus = 1;	/* how many cpu's running */
301 SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
302 
303 int invltlb_ok = 0;	/* throttle smp_invltlb() till safe */
304 SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
305 
306 /* Enable forwarding of a signal to a process running on a different CPU */
307 static int forward_signal_enabled = 1;
308 SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
309 	   &forward_signal_enabled, 0, "");
310 
311 /* Enable forwarding of roundrobin to all other cpus */
312 static int forward_roundrobin_enabled = 1;
313 SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
314 	   &forward_roundrobin_enabled, 0, "");
315 
316 
317 /*
318  * Local data and functions.
319  */
320 
321 /* Set to 1 once we're ready to let the APs out of the pen. */
322 static volatile int aps_ready = 0;
323 
324 static int	mp_capable;
325 static u_int	boot_address;
326 static u_int	base_memory;
327 
328 static int	picmode;		/* 0: virtual wire mode, 1: PIC mode */
329 static mpfps_t	mpfps;
330 static int	search_for_sig(u_int32_t target, int count);
331 static void	mp_enable(u_int boot_addr);
332 
333 static void	mptable_pass1(void);
334 static int	mptable_pass2(void);
335 static void	default_mp_table(int type);
336 static void	fix_mp_table(void);
337 static void	setup_apic_irq_mapping(void);
338 static void	init_locks(void);
339 static int	start_all_aps(u_int boot_addr);
340 static void	install_ap_tramp(u_int boot_addr);
341 static int	start_ap(int logicalCpu, u_int boot_addr);
342 void		ap_init(void);
343 static int	apic_int_is_bus_type(int intr, int bus_type);
344 static void	release_aps(void *dummy);
345 
346 /*
347  * initialize all the SMP locks
348  */
349 
350 /* critical region around IO APIC, apic_imen */
351 struct mtx		imen_mtx;
352 
353 /* lock region used by kernel profiling */
354 struct mtx		mcount_mtx;
355 
356 #ifdef USE_COMLOCK
357 /* locks com (tty) data/hardware accesses: a FASTINTR() */
358 struct mtx		com_mtx;
359 #endif /* USE_COMLOCK */
360 
361 /* lock around the MP rendezvous */
362 static struct mtx	smp_rv_mtx;
363 
364 /* only 1 CPU can panic at a time :) */
365 struct mtx		panic_mtx;
366 
367 static void
368 init_locks(void)
369 {
370 	/*
371 	 * XXX The mcount mutex probably needs to be statically initialized,
372 	 * since it will be used even in the function calls that get us to this
373 	 * point.
374 	 */
375 	mtx_init(&mcount_mtx, "mcount", MTX_DEF);
376 
377 	mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
378 	mtx_init(&panic_mtx, "panic", MTX_DEF);
379 
380 #ifdef USE_COMLOCK
381 	mtx_init(&com_mtx, "com", MTX_SPIN);
382 #endif /* USE_COMLOCK */
383 
384 	mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
385 }
386 
387 /*
388  * Calculate usable address in base memory for AP trampoline code.
389  */
390 u_int
391 mp_bootaddress(u_int basemem)
392 {
393 	POSTCODE(MP_BOOTADDRESS_POST);
394 
395 	base_memory = basemem * 1024;	/* convert to bytes */
396 
397 	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
398 	if ((base_memory - boot_address) < bootMP_size)
399 		boot_address -= 4096;	/* not enough, lower by 4k */
400 
401 	return boot_address;
402 }
403 
404 
405 /*
406  * Look for an Intel MP spec table (ie, SMP capable hardware).
407  */
408 int
409 mp_probe(void)
410 {
411 	int     x;
412 	u_long  segment;
413 	u_int32_t target;
414 
415 	POSTCODE(MP_PROBE_POST);
416 
417 	/* see if EBDA exists */
418 	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
419 		/* search first 1K of EBDA */
420 		target = (u_int32_t) (segment << 4);
421 		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
422 			goto found;
423 	} else {
424 		/* last 1K of base memory, effective 'top of base' passed in */
425 		target = (u_int32_t) (base_memory - 0x400);
426 		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
427 			goto found;
428 	}
429 
430 	/* search the BIOS */
431 	target = (u_int32_t) BIOS_BASE;
432 	if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
433 		goto found;
434 
435 	/* nothing found */
436 	mpfps = (mpfps_t)0;
437 	mp_capable = 0;
438 	return 0;
439 
440 found:
441 	/* calculate needed resources */
442 	mpfps = (mpfps_t)x;
443 	mptable_pass1();
444 
445 	/* flag fact that we are running multiple processors */
446 	mp_capable = 1;
447 	return 1;
448 }
449 
450 
451 /*
452  * Initialize the SMP hardware and the APIC and start up the AP's.
453  */
454 void
455 mp_start(void)
456 {
457 	POSTCODE(MP_START_POST);
458 
459 	/* look for MP capable motherboard */
460 	if (mp_capable)
461 		mp_enable(boot_address);
462 	else
463 		panic("MP hardware not found!");
464 }
465 
466 
467 /*
468  * Print various information about the SMP system hardware and setup.
469  */
470 void
471 mp_announce(void)
472 {
473 	int     x;
474 
475 	POSTCODE(MP_ANNOUNCE_POST);
476 
477 	printf("FreeBSD/SMP: Multiprocessor motherboard\n");
478 	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
479 	printf(", version: 0x%08x", cpu_apic_versions[0]);
480 	printf(", at 0x%08x\n", cpu_apic_address);
481 	for (x = 1; x <= mp_naps; ++x) {
482 		printf(" cpu%d (AP):  apic id: %2d", x, CPU_TO_ID(x));
483 		printf(", version: 0x%08x", cpu_apic_versions[x]);
484 		printf(", at 0x%08x\n", cpu_apic_address);
485 	}
486 
487 #if defined(APIC_IO)
488 	for (x = 0; x < mp_napics; ++x) {
489 		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
490 		printf(", version: 0x%08x", io_apic_versions[x]);
491 		printf(", at 0x%08x\n", io_apic_address[x]);
492 	}
493 #else
494 	printf(" Warning: APIC I/O disabled\n");
495 #endif	/* APIC_IO */
496 }
497 
498 /*
499  * AP cpu's call this to sync up protected mode.
500  */
501 void
502 init_secondary(void)
503 {
504 	int	gsel_tss;
505 	int	x, myid = bootAP;
506 
507 	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
508 	gdt_segs[GPROC0_SEL].ssd_base =
509 		(int) &SMP_prvspace[myid].globaldata.gd_common_tss;
510 	SMP_prvspace[myid].globaldata.gd_prvspace =
511 		&SMP_prvspace[myid].globaldata;
512 
513 	for (x = 0; x < NGDT; x++) {
514 		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
515 	}
516 
517 	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
518 	r_gdt.rd_base = (int) &gdt[myid * NGDT];
519 	lgdt(&r_gdt);			/* does magic intra-segment return */
520 
521 	lidt(&r_idt);
522 
523 	lldt(_default_ldt);
524 #ifdef USER_LDT
525 	PCPU_SET(currentldt, _default_ldt);
526 #endif
527 
528 	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
529 	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
530 	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
531 	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
532 	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
533 	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
534 	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
535 	ltr(gsel_tss);
536 
537 	pmap_set_opt();
538 }
539 
540 
541 #if defined(APIC_IO)
542 /*
543  * Final configuration of the BSP's local APIC:
544  *  - disable 'pic mode'.
545  *  - disable 'virtual wire mode'.
546  *  - enable NMI.
547  */
548 void
549 bsp_apic_configure(void)
550 {
551 	u_char		byte;
552 	u_int32_t	temp;
553 
554 	/* leave 'pic mode' if necessary */
555 	if (picmode) {
556 		outb(0x22, 0x70);	/* select IMCR */
557 		byte = inb(0x23);	/* current contents */
558 		byte |= 0x01;		/* mask external INTR */
559 		outb(0x23, byte);	/* disconnect 8259s/NMI */
560 	}
561 
562 	/* mask lint0 (the 8259 'virtual wire' connection) */
563 	temp = lapic.lvt_lint0;
564 	temp |= APIC_LVT_M;		/* set the mask */
565 	lapic.lvt_lint0 = temp;
566 
567         /* setup lint1 to handle NMI */
568         temp = lapic.lvt_lint1;
569         temp &= ~APIC_LVT_M;		/* clear the mask */
570         lapic.lvt_lint1 = temp;
571 
572 	if (bootverbose)
573 		apic_dump("bsp_apic_configure()");
574 }
575 #endif  /* APIC_IO */
576 
577 
578 /*******************************************************************
579  * local functions and data
580  */
581 
582 /*
583  * start the SMP system
584  */
585 static void
586 mp_enable(u_int boot_addr)
587 {
588 	int     x;
589 #if defined(APIC_IO)
590 	int     apic;
591 	u_int   ux;
592 #endif	/* APIC_IO */
593 
594 	POSTCODE(MP_ENABLE_POST);
595 
596 	/* turn on 4MB of V == P addressing so we can get to MP table */
597 	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
598 	invltlb();
599 
600 	/* examine the MP table for needed info, uses physical addresses */
601 	x = mptable_pass2();
602 
603 	*(int *)PTD = 0;
604 	invltlb();
605 
606 	/* can't process default configs till the CPU APIC is pmapped */
607 	if (x)
608 		default_mp_table(x);
609 
610 	/* post scan cleanup */
611 	fix_mp_table();
612 	setup_apic_irq_mapping();
613 
614 #if defined(APIC_IO)
615 
616 	/* fill the LOGICAL io_apic_versions table */
617 	for (apic = 0; apic < mp_napics; ++apic) {
618 		ux = io_apic_read(apic, IOAPIC_VER);
619 		io_apic_versions[apic] = ux;
620 		io_apic_set_id(apic, IO_TO_ID(apic));
621 	}
622 
623 	/* program each IO APIC in the system */
624 	for (apic = 0; apic < mp_napics; ++apic)
625 		if (io_apic_setup(apic) < 0)
626 			panic("IO APIC setup failure");
627 
628 	/* install a 'Spurious INTerrupt' vector */
629 	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
630 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
631 
632 	/* install an inter-CPU IPI for TLB invalidation */
633 	setidt(XINVLTLB_OFFSET, Xinvltlb,
634 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
635 
636 #ifdef BETTER_CLOCK
637 	/* install an inter-CPU IPI for reading processor state */
638 	setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
639 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
640 #endif
641 
642 	/* install an inter-CPU IPI for all-CPU rendezvous */
643 	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
644 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
645 
646 	/* install an inter-CPU IPI for forcing an additional software trap */
647 	setidt(XCPUAST_OFFSET, Xcpuast,
648 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
649 
650 	/* install an inter-CPU IPI for CPU stop/restart */
651 	setidt(XCPUSTOP_OFFSET, Xcpustop,
652 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
653 
654 #if defined(TEST_TEST1)
655 	/* install a "fake hardware INTerrupt" vector */
656 	setidt(XTEST1_OFFSET, Xtest1,
657 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
658 #endif  /** TEST_TEST1 */
659 
660 #endif	/* APIC_IO */
661 
662 	/* initialize all SMP locks */
663 	init_locks();
664 
665 	/* start each Application Processor */
666 	start_all_aps(boot_addr);
667 }
668 
669 
670 /*
671  * look for the MP spec signature
672  */
673 
674 /* string defined by the Intel MP Spec as identifying the MP table */
675 #define MP_SIG		0x5f504d5f	/* _MP_ */
676 #define NEXT(X)		((X) += 4)
677 static int
678 search_for_sig(u_int32_t target, int count)
679 {
680 	int     x;
681 	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
682 
683 	for (x = 0; x < count; NEXT(x))
684 		if (addr[x] == MP_SIG)
685 			/* make array index a byte index */
686 			return (target + (x * sizeof(u_int32_t)));
687 
688 	return -1;
689 }
690 
691 
692 static basetable_entry basetable_entry_types[] =
693 {
694 	{0, 20, "Processor"},
695 	{1, 8, "Bus"},
696 	{2, 8, "I/O APIC"},
697 	{3, 8, "I/O INT"},
698 	{4, 8, "Local INT"}
699 };
700 
701 typedef struct BUSDATA {
702 	u_char  bus_id;
703 	enum busTypes bus_type;
704 }       bus_datum;
705 
706 typedef struct INTDATA {
707 	u_char  int_type;
708 	u_short int_flags;
709 	u_char  src_bus_id;
710 	u_char  src_bus_irq;
711 	u_char  dst_apic_id;
712 	u_char  dst_apic_int;
713 	u_char	int_vector;
714 }       io_int, local_int;
715 
716 typedef struct BUSTYPENAME {
717 	u_char  type;
718 	char    name[7];
719 }       bus_type_name;
720 
721 static bus_type_name bus_type_table[] =
722 {
723 	{CBUS, "CBUS"},
724 	{CBUSII, "CBUSII"},
725 	{EISA, "EISA"},
726 	{MCA, "MCA"},
727 	{UNKNOWN_BUSTYPE, "---"},
728 	{ISA, "ISA"},
729 	{MCA, "MCA"},
730 	{UNKNOWN_BUSTYPE, "---"},
731 	{UNKNOWN_BUSTYPE, "---"},
732 	{UNKNOWN_BUSTYPE, "---"},
733 	{UNKNOWN_BUSTYPE, "---"},
734 	{UNKNOWN_BUSTYPE, "---"},
735 	{PCI, "PCI"},
736 	{UNKNOWN_BUSTYPE, "---"},
737 	{UNKNOWN_BUSTYPE, "---"},
738 	{UNKNOWN_BUSTYPE, "---"},
739 	{UNKNOWN_BUSTYPE, "---"},
740 	{XPRESS, "XPRESS"},
741 	{UNKNOWN_BUSTYPE, "---"}
742 };
743 /* from MP spec v1.4, table 5-1 */
744 static int default_data[7][5] =
745 {
746 /*   nbus, id0, type0, id1, type1 */
747 	{1, 0, ISA, 255, 255},
748 	{1, 0, EISA, 255, 255},
749 	{1, 0, EISA, 255, 255},
750 	{1, 0, MCA, 255, 255},
751 	{2, 0, ISA, 1, PCI},
752 	{2, 0, EISA, 1, PCI},
753 	{2, 0, MCA, 1, PCI}
754 };
755 
756 
757 /* the bus data */
758 static bus_datum *bus_data;
759 
760 /* the IO INT data, one entry per possible APIC INTerrupt */
761 static io_int  *io_apic_ints;
762 
763 static int nintrs;
764 
765 static int processor_entry	__P((proc_entry_ptr entry, int cpu));
766 static int bus_entry		__P((bus_entry_ptr entry, int bus));
767 static int io_apic_entry	__P((io_apic_entry_ptr entry, int apic));
768 static int int_entry		__P((int_entry_ptr entry, int intr));
769 static int lookup_bus_type	__P((char *name));
770 
771 
772 /*
773  * 1st pass on motherboard's Intel MP specification table.
774  *
775  * initializes:
776  *	mp_ncpus = 1
777  *
778  * determines:
779  *	cpu_apic_address (common to all CPUs)
780  *	io_apic_address[N]
781  *	mp_naps
782  *	mp_nbusses
783  *	mp_napics
784  *	nintrs
785  */
786 static void
787 mptable_pass1(void)
788 {
789 	int	x;
790 	mpcth_t	cth;
791 	int	totalSize;
792 	void*	position;
793 	int	count;
794 	int	type;
795 
796 	POSTCODE(MPTABLE_PASS1_POST);
797 
798 	/* clear various tables */
799 	for (x = 0; x < NAPICID; ++x) {
800 		io_apic_address[x] = ~0;	/* IO APIC address table */
801 	}
802 
803 	/* init everything to empty */
804 	mp_naps = 0;
805 	mp_nbusses = 0;
806 	mp_napics = 0;
807 	nintrs = 0;
808 
809 	/* check for use of 'default' configuration */
810 	if (MPFPS_MPFB1 != 0) {
811 		/* use default addresses */
812 		cpu_apic_address = DEFAULT_APIC_BASE;
813 		io_apic_address[0] = DEFAULT_IO_APIC_BASE;
814 
815 		/* fill in with defaults */
816 		mp_naps = 2;		/* includes BSP */
817 		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
818 #if defined(APIC_IO)
819 		mp_napics = 1;
820 		nintrs = 16;
821 #endif	/* APIC_IO */
822 	}
823 	else {
824 		if ((cth = mpfps->pap) == 0)
825 			panic("MP Configuration Table Header MISSING!");
826 
827 		cpu_apic_address = (vm_offset_t) cth->apic_address;
828 
829 		/* walk the table, recording info of interest */
830 		totalSize = cth->base_table_length - sizeof(struct MPCTH);
831 		position = (u_char *) cth + sizeof(struct MPCTH);
832 		count = cth->entry_count;
833 
834 		while (count--) {
835 			switch (type = *(u_char *) position) {
836 			case 0: /* processor_entry */
837 				if (((proc_entry_ptr)position)->cpu_flags
838 					& PROCENTRY_FLAG_EN)
839 					++mp_naps;
840 				break;
841 			case 1: /* bus_entry */
842 				++mp_nbusses;
843 				break;
844 			case 2: /* io_apic_entry */
845 				if (((io_apic_entry_ptr)position)->apic_flags
846 					& IOAPICENTRY_FLAG_EN)
847 					io_apic_address[mp_napics++] =
848 					    (vm_offset_t)((io_apic_entry_ptr)
849 						position)->apic_address;
850 				break;
851 			case 3: /* int_entry */
852 				++nintrs;
853 				break;
854 			case 4:	/* int_entry */
855 				break;
856 			default:
857 				panic("mpfps Base Table HOSED!");
858 				/* NOTREACHED */
859 			}
860 
861 			totalSize -= basetable_entry_types[type].length;
862 			(u_char*)position += basetable_entry_types[type].length;
863 		}
864 	}
865 
866 	/* qualify the numbers */
867 	if (mp_naps > MAXCPU) {
868 		printf("Warning: only using %d of %d available CPUs!\n",
869 			MAXCPU, mp_naps);
870 		mp_naps = MAXCPU;
871 	}
872 
873 	/*
874 	 * Count the BSP.
875 	 * This is also used as a counter while starting the APs.
876 	 */
877 	mp_ncpus = 1;
878 
879 	--mp_naps;	/* subtract the BSP */
880 }
881 
882 
883 /*
884  * 2nd pass on motherboard's Intel MP specification table.
885  *
886  * sets:
887  *	boot_cpu_id
888  *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
889  *	CPU_TO_ID(N), logical CPU to APIC ID table
890  *	IO_TO_ID(N), logical IO to APIC ID table
891  *	bus_data[N]
892  *	io_apic_ints[N]
893  */
894 static int
895 mptable_pass2(void)
896 {
897 	int     x;
898 	mpcth_t cth;
899 	int     totalSize;
900 	void*   position;
901 	int     count;
902 	int     type;
903 	int     apic, bus, cpu, intr;
904 	int	i, j;
905 	int	pgeflag;
906 
907 	POSTCODE(MPTABLE_PASS2_POST);
908 
909 	pgeflag = 0;		/* XXX - Not used under SMP yet.  */
910 
911 	MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
912 	    M_DEVBUF, M_WAITOK);
913 	MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
914 	    M_DEVBUF, M_WAITOK);
915 	MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + 1),
916 	    M_DEVBUF, M_WAITOK);
917 	MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
918 	    M_DEVBUF, M_WAITOK);
919 
920 	bzero(ioapic, sizeof(ioapic_t *) * mp_napics);
921 
922 	for (i = 0; i < mp_napics; i++) {
923 		for (j = 0; j < mp_napics; j++) {
924 			/* same page frame as a previous IO apic? */
925 			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) ==
926 			    (io_apic_address[i] & PG_FRAME)) {
927 				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
928 					+ (NPTEPG-2-j) * PAGE_SIZE
929 					+ (io_apic_address[i] & PAGE_MASK));
930 				break;
931 			}
932 			/* use this slot if available */
933 			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) {
934 				SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW |
935 				    pgeflag | (io_apic_address[i] & PG_FRAME));
936 				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
937 					+ (NPTEPG-2-j) * PAGE_SIZE
938 					+ (io_apic_address[i] & PAGE_MASK));
939 				break;
940 			}
941 		}
942 	}
943 
944 	/* clear various tables */
945 	for (x = 0; x < NAPICID; ++x) {
946 		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
947 		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
948 		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
949 	}
950 
951 	/* clear bus data table */
952 	for (x = 0; x < mp_nbusses; ++x)
953 		bus_data[x].bus_id = 0xff;
954 
955 	/* clear IO APIC INT table */
956 	for (x = 0; x < (nintrs + 1); ++x) {
957 		io_apic_ints[x].int_type = 0xff;
958 		io_apic_ints[x].int_vector = 0xff;
959 	}
960 
961 	/* setup the cpu/apic mapping arrays */
962 	boot_cpu_id = -1;
963 
964 	/* record whether PIC or virtual-wire mode */
965 	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;
966 
967 	/* check for use of 'default' configuration */
968 	if (MPFPS_MPFB1 != 0)
969 		return MPFPS_MPFB1;	/* return default configuration type */
970 
971 	if ((cth = mpfps->pap) == 0)
972 		panic("MP Configuration Table Header MISSING!");
973 
974 	/* walk the table, recording info of interest */
975 	totalSize = cth->base_table_length - sizeof(struct MPCTH);
976 	position = (u_char *) cth + sizeof(struct MPCTH);
977 	count = cth->entry_count;
978 	apic = bus = intr = 0;
979 	cpu = 1;				/* pre-count the BSP */
980 
981 	while (count--) {
982 		switch (type = *(u_char *) position) {
983 		case 0:
984 			if (processor_entry(position, cpu))
985 				++cpu;
986 			break;
987 		case 1:
988 			if (bus_entry(position, bus))
989 				++bus;
990 			break;
991 		case 2:
992 			if (io_apic_entry(position, apic))
993 				++apic;
994 			break;
995 		case 3:
996 			if (int_entry(position, intr))
997 				++intr;
998 			break;
999 		case 4:
1000 			/* int_entry(position); */
1001 			break;
1002 		default:
1003 			panic("mpfps Base Table HOSED!");
1004 			/* NOTREACHED */
1005 		}
1006 
1007 		totalSize -= basetable_entry_types[type].length;
1008 		(u_char *) position += basetable_entry_types[type].length;
1009 	}
1010 
1011 	if (boot_cpu_id == -1)
1012 		panic("NO BSP found!");
1013 
1014 	/* report fact that its NOT a default configuration */
1015 	return 0;
1016 }
1017 
1018 
1019 void
1020 assign_apic_irq(int apic, int intpin, int irq)
1021 {
1022 	int x;
1023 
1024 	if (int_to_apicintpin[irq].ioapic != -1)
1025 		panic("assign_apic_irq: inconsistent table");
1026 
1027 	int_to_apicintpin[irq].ioapic = apic;
1028 	int_to_apicintpin[irq].int_pin = intpin;
1029 	int_to_apicintpin[irq].apic_address = ioapic[apic];
1030 	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
1031 
1032 	for (x = 0; x < nintrs; x++) {
1033 		if ((io_apic_ints[x].int_type == 0 ||
1034 		     io_apic_ints[x].int_type == 3) &&
1035 		    io_apic_ints[x].int_vector == 0xff &&
1036 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
1037 		    io_apic_ints[x].dst_apic_int == intpin)
1038 			io_apic_ints[x].int_vector = irq;
1039 	}
1040 }
1041 
1042 void
1043 revoke_apic_irq(int irq)
1044 {
1045 	int x;
1046 	int oldapic;
1047 	int oldintpin;
1048 
1049 	if (int_to_apicintpin[irq].ioapic == -1)
1050 		panic("assign_apic_irq: inconsistent table");
1051 
1052 	oldapic = int_to_apicintpin[irq].ioapic;
1053 	oldintpin = int_to_apicintpin[irq].int_pin;
1054 
1055 	int_to_apicintpin[irq].ioapic = -1;
1056 	int_to_apicintpin[irq].int_pin = 0;
1057 	int_to_apicintpin[irq].apic_address = NULL;
1058 	int_to_apicintpin[irq].redirindex = 0;
1059 
1060 	for (x = 0; x < nintrs; x++) {
1061 		if ((io_apic_ints[x].int_type == 0 ||
1062 		     io_apic_ints[x].int_type == 3) &&
1063 		    io_apic_ints[x].int_vector == 0xff &&
1064 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1065 		    io_apic_ints[x].dst_apic_int == oldintpin)
1066 			io_apic_ints[x].int_vector = 0xff;
1067 	}
1068 }
1069 
1070 
1071 static void
1072 allocate_apic_irq(int intr)
1073 {
1074 	int apic;
1075 	int intpin;
1076 	int irq;
1077 
1078 	if (io_apic_ints[intr].int_vector != 0xff)
1079 		return;		/* Interrupt handler already assigned */
1080 
1081 	if (io_apic_ints[intr].int_type != 0 &&
1082 	    (io_apic_ints[intr].int_type != 3 ||
1083 	     (io_apic_ints[intr].dst_apic_id == IO_TO_ID(0) &&
1084 	      io_apic_ints[intr].dst_apic_int == 0)))
1085 		return;		/* Not INT or ExtInt on != (0, 0) */
1086 
1087 	irq = 0;
1088 	while (irq < APIC_INTMAPSIZE &&
1089 	       int_to_apicintpin[irq].ioapic != -1)
1090 		irq++;
1091 
1092 	if (irq >= APIC_INTMAPSIZE)
1093 		return;		/* No free interrupt handlers */
1094 
1095 	apic = ID_TO_IO(io_apic_ints[intr].dst_apic_id);
1096 	intpin = io_apic_ints[intr].dst_apic_int;
1097 
1098 	assign_apic_irq(apic, intpin, irq);
1099 	io_apic_setup_intpin(apic, intpin);
1100 }
1101 
1102 
1103 static void
1104 swap_apic_id(int apic, int oldid, int newid)
1105 {
1106 	int x;
1107 	int oapic;
1108 
1109 
1110 	if (oldid == newid)
1111 		return;			/* Nothing to do */
1112 
1113 	printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1114 	       apic, oldid, newid);
1115 
1116 	/* Swap physical APIC IDs in interrupt entries */
1117 	for (x = 0; x < nintrs; x++) {
1118 		if (io_apic_ints[x].dst_apic_id == oldid)
1119 			io_apic_ints[x].dst_apic_id = newid;
1120 		else if (io_apic_ints[x].dst_apic_id == newid)
1121 			io_apic_ints[x].dst_apic_id = oldid;
1122 	}
1123 
1124 	/* Swap physical APIC IDs in IO_TO_ID mappings */
1125 	for (oapic = 0; oapic < mp_napics; oapic++)
1126 		if (IO_TO_ID(oapic) == newid)
1127 			break;
1128 
1129 	if (oapic < mp_napics) {
1130 		printf("Changing APIC ID for IO APIC #%d from "
1131 		       "%d to %d in MP table\n",
1132 		       oapic, newid, oldid);
1133 		IO_TO_ID(oapic) = oldid;
1134 	}
1135 	IO_TO_ID(apic) = newid;
1136 }
1137 
1138 
1139 static void
1140 fix_id_to_io_mapping(void)
1141 {
1142 	int x;
1143 
1144 	for (x = 0; x < NAPICID; x++)
1145 		ID_TO_IO(x) = -1;
1146 
1147 	for (x = 0; x <= mp_naps; x++)
1148 		if (CPU_TO_ID(x) < NAPICID)
1149 			ID_TO_IO(CPU_TO_ID(x)) = x;
1150 
1151 	for (x = 0; x < mp_napics; x++)
1152 		if (IO_TO_ID(x) < NAPICID)
1153 			ID_TO_IO(IO_TO_ID(x)) = x;
1154 }
1155 
1156 
1157 static int
1158 first_free_apic_id(void)
1159 {
1160 	int freeid, x;
1161 
1162 	for (freeid = 0; freeid < NAPICID; freeid++) {
1163 		for (x = 0; x <= mp_naps; x++)
1164 			if (CPU_TO_ID(x) == freeid)
1165 				break;
1166 		if (x <= mp_naps)
1167 			continue;
1168 		for (x = 0; x < mp_napics; x++)
1169 			if (IO_TO_ID(x) == freeid)
1170 				break;
1171 		if (x < mp_napics)
1172 			continue;
1173 		return freeid;
1174 	}
1175 	return freeid;
1176 }
1177 
1178 
1179 static int
1180 io_apic_id_acceptable(int apic, int id)
1181 {
1182 	int cpu;		/* Logical CPU number */
1183 	int oapic;		/* Logical IO APIC number for other IO APIC */
1184 
1185 	if (id >= NAPICID)
1186 		return 0;	/* Out of range */
1187 
1188 	for (cpu = 0; cpu <= mp_naps; cpu++)
1189 		if (CPU_TO_ID(cpu) == id)
1190 			return 0;	/* Conflict with CPU */
1191 
1192 	for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1193 		if (IO_TO_ID(oapic) == id)
1194 			return 0;	/* Conflict with other APIC */
1195 
1196 	return 1;		/* ID is acceptable for IO APIC */
1197 }
1198 
1199 
1200 /*
1201  * parse an Intel MP specification table
1202  */
1203 static void
1204 fix_mp_table(void)
1205 {
1206 	int	x;
1207 	int	id;
1208 	int	bus_0 = 0;	/* Stop GCC warning */
1209 	int	bus_pci = 0;	/* Stop GCC warning */
1210 	int	num_pci_bus;
1211 	int	apic;		/* IO APIC unit number */
1212 	int     freeid;		/* Free physical APIC ID */
1213 	int	physid;		/* Current physical IO APIC ID */
1214 
1215 	/*
1216 	 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1217 	 * did it wrong.  The MP spec says that when more than 1 PCI bus
1218 	 * exists the BIOS must begin with bus entries for the PCI bus and use
1219 	 * actual PCI bus numbering.  This implies that when only 1 PCI bus
1220 	 * exists the BIOS can choose to ignore this ordering, and indeed many
1221 	 * MP motherboards do ignore it.  This causes a problem when the PCI
1222 	 * sub-system makes requests of the MP sub-system based on PCI bus
1223 	 * numbers.	So here we look for the situation and renumber the
1224 	 * busses and associated INTs in an effort to "make it right".
1225 	 */
1226 
1227 	/* find bus 0, PCI bus, count the number of PCI busses */
1228 	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1229 		if (bus_data[x].bus_id == 0) {
1230 			bus_0 = x;
1231 		}
1232 		if (bus_data[x].bus_type == PCI) {
1233 			++num_pci_bus;
1234 			bus_pci = x;
1235 		}
1236 	}
1237 	/*
1238 	 * bus_0 == slot of bus with ID of 0
1239 	 * bus_pci == slot of last PCI bus encountered
1240 	 */
1241 
1242 	/* check the 1 PCI bus case for sanity */
1243 	/* if it is number 0 all is well */
1244 	if (num_pci_bus == 1 &&
1245 	    bus_data[bus_pci].bus_id != 0) {
1246 
1247 		/* mis-numbered, swap with whichever bus uses slot 0 */
1248 
1249 		/* swap the bus entry types */
1250 		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1251 		bus_data[bus_0].bus_type = PCI;
1252 
1253 		/* swap each relavant INTerrupt entry */
1254 		id = bus_data[bus_pci].bus_id;
1255 		for (x = 0; x < nintrs; ++x) {
1256 			if (io_apic_ints[x].src_bus_id == id) {
1257 				io_apic_ints[x].src_bus_id = 0;
1258 			}
1259 			else if (io_apic_ints[x].src_bus_id == 0) {
1260 				io_apic_ints[x].src_bus_id = id;
1261 			}
1262 		}
1263 	}
1264 
1265 	/* Assign IO APIC IDs.
1266 	 *
1267 	 * First try the existing ID. If a conflict is detected, try
1268 	 * the ID in the MP table.  If a conflict is still detected, find
1269 	 * a free id.
1270 	 *
1271 	 * We cannot use the ID_TO_IO table before all conflicts has been
1272 	 * resolved and the table has been corrected.
1273 	 */
1274 	for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1275 
1276 		/* First try to use the value set by the BIOS */
1277 		physid = io_apic_get_id(apic);
1278 		if (io_apic_id_acceptable(apic, physid)) {
1279 			if (IO_TO_ID(apic) != physid)
1280 				swap_apic_id(apic, IO_TO_ID(apic), physid);
1281 			continue;
1282 		}
1283 
1284 		/* Then check if the value in the MP table is acceptable */
1285 		if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1286 			continue;
1287 
1288 		/* Last resort, find a free APIC ID and use it */
1289 		freeid = first_free_apic_id();
1290 		if (freeid >= NAPICID)
1291 			panic("No free physical APIC IDs found");
1292 
1293 		if (io_apic_id_acceptable(apic, freeid)) {
1294 			swap_apic_id(apic, IO_TO_ID(apic), freeid);
1295 			continue;
1296 		}
1297 		panic("Free physical APIC ID not usable");
1298 	}
1299 	fix_id_to_io_mapping();
1300 
1301 	/* detect and fix broken Compaq MP table */
1302 	if (apic_int_type(0, 0) == -1) {
1303 		printf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
1304 		io_apic_ints[nintrs].int_type = 3;	/* ExtInt */
1305 		io_apic_ints[nintrs].int_vector = 0xff;	/* Unassigned */
1306 		/* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
1307 		io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
1308 		io_apic_ints[nintrs].dst_apic_int = 0;	/* Pin 0 */
1309 		nintrs++;
1310 	}
1311 }
1312 
1313 
1314 /* Assign low level interrupt handlers */
1315 static void
1316 setup_apic_irq_mapping(void)
1317 {
1318 	int	x;
1319 	int	int_vector;
1320 
1321 	/* Clear array */
1322 	for (x = 0; x < APIC_INTMAPSIZE; x++) {
1323 		int_to_apicintpin[x].ioapic = -1;
1324 		int_to_apicintpin[x].int_pin = 0;
1325 		int_to_apicintpin[x].apic_address = NULL;
1326 		int_to_apicintpin[x].redirindex = 0;
1327 	}
1328 
1329 	/* First assign ISA/EISA interrupts */
1330 	for (x = 0; x < nintrs; x++) {
1331 		int_vector = io_apic_ints[x].src_bus_irq;
1332 		if (int_vector < APIC_INTMAPSIZE &&
1333 		    io_apic_ints[x].int_vector == 0xff &&
1334 		    int_to_apicintpin[int_vector].ioapic == -1 &&
1335 		    (apic_int_is_bus_type(x, ISA) ||
1336 		     apic_int_is_bus_type(x, EISA)) &&
1337 		    io_apic_ints[x].int_type == 0) {
1338 			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1339 					io_apic_ints[x].dst_apic_int,
1340 					int_vector);
1341 		}
1342 	}
1343 
1344 	/* Assign ExtInt entry if no ISA/EISA interrupt 0 entry */
1345 	for (x = 0; x < nintrs; x++) {
1346 		if (io_apic_ints[x].dst_apic_int == 0 &&
1347 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1348 		    io_apic_ints[x].int_vector == 0xff &&
1349 		    int_to_apicintpin[0].ioapic == -1 &&
1350 		    io_apic_ints[x].int_type == 3) {
1351 			assign_apic_irq(0, 0, 0);
1352 			break;
1353 		}
1354 	}
1355 	/* PCI interrupt assignment is deferred */
1356 }
1357 
1358 
1359 static int
1360 processor_entry(proc_entry_ptr entry, int cpu)
1361 {
1362 	/* check for usability */
1363 	if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1364 		return 0;
1365 
1366 	if(entry->apic_id >= NAPICID)
1367 		panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1368 	/* check for BSP flag */
1369 	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1370 		boot_cpu_id = entry->apic_id;
1371 		CPU_TO_ID(0) = entry->apic_id;
1372 		ID_TO_CPU(entry->apic_id) = 0;
1373 		return 0;	/* its already been counted */
1374 	}
1375 
1376 	/* add another AP to list, if less than max number of CPUs */
1377 	else if (cpu < MAXCPU) {
1378 		CPU_TO_ID(cpu) = entry->apic_id;
1379 		ID_TO_CPU(entry->apic_id) = cpu;
1380 		return 1;
1381 	}
1382 
1383 	return 0;
1384 }
1385 
1386 
1387 static int
1388 bus_entry(bus_entry_ptr entry, int bus)
1389 {
1390 	int     x;
1391 	char    c, name[8];
1392 
1393 	/* encode the name into an index */
1394 	for (x = 0; x < 6; ++x) {
1395 		if ((c = entry->bus_type[x]) == ' ')
1396 			break;
1397 		name[x] = c;
1398 	}
1399 	name[x] = '\0';
1400 
1401 	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1402 		panic("unknown bus type: '%s'", name);
1403 
1404 	bus_data[bus].bus_id = entry->bus_id;
1405 	bus_data[bus].bus_type = x;
1406 
1407 	return 1;
1408 }
1409 
1410 
1411 static int
1412 io_apic_entry(io_apic_entry_ptr entry, int apic)
1413 {
1414 	if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1415 		return 0;
1416 
1417 	IO_TO_ID(apic) = entry->apic_id;
1418 	if (entry->apic_id < NAPICID)
1419 		ID_TO_IO(entry->apic_id) = apic;
1420 
1421 	return 1;
1422 }
1423 
1424 
1425 static int
1426 lookup_bus_type(char *name)
1427 {
1428 	int     x;
1429 
1430 	for (x = 0; x < MAX_BUSTYPE; ++x)
1431 		if (strcmp(bus_type_table[x].name, name) == 0)
1432 			return bus_type_table[x].type;
1433 
1434 	return UNKNOWN_BUSTYPE;
1435 }
1436 
1437 
1438 static int
1439 int_entry(int_entry_ptr entry, int intr)
1440 {
1441 	int apic;
1442 
1443 	io_apic_ints[intr].int_type = entry->int_type;
1444 	io_apic_ints[intr].int_flags = entry->int_flags;
1445 	io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1446 	io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1447 	if (entry->dst_apic_id == 255) {
1448 		/* This signal goes to all IO APICS.  Select an IO APIC
1449 		   with sufficient number of interrupt pins */
1450 		for (apic = 0; apic < mp_napics; apic++)
1451 			if (((io_apic_read(apic, IOAPIC_VER) &
1452 			      IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1453 			    entry->dst_apic_int)
1454 				break;
1455 		if (apic < mp_napics)
1456 			io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1457 		else
1458 			io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1459 	} else
1460 		io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1461 	io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1462 
1463 	return 1;
1464 }
1465 
1466 
1467 static int
1468 apic_int_is_bus_type(int intr, int bus_type)
1469 {
1470 	int     bus;
1471 
1472 	for (bus = 0; bus < mp_nbusses; ++bus)
1473 		if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1474 		    && ((int) bus_data[bus].bus_type == bus_type))
1475 			return 1;
1476 
1477 	return 0;
1478 }
1479 
1480 
1481 /*
1482  * Given a traditional ISA INT mask, return an APIC mask.
1483  */
1484 u_int
1485 isa_apic_mask(u_int isa_mask)
1486 {
1487 	int isa_irq;
1488 	int apic_pin;
1489 
1490 #if defined(SKIP_IRQ15_REDIRECT)
1491 	if (isa_mask == (1 << 15)) {
1492 		printf("skipping ISA IRQ15 redirect\n");
1493 		return isa_mask;
1494 	}
1495 #endif  /* SKIP_IRQ15_REDIRECT */
1496 
1497 	isa_irq = ffs(isa_mask);		/* find its bit position */
1498 	if (isa_irq == 0)			/* doesn't exist */
1499 		return 0;
1500 	--isa_irq;				/* make it zero based */
1501 
1502 	apic_pin = isa_apic_irq(isa_irq);	/* look for APIC connection */
1503 	if (apic_pin == -1)
1504 		return 0;
1505 
1506 	return (1 << apic_pin);			/* convert pin# to a mask */
1507 }
1508 
1509 
1510 /*
1511  * Determine which APIC pin an ISA/EISA INT is attached to.
1512  */
1513 #define INTTYPE(I)	(io_apic_ints[(I)].int_type)
1514 #define INTPIN(I)	(io_apic_ints[(I)].dst_apic_int)
1515 #define INTIRQ(I)	(io_apic_ints[(I)].int_vector)
1516 #define INTAPIC(I)	(ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1517 
1518 #define SRCBUSIRQ(I)	(io_apic_ints[(I)].src_bus_irq)
1519 int
1520 isa_apic_irq(int isa_irq)
1521 {
1522 	int     intr;
1523 
1524 	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
1525 		if (INTTYPE(intr) == 0) {		/* standard INT */
1526 			if (SRCBUSIRQ(intr) == isa_irq) {
1527 				if (apic_int_is_bus_type(intr, ISA) ||
1528 			            apic_int_is_bus_type(intr, EISA)) {
1529 					if (INTIRQ(intr) == 0xff)
1530 						return -1; /* unassigned */
1531 					return INTIRQ(intr);	/* found */
1532 				}
1533 			}
1534 		}
1535 	}
1536 	return -1;					/* NOT found */
1537 }
1538 
1539 
1540 /*
1541  * Determine which APIC pin a PCI INT is attached to.
1542  */
1543 #define SRCBUSID(I)	(io_apic_ints[(I)].src_bus_id)
1544 #define SRCBUSDEVICE(I)	((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1545 #define SRCBUSLINE(I)	(io_apic_ints[(I)].src_bus_irq & 0x03)
1546 int
1547 pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1548 {
1549 	int     intr;
1550 
1551 	--pciInt;					/* zero based */
1552 
1553 	for (intr = 0; intr < nintrs; ++intr)		/* check each record */
1554 		if ((INTTYPE(intr) == 0)		/* standard INT */
1555 		    && (SRCBUSID(intr) == pciBus)
1556 		    && (SRCBUSDEVICE(intr) == pciDevice)
1557 		    && (SRCBUSLINE(intr) == pciInt))	/* a candidate IRQ */
1558 			if (apic_int_is_bus_type(intr, PCI)) {
1559 				if (INTIRQ(intr) == 0xff)
1560 					allocate_apic_irq(intr);
1561 				if (INTIRQ(intr) == 0xff)
1562 					return -1;	/* unassigned */
1563 				return INTIRQ(intr);	/* exact match */
1564 			}
1565 
1566 	return -1;					/* NOT found */
1567 }
1568 
1569 int
1570 next_apic_irq(int irq)
1571 {
1572 	int intr, ointr;
1573 	int bus, bustype;
1574 
1575 	bus = 0;
1576 	bustype = 0;
1577 	for (intr = 0; intr < nintrs; intr++) {
1578 		if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1579 			continue;
1580 		bus = SRCBUSID(intr);
1581 		bustype = apic_bus_type(bus);
1582 		if (bustype != ISA &&
1583 		    bustype != EISA &&
1584 		    bustype != PCI)
1585 			continue;
1586 		break;
1587 	}
1588 	if (intr >= nintrs) {
1589 		return -1;
1590 	}
1591 	for (ointr = intr + 1; ointr < nintrs; ointr++) {
1592 		if (INTTYPE(ointr) != 0)
1593 			continue;
1594 		if (bus != SRCBUSID(ointr))
1595 			continue;
1596 		if (bustype == PCI) {
1597 			if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1598 				continue;
1599 			if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1600 				continue;
1601 		}
1602 		if (bustype == ISA || bustype == EISA) {
1603 			if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1604 				continue;
1605 		}
1606 		if (INTPIN(intr) == INTPIN(ointr))
1607 			continue;
1608 		break;
1609 	}
1610 	if (ointr >= nintrs) {
1611 		return -1;
1612 	}
1613 	return INTIRQ(ointr);
1614 }
1615 #undef SRCBUSLINE
1616 #undef SRCBUSDEVICE
1617 #undef SRCBUSID
1618 #undef SRCBUSIRQ
1619 
1620 #undef INTPIN
1621 #undef INTIRQ
1622 #undef INTAPIC
1623 #undef INTTYPE
1624 
1625 
1626 /*
1627  * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1628  *
1629  * XXX FIXME:
1630  *  Exactly what this means is unclear at this point.  It is a solution
1631  *  for motherboards that redirect the MBIRQ0 pin.  Generically a motherboard
1632  *  could route any of the ISA INTs to upper (>15) IRQ values.  But most would
1633  *  NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1634  *  option.
1635  */
1636 int
1637 undirect_isa_irq(int rirq)
1638 {
1639 #if defined(READY)
1640 	if (bootverbose)
1641 	    printf("Freeing redirected ISA irq %d.\n", rirq);
1642 	/** FIXME: tickle the MB redirector chip */
1643 	return -1;
1644 #else
1645 	if (bootverbose)
1646 	    printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1647 	return 0;
1648 #endif  /* READY */
1649 }
1650 
1651 
1652 /*
1653  * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1654  */
1655 int
1656 undirect_pci_irq(int rirq)
1657 {
1658 #if defined(READY)
1659 	if (bootverbose)
1660 		printf("Freeing redirected PCI irq %d.\n", rirq);
1661 
1662 	/** FIXME: tickle the MB redirector chip */
1663 	return -1;
1664 #else
1665 	if (bootverbose)
1666 		printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1667 		       rirq);
1668 	return 0;
1669 #endif  /* READY */
1670 }
1671 
1672 
1673 /*
1674  * given a bus ID, return:
1675  *  the bus type if found
1676  *  -1 if NOT found
1677  */
1678 int
1679 apic_bus_type(int id)
1680 {
1681 	int     x;
1682 
1683 	for (x = 0; x < mp_nbusses; ++x)
1684 		if (bus_data[x].bus_id == id)
1685 			return bus_data[x].bus_type;
1686 
1687 	return -1;
1688 }
1689 
1690 
1691 /*
1692  * given a LOGICAL APIC# and pin#, return:
1693  *  the associated src bus ID if found
1694  *  -1 if NOT found
1695  */
1696 int
1697 apic_src_bus_id(int apic, int pin)
1698 {
1699 	int     x;
1700 
1701 	/* search each of the possible INTerrupt sources */
1702 	for (x = 0; x < nintrs; ++x)
1703 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1704 		    (pin == io_apic_ints[x].dst_apic_int))
1705 			return (io_apic_ints[x].src_bus_id);
1706 
1707 	return -1;		/* NOT found */
1708 }
1709 
1710 
1711 /*
1712  * given a LOGICAL APIC# and pin#, return:
1713  *  the associated src bus IRQ if found
1714  *  -1 if NOT found
1715  */
1716 int
1717 apic_src_bus_irq(int apic, int pin)
1718 {
1719 	int     x;
1720 
1721 	for (x = 0; x < nintrs; x++)
1722 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1723 		    (pin == io_apic_ints[x].dst_apic_int))
1724 			return (io_apic_ints[x].src_bus_irq);
1725 
1726 	return -1;		/* NOT found */
1727 }
1728 
1729 
1730 /*
1731  * given a LOGICAL APIC# and pin#, return:
1732  *  the associated INTerrupt type if found
1733  *  -1 if NOT found
1734  */
1735 int
1736 apic_int_type(int apic, int pin)
1737 {
1738 	int     x;
1739 
1740 	/* search each of the possible INTerrupt sources */
1741 	for (x = 0; x < nintrs; ++x)
1742 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1743 		    (pin == io_apic_ints[x].dst_apic_int))
1744 			return (io_apic_ints[x].int_type);
1745 
1746 	return -1;		/* NOT found */
1747 }
1748 
1749 int
1750 apic_irq(int apic, int pin)
1751 {
1752 	int x;
1753 	int res;
1754 
1755 	for (x = 0; x < nintrs; ++x)
1756 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1757 		    (pin == io_apic_ints[x].dst_apic_int)) {
1758 			res = io_apic_ints[x].int_vector;
1759 			if (res == 0xff)
1760 				return -1;
1761 			if (apic != int_to_apicintpin[res].ioapic)
1762 				panic("apic_irq: inconsistent table");
1763 			if (pin != int_to_apicintpin[res].int_pin)
1764 				panic("apic_irq inconsistent table (2)");
1765 			return res;
1766 		}
1767 	return -1;
1768 }
1769 
1770 
1771 /*
1772  * given a LOGICAL APIC# and pin#, return:
1773  *  the associated trigger mode if found
1774  *  -1 if NOT found
1775  */
1776 int
1777 apic_trigger(int apic, int pin)
1778 {
1779 	int     x;
1780 
1781 	/* search each of the possible INTerrupt sources */
1782 	for (x = 0; x < nintrs; ++x)
1783 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1784 		    (pin == io_apic_ints[x].dst_apic_int))
1785 			return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1786 
1787 	return -1;		/* NOT found */
1788 }
1789 
1790 
1791 /*
1792  * given a LOGICAL APIC# and pin#, return:
1793  *  the associated 'active' level if found
1794  *  -1 if NOT found
1795  */
1796 int
1797 apic_polarity(int apic, int pin)
1798 {
1799 	int     x;
1800 
1801 	/* search each of the possible INTerrupt sources */
1802 	for (x = 0; x < nintrs; ++x)
1803 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1804 		    (pin == io_apic_ints[x].dst_apic_int))
1805 			return (io_apic_ints[x].int_flags & 0x03);
1806 
1807 	return -1;		/* NOT found */
1808 }
1809 
1810 
1811 /*
1812  * set data according to MP defaults
1813  * FIXME: probably not complete yet...
1814  */
1815 static void
1816 default_mp_table(int type)
1817 {
1818 	int     ap_cpu_id;
1819 #if defined(APIC_IO)
1820 	int     io_apic_id;
1821 	int     pin;
1822 #endif	/* APIC_IO */
1823 
1824 #if 0
1825 	printf("  MP default config type: %d\n", type);
1826 	switch (type) {
1827 	case 1:
1828 		printf("   bus: ISA, APIC: 82489DX\n");
1829 		break;
1830 	case 2:
1831 		printf("   bus: EISA, APIC: 82489DX\n");
1832 		break;
1833 	case 3:
1834 		printf("   bus: EISA, APIC: 82489DX\n");
1835 		break;
1836 	case 4:
1837 		printf("   bus: MCA, APIC: 82489DX\n");
1838 		break;
1839 	case 5:
1840 		printf("   bus: ISA+PCI, APIC: Integrated\n");
1841 		break;
1842 	case 6:
1843 		printf("   bus: EISA+PCI, APIC: Integrated\n");
1844 		break;
1845 	case 7:
1846 		printf("   bus: MCA+PCI, APIC: Integrated\n");
1847 		break;
1848 	default:
1849 		printf("   future type\n");
1850 		break;
1851 		/* NOTREACHED */
1852 	}
1853 #endif	/* 0 */
1854 
1855 	boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
1856 	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1857 
1858 	/* BSP */
1859 	CPU_TO_ID(0) = boot_cpu_id;
1860 	ID_TO_CPU(boot_cpu_id) = 0;
1861 
1862 	/* one and only AP */
1863 	CPU_TO_ID(1) = ap_cpu_id;
1864 	ID_TO_CPU(ap_cpu_id) = 1;
1865 
1866 #if defined(APIC_IO)
1867 	/* one and only IO APIC */
1868 	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1869 
1870 	/*
1871 	 * sanity check, refer to MP spec section 3.6.6, last paragraph
1872 	 * necessary as some hardware isn't properly setting up the IO APIC
1873 	 */
1874 #if defined(REALLY_ANAL_IOAPICID_VALUE)
1875 	if (io_apic_id != 2) {
1876 #else
1877 	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1878 #endif	/* REALLY_ANAL_IOAPICID_VALUE */
1879 		io_apic_set_id(0, 2);
1880 		io_apic_id = 2;
1881 	}
1882 	IO_TO_ID(0) = io_apic_id;
1883 	ID_TO_IO(io_apic_id) = 0;
1884 #endif	/* APIC_IO */
1885 
1886 	/* fill out bus entries */
1887 	switch (type) {
1888 	case 1:
1889 	case 2:
1890 	case 3:
1891 	case 4:
1892 	case 5:
1893 	case 6:
1894 	case 7:
1895 		bus_data[0].bus_id = default_data[type - 1][1];
1896 		bus_data[0].bus_type = default_data[type - 1][2];
1897 		bus_data[1].bus_id = default_data[type - 1][3];
1898 		bus_data[1].bus_type = default_data[type - 1][4];
1899 		break;
1900 
1901 	/* case 4: case 7:		   MCA NOT supported */
1902 	default:		/* illegal/reserved */
1903 		panic("BAD default MP config: %d", type);
1904 		/* NOTREACHED */
1905 	}
1906 
1907 #if defined(APIC_IO)
1908 	/* general cases from MP v1.4, table 5-2 */
1909 	for (pin = 0; pin < 16; ++pin) {
1910 		io_apic_ints[pin].int_type = 0;
1911 		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
1912 		io_apic_ints[pin].src_bus_id = 0;
1913 		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
1914 		io_apic_ints[pin].dst_apic_id = io_apic_id;
1915 		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
1916 	}
1917 
1918 	/* special cases from MP v1.4, table 5-2 */
1919 	if (type == 2) {
1920 		io_apic_ints[2].int_type = 0xff;	/* N/C */
1921 		io_apic_ints[13].int_type = 0xff;	/* N/C */
1922 #if !defined(APIC_MIXED_MODE)
1923 		/** FIXME: ??? */
1924 		panic("sorry, can't support type 2 default yet");
1925 #endif	/* APIC_MIXED_MODE */
1926 	}
1927 	else
1928 		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */
1929 
1930 	if (type == 7)
1931 		io_apic_ints[0].int_type = 0xff;	/* N/C */
1932 	else
1933 		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
1934 #endif	/* APIC_IO */
1935 }
1936 
1937 
1938 /*
1939  * start each AP in our list
1940  */
1941 static int
1942 start_all_aps(u_int boot_addr)
1943 {
1944 	int     x, i, pg;
1945 	u_char  mpbiosreason;
1946 	u_long  mpbioswarmvec;
1947 	struct globaldata *gd;
1948 	char *stack;
1949 
1950 	POSTCODE(START_ALL_APS_POST);
1951 
1952 	/* initialize BSP's local APIC */
1953 	apic_initialize();
1954 	bsp_apic_ready = 1;
1955 
1956 	/* install the AP 1st level boot code */
1957 	install_ap_tramp(boot_addr);
1958 
1959 
1960 	/* save the current value of the warm-start vector */
1961 	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
1962 #ifndef PC98
1963 	outb(CMOS_REG, BIOS_RESET);
1964 	mpbiosreason = inb(CMOS_DATA);
1965 #endif
1966 
1967 	/* record BSP in CPU map */
1968 	all_cpus = 1;
1969 
1970 	/* set up 0 -> 4MB P==V mapping for AP boot */
1971 	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
1972 	invltlb();
1973 
1974 	/* start each AP */
1975 	for (x = 1; x <= mp_naps; ++x) {
1976 
1977 		/* This is a bit verbose, it will go away soon.  */
1978 
1979 		/* first page of AP's private space */
1980 		pg = x * i386_btop(sizeof(struct privatespace));
1981 
1982 		/* allocate a new private data page */
1983 		gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
1984 
1985 		/* wire it into the private page table page */
1986 		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
1987 
1988 		/* allocate and set up an idle stack data page */
1989 		stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
1990 		for (i = 0; i < UPAGES; i++)
1991 			SMPpt[pg + 1 + i] = (pt_entry_t)
1992 			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1993 
1994 		/* prime data page for it to use */
1995 		SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
1996 		gd->gd_cpuid = x;
1997 
1998 		/* setup a vector to our boot code */
1999 		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
2000 		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
2001 #ifndef PC98
2002 		outb(CMOS_REG, BIOS_RESET);
2003 		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
2004 #endif
2005 
2006 		bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
2007 		bootAP = x;
2008 
2009 		/* attempt to start the Application Processor */
2010 		CHECK_INIT(99);	/* setup checkpoints */
2011 		if (!start_ap(x, boot_addr)) {
2012 			printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2013 			CHECK_PRINT("trace");	/* show checkpoints */
2014 			/* better panic as the AP may be running loose */
2015 			printf("panic y/n? [y] ");
2016 			if (cngetc() != 'n')
2017 				panic("bye-bye");
2018 		}
2019 		CHECK_PRINT("trace");		/* show checkpoints */
2020 
2021 		/* record its version info */
2022 		cpu_apic_versions[x] = cpu_apic_versions[0];
2023 
2024 		all_cpus |= (1 << x);		/* record AP in CPU map */
2025 	}
2026 
2027 	/* build our map of 'other' CPUs */
2028 	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2029 
2030 	/* fill in our (BSP) APIC version */
2031 	cpu_apic_versions[0] = lapic.version;
2032 
2033 	/* restore the warmstart vector */
2034 	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
2035 #ifndef PC98
2036 	outb(CMOS_REG, BIOS_RESET);
2037 	outb(CMOS_DATA, mpbiosreason);
2038 #endif
2039 
2040 	/*
2041 	 * Set up the idle context for the BSP.  Similar to above except
2042 	 * that some was done by locore, some by pmap.c and some is implicit
2043 	 * because the BSP is cpu#0 and the page is initially zero, and also
2044 	 * because we can refer to variables by name on the BSP..
2045 	 */
2046 
2047 	/* Allocate and setup BSP idle stack */
2048 	stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
2049 	for (i = 0; i < UPAGES; i++)
2050 		SMPpt[1 + i] = (pt_entry_t)
2051 		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
2052 
2053 	*(int *)PTD = 0;
2054 	pmap_set_opt();
2055 
2056 	/* number of APs actually started */
2057 	return mp_ncpus - 1;
2058 }
2059 
2060 
2061 /*
2062  * load the 1st level AP boot code into base memory.
2063  */
2064 
2065 /* targets for relocation */
2066 extern void bigJump(void);
2067 extern void bootCodeSeg(void);
2068 extern void bootDataSeg(void);
2069 extern void MPentry(void);
2070 extern u_int MP_GDT;
2071 extern u_int mp_gdtbase;
2072 
2073 static void
2074 install_ap_tramp(u_int boot_addr)
2075 {
2076 	int     x;
2077 	int     size = *(int *) ((u_long) & bootMP_size);
2078 	u_char *src = (u_char *) ((u_long) bootMP);
2079 	u_char *dst = (u_char *) boot_addr + KERNBASE;
2080 	u_int   boot_base = (u_int) bootMP;
2081 	u_int8_t *dst8;
2082 	u_int16_t *dst16;
2083 	u_int32_t *dst32;
2084 
2085 	POSTCODE(INSTALL_AP_TRAMP_POST);
2086 
2087 	for (x = 0; x < size; ++x)
2088 		*dst++ = *src++;
2089 
2090 	/*
2091 	 * modify addresses in code we just moved to basemem. unfortunately we
2092 	 * need fairly detailed info about mpboot.s for this to work.  changes
2093 	 * to mpboot.s might require changes here.
2094 	 */
2095 
2096 	/* boot code is located in KERNEL space */
2097 	dst = (u_char *) boot_addr + KERNBASE;
2098 
2099 	/* modify the lgdt arg */
2100 	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2101 	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2102 
2103 	/* modify the ljmp target for MPentry() */
2104 	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2105 	*dst32 = ((u_int) MPentry - KERNBASE);
2106 
2107 	/* modify the target for boot code segment */
2108 	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2109 	dst8 = (u_int8_t *) (dst16 + 1);
2110 	*dst16 = (u_int) boot_addr & 0xffff;
2111 	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2112 
2113 	/* modify the target for boot data segment */
2114 	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2115 	dst8 = (u_int8_t *) (dst16 + 1);
2116 	*dst16 = (u_int) boot_addr & 0xffff;
2117 	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2118 }
2119 
2120 
2121 /*
2122  * this function starts the AP (application processor) identified
2123  * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
2124  * to accomplish this.  This is necessary because of the nuances
2125  * of the different hardware we might encounter.  It ain't pretty,
2126  * but it seems to work.
2127  */
2128 static int
2129 start_ap(int logical_cpu, u_int boot_addr)
2130 {
2131 	int     physical_cpu;
2132 	int     vector;
2133 	int     cpus;
2134 	u_long  icr_lo, icr_hi;
2135 
2136 	POSTCODE(START_AP_POST);
2137 
2138 	/* get the PHYSICAL APIC ID# */
2139 	physical_cpu = CPU_TO_ID(logical_cpu);
2140 
2141 	/* calculate the vector */
2142 	vector = (boot_addr >> 12) & 0xff;
2143 
2144 	/* used as a watchpoint to signal AP startup */
2145 	cpus = mp_ncpus;
2146 
2147 	/*
2148 	 * first we do an INIT/RESET IPI this INIT IPI might be run, reseting
2149 	 * and running the target CPU. OR this INIT IPI might be latched (P5
2150 	 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be
2151 	 * ignored.
2152 	 */
2153 
2154 	/* setup the address for the target AP */
2155 	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
2156 	icr_hi |= (physical_cpu << 24);
2157 	lapic.icr_hi = icr_hi;
2158 
2159 	/* do an INIT IPI: assert RESET */
2160 	icr_lo = lapic.icr_lo & 0xfff00000;
2161 	lapic.icr_lo = icr_lo | 0x0000c500;
2162 
2163 	/* wait for pending status end */
2164 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2165 		 /* spin */ ;
2166 
2167 	/* do an INIT IPI: deassert RESET */
2168 	lapic.icr_lo = icr_lo | 0x00008500;
2169 
2170 	/* wait for pending status end */
2171 	u_sleep(10000);		/* wait ~10mS */
2172 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2173 		 /* spin */ ;
2174 
2175 	/*
2176 	 * next we do a STARTUP IPI: the previous INIT IPI might still be
2177 	 * latched, (P5 bug) this 1st STARTUP would then terminate
2178 	 * immediately, and the previously started INIT IPI would continue. OR
2179 	 * the previous INIT IPI has already run. and this STARTUP IPI will
2180 	 * run. OR the previous INIT IPI was ignored. and this STARTUP IPI
2181 	 * will run.
2182 	 */
2183 
2184 	/* do a STARTUP IPI */
2185 	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2186 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2187 		 /* spin */ ;
2188 	u_sleep(200);		/* wait ~200uS */
2189 
2190 	/*
2191 	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
2192 	 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
2193 	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
2194 	 * recognized after hardware RESET or INIT IPI.
2195 	 */
2196 
2197 	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2198 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2199 		 /* spin */ ;
2200 	u_sleep(200);		/* wait ~200uS */
2201 
2202 	/* wait for it to start */
2203 	set_apic_timer(5000000);/* == 5 seconds */
2204 	while (read_apic_timer())
2205 		if (mp_ncpus > cpus)
2206 			return 1;	/* return SUCCESS */
2207 
2208 	return 0;		/* return FAILURE */
2209 }
2210 
2211 /*
2212  * Flush the TLB on all other CPU's
2213  *
2214  * XXX: Needs to handshake and wait for completion before proceding.
2215  */
2216 void
2217 smp_invltlb(void)
2218 {
2219 #if defined(APIC_IO)
2220 	if (smp_started && invltlb_ok)
2221 		all_but_self_ipi(XINVLTLB_OFFSET);
2222 #endif  /* APIC_IO */
2223 }
2224 
2225 void
2226 invlpg(u_int addr)
2227 {
2228 	__asm   __volatile("invlpg (%0)"::"r"(addr):"memory");
2229 
2230 	/* send a message to the other CPUs */
2231 	smp_invltlb();
2232 }
2233 
2234 void
2235 invltlb(void)
2236 {
2237 	u_long  temp;
2238 
2239 	/*
2240 	 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
2241 	 * inlined.
2242 	 */
2243 	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");
2244 
2245 	/* send a message to the other CPUs */
2246 	smp_invltlb();
2247 }
2248 
2249 
2250 /*
2251  * This is called once the rest of the system is up and running and we're
2252  * ready to let the AP's out of the pen.
2253  */
2254 void
2255 ap_init(void)
2256 {
2257 	u_int	apic_id;
2258 
2259 	/* spin until all the AP's are ready */
2260 	while (!aps_ready)
2261 		/* spin */ ;
2262 
2263 	/*
2264 	 * Set curproc to our per-cpu idleproc so that mutexes have
2265 	 * something unique to lock with.
2266 	 */
2267 	PCPU_SET(curproc, PCPU_GET(idleproc));
2268 
2269 	/* lock against other AP's that are waking up */
2270 	mtx_lock_spin(&ap_boot_mtx);
2271 
2272 	/* BSP may have changed PTD while we're waiting for the lock */
2273 	cpu_invltlb();
2274 
2275 	smp_cpus++;
2276 
2277 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2278 	lidt(&r_idt);
2279 #endif
2280 
2281 	/* Build our map of 'other' CPUs. */
2282 	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2283 
2284 	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
2285 
2286 	/* set up CPU registers and state */
2287 	cpu_setregs();
2288 
2289 	/* set up FPU state on the AP */
2290 	npxinit(__INITIAL_NPXCW__);
2291 
2292 	/* A quick check from sanity claus */
2293 	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2294 	if (PCPU_GET(cpuid) != apic_id) {
2295 		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
2296 		printf("SMP: apic_id = %d\n", apic_id);
2297 		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2298 		panic("cpuid mismatch! boom!!");
2299 	}
2300 
2301 	/* Init local apic for irq's */
2302 	apic_initialize();
2303 
2304 	/* Set memory range attributes for this CPU to match the BSP */
2305 	mem_range_AP_init();
2306 
2307 	/*
2308 	 * Activate smp_invltlb, although strictly speaking, this isn't
2309 	 * quite correct yet.  We should have a bitfield for cpus willing
2310 	 * to accept TLB flush IPI's or something and sync them.
2311 	 */
2312 	if (smp_cpus == mp_ncpus) {
2313 		invltlb_ok = 1;
2314 		smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
2315 		smp_active = 1;	 /* historic */
2316 	}
2317 
2318 	/* let other AP's wake up now */
2319 	mtx_unlock_spin(&ap_boot_mtx);
2320 
2321 	/* wait until all the AP's are up */
2322 	while (smp_started == 0)
2323 		; /* nothing */
2324 
2325 	microuptime(PCPU_PTR(switchtime));
2326 	PCPU_SET(switchticks, ticks);
2327 
2328 	/* ok, now grab sched_lock and enter the scheduler */
2329 	enable_intr();
2330 	mtx_lock_spin(&sched_lock);
2331 	cpu_throw();	/* doesn't return */
2332 
2333 	panic("scheduler returned us to ap_init");
2334 }
2335 
2336 #ifdef BETTER_CLOCK
2337 
2338 #define CHECKSTATE_USER	0
2339 #define CHECKSTATE_SYS	1
2340 #define CHECKSTATE_INTR	2
2341 
2342 /* Do not staticize.  Used from apic_vector.s */
2343 struct proc*	checkstate_curproc[MAXCPU];
2344 int		checkstate_cpustate[MAXCPU];
2345 u_long		checkstate_pc[MAXCPU];
2346 
2347 #define PC_TO_INDEX(pc, prof)				\
2348         ((int)(((u_quad_t)((pc) - (prof)->pr_off) *	\
2349             (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
2350 
2351 static void
2352 addupc_intr_forwarded(struct proc *p, int id, int *astmap)
2353 {
2354 	int i;
2355 	struct uprof *prof;
2356 	u_long pc;
2357 
2358 	pc = checkstate_pc[id];
2359 	prof = &p->p_stats->p_prof;
2360 	if (pc >= prof->pr_off &&
2361 	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
2362 		mtx_assert(&sched_lock, MA_OWNED);
2363 		if ((p->p_sflag & PS_OWEUPC) == 0) {
2364 			prof->pr_addr = pc;
2365 			prof->pr_ticks = 1;
2366 			p->p_sflag |= PS_OWEUPC;
2367 		}
2368 		*astmap |= (1 << id);
2369 	}
2370 }
2371 
2372 static void
2373 forwarded_statclock(int id, int pscnt, int *astmap)
2374 {
2375 	struct pstats *pstats;
2376 	long rss;
2377 	struct rusage *ru;
2378 	struct vmspace *vm;
2379 	int cpustate;
2380 	struct proc *p;
2381 #ifdef GPROF
2382 	register struct gmonparam *g;
2383 	int i;
2384 #endif
2385 
2386 	mtx_assert(&sched_lock, MA_OWNED);
2387 	p = checkstate_curproc[id];
2388 	cpustate = checkstate_cpustate[id];
2389 
2390 	/* XXX */
2391 	if (p->p_ithd)
2392 		cpustate = CHECKSTATE_INTR;
2393 	else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2394 		cpustate = CHECKSTATE_SYS;
2395 
2396 	switch (cpustate) {
2397 	case CHECKSTATE_USER:
2398 		if (p->p_sflag & PS_PROFIL)
2399 			addupc_intr_forwarded(p, id, astmap);
2400 		if (pscnt > 1)
2401 			return;
2402 		p->p_uticks++;
2403 		if (p->p_nice > NZERO)
2404 			cp_time[CP_NICE]++;
2405 		else
2406 			cp_time[CP_USER]++;
2407 		break;
2408 	case CHECKSTATE_SYS:
2409 #ifdef GPROF
2410 		/*
2411 		 * Kernel statistics are just like addupc_intr, only easier.
2412 		 */
2413 		g = &_gmonparam;
2414 		if (g->state == GMON_PROF_ON) {
2415 			i = checkstate_pc[id] - g->lowpc;
2416 			if (i < g->textsize) {
2417 				i /= HISTFRACTION * sizeof(*g->kcount);
2418 				g->kcount[i]++;
2419 			}
2420 		}
2421 #endif
2422 		if (pscnt > 1)
2423 			return;
2424 
2425 		p->p_sticks++;
2426 		if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2427 			cp_time[CP_IDLE]++;
2428 		else
2429 			cp_time[CP_SYS]++;
2430 		break;
2431 	case CHECKSTATE_INTR:
2432 	default:
2433 #ifdef GPROF
2434 		/*
2435 		 * Kernel statistics are just like addupc_intr, only easier.
2436 		 */
2437 		g = &_gmonparam;
2438 		if (g->state == GMON_PROF_ON) {
2439 			i = checkstate_pc[id] - g->lowpc;
2440 			if (i < g->textsize) {
2441 				i /= HISTFRACTION * sizeof(*g->kcount);
2442 				g->kcount[i]++;
2443 			}
2444 		}
2445 #endif
2446 		if (pscnt > 1)
2447 			return;
2448 		KASSERT(p != NULL, ("NULL process in interrupt state"));
2449 		p->p_iticks++;
2450 		cp_time[CP_INTR]++;
2451 	}
2452 
2453 	schedclock(p);
2454 
2455 	/* Update resource usage integrals and maximums. */
2456 	if ((pstats = p->p_stats) != NULL &&
2457 	    (ru = &pstats->p_ru) != NULL &&
2458 	    (vm = p->p_vmspace) != NULL) {
2459 		ru->ru_ixrss += pgtok(vm->vm_tsize);
2460 		ru->ru_idrss += pgtok(vm->vm_dsize);
2461 		ru->ru_isrss += pgtok(vm->vm_ssize);
2462 		rss = pgtok(vmspace_resident_count(vm));
2463 		if (ru->ru_maxrss < rss)
2464 			ru->ru_maxrss = rss;
2465 	}
2466 }
2467 
2468 void
2469 forward_statclock(int pscnt)
2470 {
2471 	int map;
2472 	int id;
2473 	int i;
2474 
2475 	/* Kludge. We don't yet have separate locks for the interrupts
2476 	 * and the kernel. This means that we cannot let the other processors
2477 	 * handle complex interrupts while inhibiting them from entering
2478 	 * the kernel in a non-interrupt context.
2479 	 *
2480 	 * What we can do, without changing the locking mechanisms yet,
2481 	 * is letting the other processors handle a very simple interrupt
2482 	 * (wich determines the processor states), and do the main
2483 	 * work ourself.
2484 	 */
2485 
2486 	CTR1(KTR_SMP, "forward_statclock(%d)", pscnt);
2487 
2488 	if (!smp_started || !invltlb_ok || cold || panicstr)
2489 		return;
2490 
2491 	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle ) */
2492 
2493 	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2494 	checkstate_probed_cpus = 0;
2495 	if (map != 0)
2496 		selected_apic_ipi(map,
2497 				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2498 
2499 	i = 0;
2500 	while (checkstate_probed_cpus != map) {
2501 		/* spin */
2502 		i++;
2503 		if (i == 100000) {
2504 #ifdef BETTER_CLOCK_DIAGNOSTIC
2505 			printf("forward_statclock: checkstate %x\n",
2506 			       checkstate_probed_cpus);
2507 #endif
2508 			break;
2509 		}
2510 	}
2511 
2512 	/*
2513 	 * Step 2: walk through other processors processes, update ticks and
2514 	 * profiling info.
2515 	 */
2516 
2517 	map = 0;
2518 	for (id = 0; id < mp_ncpus; id++) {
2519 		if (id == PCPU_GET(cpuid))
2520 			continue;
2521 		if (((1 << id) & checkstate_probed_cpus) == 0)
2522 			continue;
2523 		forwarded_statclock(id, pscnt, &map);
2524 	}
2525 	if (map != 0) {
2526 		checkstate_need_ast |= map;
2527 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2528 		i = 0;
2529 		while ((checkstate_need_ast & map) != 0) {
2530 			/* spin */
2531 			i++;
2532 			if (i > 100000) {
2533 #ifdef BETTER_CLOCK_DIAGNOSTIC
2534 				printf("forward_statclock: dropped ast 0x%x\n",
2535 				       checkstate_need_ast & map);
2536 #endif
2537 				break;
2538 			}
2539 		}
2540 	}
2541 }
2542 
2543 void
2544 forward_hardclock(int pscnt)
2545 {
2546 	int map;
2547 	int id;
2548 	struct proc *p;
2549 	struct pstats *pstats;
2550 	int i;
2551 
2552 	/* Kludge. We don't yet have separate locks for the interrupts
2553 	 * and the kernel. This means that we cannot let the other processors
2554 	 * handle complex interrupts while inhibiting them from entering
2555 	 * the kernel in a non-interrupt context.
2556 	 *
2557 	 * What we can do, without changing the locking mechanisms yet,
2558 	 * is letting the other processors handle a very simple interrupt
2559 	 * (wich determines the processor states), and do the main
2560 	 * work ourself.
2561 	 */
2562 
2563 	CTR1(KTR_SMP, "forward_hardclock(%d)", pscnt);
2564 
2565 	if (!smp_started || !invltlb_ok || cold || panicstr)
2566 		return;
2567 
2568 	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2569 
2570 	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2571 	checkstate_probed_cpus = 0;
2572 	if (map != 0)
2573 		selected_apic_ipi(map,
2574 				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2575 
2576 	i = 0;
2577 	while (checkstate_probed_cpus != map) {
2578 		/* spin */
2579 		i++;
2580 		if (i == 100000) {
2581 #ifdef BETTER_CLOCK_DIAGNOSTIC
2582 			printf("forward_hardclock: checkstate %x\n",
2583 			       checkstate_probed_cpus);
2584 #endif
2585 			break;
2586 		}
2587 	}
2588 
2589 	/*
2590 	 * Step 2: walk through other processors processes, update virtual
2591 	 * timer and profiling timer. If stathz == 0, also update ticks and
2592 	 * profiling info.
2593 	 */
2594 
2595 	map = 0;
2596 	for (id = 0; id < mp_ncpus; id++) {
2597 		if (id == PCPU_GET(cpuid))
2598 			continue;
2599 		if (((1 << id) & checkstate_probed_cpus) == 0)
2600 			continue;
2601 		p = checkstate_curproc[id];
2602 		if (p) {
2603 			pstats = p->p_stats;
2604 			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
2605 			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
2606 			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
2607 				p->p_sflag |= PS_ALRMPEND;
2608 				map |= (1 << id);
2609 			}
2610 			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
2611 			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
2612 				p->p_sflag |= PS_PROFPEND;
2613 				map |= (1 << id);
2614 			}
2615 		}
2616 		if (stathz == 0) {
2617 			forwarded_statclock( id, pscnt, &map);
2618 		}
2619 	}
2620 	if (map != 0) {
2621 		checkstate_need_ast |= map;
2622 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2623 		i = 0;
2624 		while ((checkstate_need_ast & map) != 0) {
2625 			/* spin */
2626 			i++;
2627 			if (i > 100000) {
2628 #ifdef BETTER_CLOCK_DIAGNOSTIC
2629 				printf("forward_hardclock: dropped ast 0x%x\n",
2630 				       checkstate_need_ast & map);
2631 #endif
2632 				break;
2633 			}
2634 		}
2635 	}
2636 }
2637 
2638 #endif /* BETTER_CLOCK */
2639 
2640 void
2641 forward_signal(struct proc *p)
2642 {
2643 	int map;
2644 	int id;
2645 	int i;
2646 
2647 	/* Kludge. We don't yet have separate locks for the interrupts
2648 	 * and the kernel. This means that we cannot let the other processors
2649 	 * handle complex interrupts while inhibiting them from entering
2650 	 * the kernel in a non-interrupt context.
2651 	 *
2652 	 * What we can do, without changing the locking mechanisms yet,
2653 	 * is letting the other processors handle a very simple interrupt
2654 	 * (wich determines the processor states), and do the main
2655 	 * work ourself.
2656 	 */
2657 
2658 	CTR1(KTR_SMP, "forward_signal(%p)", p);
2659 
2660 	if (!smp_started || !invltlb_ok || cold || panicstr)
2661 		return;
2662 	if (!forward_signal_enabled)
2663 		return;
2664 	mtx_lock_spin(&sched_lock);
2665 	while (1) {
2666 		if (p->p_stat != SRUN) {
2667 			mtx_unlock_spin(&sched_lock);
2668 			return;
2669 		}
2670 		id = p->p_oncpu;
2671 		mtx_unlock_spin(&sched_lock);
2672 		if (id == 0xff)
2673 			return;
2674 		map = (1<<id);
2675 		checkstate_need_ast |= map;
2676 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2677 		i = 0;
2678 		while ((checkstate_need_ast & map) != 0) {
2679 			/* spin */
2680 			i++;
2681 			if (i > 100000) {
2682 #if 0
2683 				printf("forward_signal: dropped ast 0x%x\n",
2684 				       checkstate_need_ast & map);
2685 #endif
2686 				break;
2687 			}
2688 		}
2689 		mtx_lock_spin(&sched_lock);
2690 		if (id == p->p_oncpu) {
2691 			mtx_unlock_spin(&sched_lock);
2692 			return;
2693 		}
2694 	}
2695 }
2696 
2697 void
2698 forward_roundrobin(void)
2699 {
2700 	u_int map;
2701 	int i;
2702 
2703 	CTR0(KTR_SMP, "forward_roundrobin()");
2704 
2705 	if (!smp_started || !invltlb_ok || cold || panicstr)
2706 		return;
2707 	if (!forward_roundrobin_enabled)
2708 		return;
2709 	resched_cpus |= PCPU_GET(other_cpus);
2710 	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2711 #if 1
2712 	selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2713 #else
2714 	(void) all_but_self_ipi(XCPUAST_OFFSET);
2715 #endif
2716 	i = 0;
2717 	while ((checkstate_need_ast & map) != 0) {
2718 		/* spin */
2719 		i++;
2720 		if (i > 100000) {
2721 #if 0
2722 			printf("forward_roundrobin: dropped ast 0x%x\n",
2723 			       checkstate_need_ast & map);
2724 #endif
2725 			break;
2726 		}
2727 	}
2728 }
2729 
2730 /*
2731  * When called the executing CPU will send an IPI to all other CPUs
2732  *  requesting that they halt execution.
2733  *
2734  * Usually (but not necessarily) called with 'other_cpus' as its arg.
2735  *
2736  *  - Signals all CPUs in map to stop.
2737  *  - Waits for each to stop.
2738  *
2739  * Returns:
2740  *  -1: error
2741  *   0: NA
2742  *   1: ok
2743  *
2744  * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
2745  *            from executing at same time.
2746  */
2747 int
2748 stop_cpus(u_int map)
2749 {
2750 	int count = 0;
2751 
2752 	if (!smp_started)
2753 		return 0;
2754 
2755 	/* send the Xcpustop IPI to all CPUs in map */
2756 	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2757 
2758 	while (count++ < 100000 && (stopped_cpus & map) != map)
2759 		/* spin */ ;
2760 
2761 #ifdef DIAGNOSTIC
2762 	if ((stopped_cpus & map) != map)
2763 		printf("Warning: CPUs 0x%x did not stop!\n",
2764 		    (~(stopped_cpus & map)) & map);
2765 #endif
2766 
2767 	return 1;
2768 }
2769 
2770 
2771 /*
2772  * Called by a CPU to restart stopped CPUs.
2773  *
2774  * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2775  *
2776  *  - Signals all CPUs in map to restart.
2777  *  - Waits for each to restart.
2778  *
2779  * Returns:
2780  *  -1: error
2781  *   0: NA
2782  *   1: ok
2783  */
2784 int
2785 restart_cpus(u_int map)
2786 {
2787 	int count = 0;
2788 
2789 	if (!smp_started)
2790 		return 0;
2791 
2792 	started_cpus = map;		/* signal other cpus to restart */
2793 
2794 	/* wait for each to clear its bit */
2795 	while (count++ < 100000 && (stopped_cpus & map) != 0)
2796 		/* spin */ ;
2797 
2798 #ifdef DIAGNOSTIC
2799 	if ((stopped_cpus & map) != 0)
2800 		printf("Warning: CPUs 0x%x did not restart!\n",
2801 		    (~(stopped_cpus & map)) & map);
2802 #endif
2803 
2804 	return 1;
2805 }
2806 
2807 
2808 #ifdef APIC_INTR_REORDER
2809 /*
2810  *	Maintain mapping from softintr vector to isr bit in local apic.
2811  */
2812 void
2813 set_lapic_isrloc(int intr, int vector)
2814 {
2815 	if (intr < 0 || intr > 32)
2816 		panic("set_apic_isrloc: bad intr argument: %d",intr);
2817 	if (vector < ICU_OFFSET || vector > 255)
2818 		panic("set_apic_isrloc: bad vector argument: %d",vector);
2819 	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
2820 	apic_isrbit_location[intr].bit = (1<<(vector & 31));
2821 }
2822 #endif
2823 
2824 /*
2825  * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
2826  * (if specified), rendezvous, execute the action function (if specified),
2827  * rendezvous again, execute the teardown function (if specified), and then
2828  * resume.
2829  *
2830  * Note that the supplied external functions _must_ be reentrant and aware
2831  * that they are running in parallel and in an unknown lock context.
2832  */
2833 static void (*smp_rv_setup_func)(void *arg);
2834 static void (*smp_rv_action_func)(void *arg);
2835 static void (*smp_rv_teardown_func)(void *arg);
2836 static void *smp_rv_func_arg;
2837 static volatile int smp_rv_waiters[2];
2838 
2839 void
2840 smp_rendezvous_action(void)
2841 {
2842 	/* setup function */
2843 	if (smp_rv_setup_func != NULL)
2844 		smp_rv_setup_func(smp_rv_func_arg);
2845 	/* spin on entry rendezvous */
2846 	atomic_add_int(&smp_rv_waiters[0], 1);
2847 	while (smp_rv_waiters[0] < mp_ncpus)
2848 		;
2849 	/* action function */
2850 	if (smp_rv_action_func != NULL)
2851 		smp_rv_action_func(smp_rv_func_arg);
2852 	/* spin on exit rendezvous */
2853 	atomic_add_int(&smp_rv_waiters[1], 1);
2854 	while (smp_rv_waiters[1] < mp_ncpus)
2855 		;
2856 	/* teardown function */
2857 	if (smp_rv_teardown_func != NULL)
2858 		smp_rv_teardown_func(smp_rv_func_arg);
2859 }
2860 
2861 void
2862 smp_rendezvous(void (* setup_func)(void *),
2863 	       void (* action_func)(void *),
2864 	       void (* teardown_func)(void *),
2865 	       void *arg)
2866 {
2867 
2868 	/* obtain rendezvous lock */
2869 	mtx_lock_spin(&smp_rv_mtx);
2870 
2871 	/* set static function pointers */
2872 	smp_rv_setup_func = setup_func;
2873 	smp_rv_action_func = action_func;
2874 	smp_rv_teardown_func = teardown_func;
2875 	smp_rv_func_arg = arg;
2876 	smp_rv_waiters[0] = 0;
2877 	smp_rv_waiters[1] = 0;
2878 
2879 	/*
2880 	 * signal other processors, which will enter the IPI with interrupts off
2881 	 */
2882 	all_but_self_ipi(XRENDEZVOUS_OFFSET);
2883 
2884 	/* call executor function */
2885 	smp_rendezvous_action();
2886 
2887 	/* release lock */
2888 	mtx_unlock_spin(&smp_rv_mtx);
2889 }
2890 
2891 void
2892 release_aps(void *dummy __unused)
2893 {
2894 	atomic_store_rel_int(&aps_ready, 1);
2895 }
2896 
2897 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
2898