xref: /freebsd/sys/kern/subr_smp.c (revision a1a4f1a0d87b594d3f17a97dc0127eec1417e6f6)
1 /*
2  * Copyright (c) 1996, by Steve Passe
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. The name of the developer may NOT be used to endorse or promote products
11  *    derived from this software without specific prior written permission.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include "opt_smp.h"
29 #include "opt_cpu.h"
30 #include "opt_user_ldt.h"
31 
32 #ifdef SMP
33 #include <machine/smptests.h>
34 #else
35 #error "this file requires the SMP kernel option"
36 #endif
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/proc.h>
42 #include <sys/sysctl.h>
43 #include <sys/malloc.h>
44 #include <sys/memrange.h>
45 #ifdef BETTER_CLOCK
46 #include <sys/dkstat.h>
47 #endif
48 #include <sys/cons.h>	/* cngetc() */
49 
50 #include <vm/vm.h>
51 #include <vm/vm_param.h>
52 #include <vm/pmap.h>
53 #include <vm/vm_kern.h>
54 #include <vm/vm_extern.h>
55 #ifdef BETTER_CLOCK
56 #include <sys/lock.h>
57 #include <vm/vm_map.h>
58 #include <sys/user.h>
59 #ifdef GPROF
60 #include <sys/gmon.h>
61 #endif
62 #endif
63 
64 #include <machine/smp.h>
65 #include <machine/apic.h>
66 #include <machine/atomic.h>
67 #include <machine/cpufunc.h>
68 #include <machine/mpapic.h>
69 #include <machine/psl.h>
70 #include <machine/segments.h>
71 #include <machine/smptests.h>	/** TEST_DEFAULT_CONFIG, TEST_TEST1 */
72 #include <machine/tss.h>
73 #include <machine/specialreg.h>
74 #include <machine/cputypes.h>
75 #include <machine/globaldata.h>
76 
77 #if defined(APIC_IO)
78 #include <machine/md_var.h>		/* setidt() */
79 #include <i386/isa/icu.h>		/* IPIs */
80 #include <i386/isa/intr_machdep.h>	/* IPIs */
81 #endif	/* APIC_IO */
82 
83 #if defined(TEST_DEFAULT_CONFIG)
84 #define MPFPS_MPFB1	TEST_DEFAULT_CONFIG
85 #else
86 #define MPFPS_MPFB1	mpfps->mpfb1
87 #endif  /* TEST_DEFAULT_CONFIG */
88 
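/*
 * These point (through KERNBASE) at the BIOS warm-boot ("reset") vector in
 * the BIOS data area: physical 0x467 holds the real-mode offset and 0x469
 * the segment.  Together with the BIOS_WARM CMOS shutdown code below, this
 * steers a processor coming out of reset into the AP trampoline.
 */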
89 #define WARMBOOT_TARGET		0
90 #define WARMBOOT_OFF		(KERNBASE + 0x0467)
91 #define WARMBOOT_SEG		(KERNBASE + 0x0469)
92 
93 #ifdef PC98
94 #define BIOS_BASE		(0xe8000)
95 #define BIOS_SIZE		(0x18000)
96 #else
97 #define BIOS_BASE		(0xf0000)
98 #define BIOS_SIZE		(0x10000)
99 #endif
100 #define BIOS_COUNT		(BIOS_SIZE/4)
101 
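/*
 * CMOS/RTC index and data ports.  BIOS_RESET (0x0f) is the CMOS shutdown
 * status byte; writing BIOS_WARM (0x0a) there makes the BIOS jump through
 * the warm-boot vector above after the next processor reset.
 */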
102 #define CMOS_REG		(0x70)
103 #define CMOS_DATA		(0x71)
104 #define BIOS_RESET		(0x0f)
105 #define BIOS_WARM		(0x0a)
106 
107 #define PROCENTRY_FLAG_EN	0x01
108 #define PROCENTRY_FLAG_BP	0x02
109 #define IOAPICENTRY_FLAG_EN	0x01
110 
111 
112 /* MP Floating Pointer Structure */
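/*
 * Per the MP spec (v1.4): mpfb1 holds the default-configuration type
 * (0 means a full MP configuration table is present), and bit 7 of mpfb2
 * indicates an IMCR is present, i.e. the system starts in PIC mode rather
 * than virtual wire mode.
 */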
113 typedef struct MPFPS {
114 	char    signature[4];
115 	void   *pap;
116 	u_char  length;
117 	u_char  spec_rev;
118 	u_char  checksum;
119 	u_char  mpfb1;
120 	u_char  mpfb2;
121 	u_char  mpfb3;
122 	u_char  mpfb4;
123 	u_char  mpfb5;
124 }      *mpfps_t;
125 
126 /* MP Configuration Table Header */
127 typedef struct MPCTH {
128 	char    signature[4];
129 	u_short base_table_length;
130 	u_char  spec_rev;
131 	u_char  checksum;
132 	u_char  oem_id[8];
133 	u_char  product_id[12];
134 	void   *oem_table_pointer;
135 	u_short oem_table_size;
136 	u_short entry_count;
137 	void   *apic_address;
138 	u_short extended_table_length;
139 	u_char  extended_table_checksum;
140 	u_char  reserved;
141 }      *mpcth_t;
142 
143 
144 typedef struct PROCENTRY {
145 	u_char  type;
146 	u_char  apic_id;
147 	u_char  apic_version;
148 	u_char  cpu_flags;
149 	u_long  cpu_signature;
150 	u_long  feature_flags;
151 	u_long  reserved1;
152 	u_long  reserved2;
153 }      *proc_entry_ptr;
154 
155 typedef struct BUSENTRY {
156 	u_char  type;
157 	u_char  bus_id;
158 	char    bus_type[6];
159 }      *bus_entry_ptr;
160 
161 typedef struct IOAPICENTRY {
162 	u_char  type;
163 	u_char  apic_id;
164 	u_char  apic_version;
165 	u_char  apic_flags;
166 	void   *apic_address;
167 }      *io_apic_entry_ptr;
168 
169 typedef struct INTENTRY {
170 	u_char  type;
171 	u_char  int_type;
172 	u_short int_flags;
173 	u_char  src_bus_id;
174 	u_char  src_bus_irq;
175 	u_char  dst_apic_id;
176 	u_char  dst_apic_int;
177 }      *int_entry_ptr;
178 
179 /* descriptions of MP basetable entries */
180 typedef struct BASETABLE_ENTRY {
181 	u_char  type;
182 	u_char  length;
183 	char    name[16];
184 }       basetable_entry;
185 
186 /*
187  * This code MUST be enabled both here and in mpboot.s.
188  * It traces the very early stages of AP boot by placing values in CMOS RAM.
189  * It is NORMALLY never needed, hence the primitive method of enabling it.
190  *
191 #define CHECK_POINTS
192  */
193 
194 #if defined(CHECK_POINTS) && !defined(PC98)
195 #define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
196 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
197 
198 #define CHECK_INIT(D);				\
199 	CHECK_WRITE(0x34, (D));			\
200 	CHECK_WRITE(0x35, (D));			\
201 	CHECK_WRITE(0x36, (D));			\
202 	CHECK_WRITE(0x37, (D));			\
203 	CHECK_WRITE(0x38, (D));			\
204 	CHECK_WRITE(0x39, (D));
205 
206 #define CHECK_PRINT(S);				\
207 	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
208 	   (S),					\
209 	   CHECK_READ(0x34),			\
210 	   CHECK_READ(0x35),			\
211 	   CHECK_READ(0x36),			\
212 	   CHECK_READ(0x37),			\
213 	   CHECK_READ(0x38),			\
214 	   CHECK_READ(0x39));
215 
216 #else				/* CHECK_POINTS */
217 
218 #define CHECK_INIT(D)
219 #define CHECK_PRINT(S)
220 
221 #endif				/* CHECK_POINTS */
222 
223 /*
224  * Values to send to the POST hardware.
225  */
226 #define MP_BOOTADDRESS_POST	0x10
227 #define MP_PROBE_POST		0x11
228 #define MPTABLE_PASS1_POST	0x12
229 
230 #define MP_START_POST		0x13
231 #define MP_ENABLE_POST		0x14
232 #define MPTABLE_PASS2_POST	0x15
233 
234 #define START_ALL_APS_POST	0x16
235 #define INSTALL_AP_TRAMP_POST	0x17
236 #define START_AP_POST		0x18
237 
238 #define MP_ANNOUNCE_POST	0x19
239 
240 
241 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
242 int	current_postcode;
243 
244 /** XXX FIXME: what system files declare these??? */
245 extern struct region_descriptor r_gdt, r_idt;
246 
247 int	bsp_apic_ready = 0;	/* flags usability of BSP APIC */
248 int	mp_ncpus;		/* # of CPUs, including BSP */
249 int	mp_naps;		/* # of Application Processors (APs) */
250 int	mp_nbusses;		/* # of busses */
251 int	mp_napics;		/* # of IO APICs */
252 int	boot_cpu_id;		/* designated BSP */
253 vm_offset_t cpu_apic_address;
254 vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
255 extern	int nkpt;
256 
257 u_int32_t cpu_apic_versions[NCPU];
258 u_int32_t io_apic_versions[NAPIC];
259 
260 #ifdef APIC_INTR_DIAGNOSTIC
261 int apic_itrace_enter[32];
262 int apic_itrace_tryisrlock[32];
263 int apic_itrace_gotisrlock[32];
264 int apic_itrace_active[32];
265 int apic_itrace_masked[32];
266 int apic_itrace_noisrlock[32];
267 int apic_itrace_masked2[32];
268 int apic_itrace_unmask[32];
269 int apic_itrace_noforward[32];
270 int apic_itrace_leave[32];
271 int apic_itrace_enter2[32];
272 int apic_itrace_doreti[32];
273 int apic_itrace_splz[32];
274 int apic_itrace_eoi[32];
275 #ifdef APIC_INTR_DIAGNOSTIC_IRQ
276 unsigned short apic_itrace_debugbuffer[32768];
277 int apic_itrace_debugbuffer_idx;
278 struct simplelock apic_itrace_debuglock;
279 #endif
280 #endif
281 
282 #ifdef APIC_INTR_REORDER
283 struct {
284 	volatile int *location;
285 	int bit;
286 } apic_isrbit_location[32];
287 #endif
288 
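/*
 * Per-IRQ record of which IO APIC and pin delivers the interrupt and where
 * its redirection table entry lives; initialized in setup_apic_irq_mapping()
 * and filled in by assign_apic_irq().
 */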
289 struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];
290 
291 /*
292  * APIC ID logical/physical mapping structures.
293  * We oversize these to simplify boot-time config.
294  */
295 int     cpu_num_to_apic_id[NAPICID];
296 int     io_num_to_apic_id[NAPICID];
297 int     apic_id_to_logical[NAPICID];
298 
299 
300 /* Bitmap of all available CPUs */
301 u_int	all_cpus;
302 
303 /* AP uses this during bootstrap.  Do not staticize.  */
304 char *bootSTK;
305 static int bootAP;
306 
307 /* Hotwire a 0->4MB V==P mapping */
308 extern pt_entry_t *KPTphys;
309 
310 /* SMP page table page */
311 extern pt_entry_t *SMPpt;
312 
313 struct pcb stoppcbs[NCPU];
314 
315 int smp_started;		/* has the system started? */
316 
317 /*
318  * Local data and functions.
319  */
320 
321 static int	mp_capable;
322 static u_int	boot_address;
323 static u_int	base_memory;
324 
325 static int	picmode;		/* 0: virtual wire mode, 1: PIC mode */
326 static mpfps_t	mpfps;
327 static int	search_for_sig(u_int32_t target, int count);
328 static void	mp_enable(u_int boot_addr);
329 
330 static int	mptable_pass1(void);
331 static int	mptable_pass2(void);
332 static void	default_mp_table(int type);
333 static void	fix_mp_table(void);
334 static void	setup_apic_irq_mapping(void);
335 static void	init_locks(void);
336 static int	start_all_aps(u_int boot_addr);
337 static void	install_ap_tramp(u_int boot_addr);
338 static int	start_ap(int logicalCpu, u_int boot_addr);
339 
340 /*
341  * Calculate usable address in base memory for AP trampoline code.
342  */
343 u_int
344 mp_bootaddress(u_int basemem)
345 {
346 	POSTCODE(MP_BOOTADDRESS_POST);
347 
348 	base_memory = basemem * 1024;	/* convert to bytes */
349 
350 	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
351 	if ((base_memory - boot_address) < bootMP_size)
352 		boot_address -= 4096;	/* not enough, lower by 4k */
353 
354 	return boot_address;
355 }
356 
357 
358 /*
359  * Look for an Intel MP spec table (ie, SMP capable hardware).
360  */
361 int
362 mp_probe(void)
363 {
364 	int     x;
365 	u_long  segment;
366 	u_int32_t target;
367 
368 	POSTCODE(MP_PROBE_POST);
369 
370 	/* see if EBDA exists */
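	/* (the word at 0x40e in the BIOS data area is the EBDA real-mode segment) */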
371 	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
372 		/* search first 1K of EBDA */
373 		target = (u_int32_t) (segment << 4);
374 		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
375 			goto found;
376 	} else {
377 		/* last 1K of base memory, effective 'top of base' passed in */
378 		target = (u_int32_t) (base_memory - 0x400);
379 		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
380 			goto found;
381 	}
382 
383 	/* search the BIOS */
384 	target = (u_int32_t) BIOS_BASE;
385 	if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
386 		goto found;
387 
388 	/* nothing found */
389 	mpfps = (mpfps_t)0;
390 	mp_capable = 0;
391 	return 0;
392 
393 found:
394 	/* calculate needed resources */
395 	mpfps = (mpfps_t)x;
396 	if (mptable_pass1())
397 		panic("you must reconfigure your kernel");
398 
399 	/* flag the fact that this hardware is MP capable */
400 	mp_capable = 1;
401 	return 1;
402 }
403 
404 
405 /*
406  * Startup the SMP processors.
407  */
408 void
409 mp_start(void)
410 {
411 	POSTCODE(MP_START_POST);
412 
413 	/* look for MP capable motherboard */
414 	if (mp_capable)
415 		mp_enable(boot_address);
416 	else
417 		panic("MP hardware not found!");
418 }
419 
420 
421 /*
422  * Print various information about the SMP system hardware and setup.
423  */
424 void
425 mp_announce(void)
426 {
427 	int     x;
428 
429 	POSTCODE(MP_ANNOUNCE_POST);
430 
431 	printf("FreeBSD/SMP: Multiprocessor motherboard\n");
432 	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
433 	printf(", version: 0x%08x", cpu_apic_versions[0]);
434 	printf(", at 0x%08x\n", cpu_apic_address);
435 	for (x = 1; x <= mp_naps; ++x) {
436 		printf(" cpu%d (AP):  apic id: %2d", x, CPU_TO_ID(x));
437 		printf(", version: 0x%08x", cpu_apic_versions[x]);
438 		printf(", at 0x%08x\n", cpu_apic_address);
439 	}
440 
441 #if defined(APIC_IO)
442 	for (x = 0; x < mp_napics; ++x) {
443 		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
444 		printf(", version: 0x%08x", io_apic_versions[x]);
445 		printf(", at 0x%08x\n", io_apic_address[x]);
446 	}
447 #else
448 	printf(" Warning: APIC I/O disabled\n");
449 #endif	/* APIC_IO */
450 }
451 
452 /*
453  * AP CPUs call this to set up protected mode (per-CPU GDT, LDT and TSS).
454  */
455 void
456 init_secondary(void)
457 {
458 	int	gsel_tss;
459 	int	x, myid = bootAP;
460 
461 	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
462 	gdt_segs[GPROC0_SEL].ssd_base =
463 		(int) &SMP_prvspace[myid].globaldata.gd_common_tss;
464 	SMP_prvspace[myid].globaldata.gd_prvspace = &SMP_prvspace[myid];
465 
466 	for (x = 0; x < NGDT; x++) {
467 		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
468 	}
469 
470 	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
471 	r_gdt.rd_base = (int) &gdt[myid * NGDT];
472 	lgdt(&r_gdt);			/* does magic intra-segment return */
473 
474 	lidt(&r_idt);
475 
476 	lldt(_default_ldt);
477 #ifdef USER_LDT
478 	currentldt = _default_ldt;
479 #endif
480 
481 	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
482 	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
483 	common_tss.tss_esp0 = 0;	/* not used until after switch */
484 	common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
485 	common_tss.tss_ioopt = (sizeof common_tss) << 16;
486 	tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
487 	common_tssd = *tss_gdt;
488 	ltr(gsel_tss);
489 
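	/* CR0 = PG | AM | WP | NE | ET | TS | MP | PE (0x8005003b) */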
490 	load_cr0(0x8005003b);		/* XXX! */
491 
492 	pmap_set_opt();
493 }
494 
495 
496 #if defined(APIC_IO)
497 /*
498  * Final configuration of the BSP's local APIC:
499  *  - disable 'pic mode'.
500  *  - disable 'virtual wire mode'.
501  *  - enable NMI.
502  */
503 void
504 bsp_apic_configure(void)
505 {
506 	u_char		byte;
507 	u_int32_t	temp;
508 
509 	/* leave 'pic mode' if necessary */
510 	if (picmode) {
511 		outb(0x22, 0x70);	/* select IMCR */
512 		byte = inb(0x23);	/* current contents */
513 		byte |= 0x01;		/* mask external INTR */
514 		outb(0x23, byte);	/* disconnect 8259s/NMI */
515 	}
516 
517 	/* mask lint0 (the 8259 'virtual wire' connection) */
518 	temp = lapic.lvt_lint0;
519 	temp |= APIC_LVT_M;		/* set the mask */
520 	lapic.lvt_lint0 = temp;
521 
522 	/* setup lint1 to handle NMI */
523 	temp = lapic.lvt_lint1;
524 	temp &= ~APIC_LVT_M;		/* clear the mask */
525 	lapic.lvt_lint1 = temp;
526 
527 	if (bootverbose)
528 		apic_dump("bsp_apic_configure()");
529 }
530 #endif  /* APIC_IO */
531 
532 
533 /*******************************************************************
534  * local functions and data
535  */
536 
537 /*
538  * start the SMP system
539  */
540 static void
541 mp_enable(u_int boot_addr)
542 {
543 	int     x;
544 #if defined(APIC_IO)
545 	int     apic;
546 	u_int   ux;
547 #endif	/* APIC_IO */
548 
549 	POSTCODE(MP_ENABLE_POST);
550 
551 	/* turn on 4MB of V == P addressing so we can get to MP table */
552 	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
553 	invltlb();
554 
555 	/* examine the MP table for needed info, uses physical addresses */
556 	x = mptable_pass2();
557 
558 	*(int *)PTD = 0;
559 	invltlb();
560 
561 	/* can't process default configs till the CPU APIC is pmapped */
562 	if (x)
563 		default_mp_table(x);
564 
565 	/* post scan cleanup */
566 	fix_mp_table();
567 	setup_apic_irq_mapping();
568 
569 #if defined(APIC_IO)
570 
571 	/* fill the LOGICAL io_apic_versions table */
572 	for (apic = 0; apic < mp_napics; ++apic) {
573 		ux = io_apic_read(apic, IOAPIC_VER);
574 		io_apic_versions[apic] = ux;
575 	}
576 
577 	/* program each IO APIC in the system */
578 	for (apic = 0; apic < mp_napics; ++apic)
579 		if (io_apic_setup(apic) < 0)
580 			panic("IO APIC setup failure");
581 
582 	/* install a 'Spurious INTerrupt' vector */
583 	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
584 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
585 
586 	/* install an inter-CPU IPI for TLB invalidation */
587 	setidt(XINVLTLB_OFFSET, Xinvltlb,
588 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
589 
590 #ifdef BETTER_CLOCK
591 	/* install an inter-CPU IPI for reading processor state */
592 	setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
593 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
594 #endif
595 
596 	/* install an inter-CPU IPI for all-CPU rendezvous */
597 	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
598 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
599 
600 	/* install an inter-CPU IPI for forcing an additional software trap */
601 	setidt(XCPUAST_OFFSET, Xcpuast,
602 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
603 
604 	/* install an inter-CPU IPI for interrupt forwarding */
605 	setidt(XFORWARD_IRQ_OFFSET, Xforward_irq,
606 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
607 
608 	/* install an inter-CPU IPI for CPU stop/restart */
609 	setidt(XCPUSTOP_OFFSET, Xcpustop,
610 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
611 
612 #if defined(TEST_TEST1)
613 	/* install a "fake hardware INTerrupt" vector */
614 	setidt(XTEST1_OFFSET, Xtest1,
615 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
616 #endif  /** TEST_TEST1 */
617 
618 #endif	/* APIC_IO */
619 
620 	/* initialize all SMP locks */
621 	init_locks();
622 
623 	/* start each Application Processor */
624 	start_all_aps(boot_addr);
625 
626 	/*
627 	 * The init process might be started on a different CPU now,
628 	 * and the boot CPU might not call prepare_usermode to get
629 	 * cr0 correctly configured. Thus we initialize cr0 here.
630 	 */
631 	load_cr0(rcr0() | CR0_WP | CR0_AM);
632 }
633 
634 
635 /*
636  * look for the MP spec signature
637  */
638 
639 /* string defined by the Intel MP Spec as identifying the MP table */
640 #define MP_SIG		0x5f504d5f	/* _MP_ */
641 #define NEXT(X)		((X) += 4)
642 static int
643 search_for_sig(u_int32_t target, int count)
644 {
645 	int     x;
646 	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
647 
648 	for (x = 0; x < count; NEXT(x))
649 		if (addr[x] == MP_SIG)
650 			/* make array index a byte index */
651 			return (target + (x * sizeof(u_int32_t)));
652 
653 	return -1;
654 }
655 
656 
657 static basetable_entry basetable_entry_types[] =
658 {
659 	{0, 20, "Processor"},
660 	{1, 8, "Bus"},
661 	{2, 8, "I/O APIC"},
662 	{3, 8, "I/O INT"},
663 	{4, 8, "Local INT"}
664 };
665 
666 typedef struct BUSDATA {
667 	u_char  bus_id;
668 	enum busTypes bus_type;
669 }       bus_datum;
670 
671 typedef struct INTDATA {
672 	u_char  int_type;
673 	u_short int_flags;
674 	u_char  src_bus_id;
675 	u_char  src_bus_irq;
676 	u_char  dst_apic_id;
677 	u_char  dst_apic_int;
678 	u_char	int_vector;
679 }       io_int, local_int;
680 
681 typedef struct BUSTYPENAME {
682 	u_char  type;
683 	char    name[7];
684 }       bus_type_name;
685 
686 static bus_type_name bus_type_table[] =
687 {
688 	{CBUS, "CBUS"},
689 	{CBUSII, "CBUSII"},
690 	{EISA, "EISA"},
691 	{UNKNOWN_BUSTYPE, "---"},
692 	{UNKNOWN_BUSTYPE, "---"},
693 	{ISA, "ISA"},
694 	{UNKNOWN_BUSTYPE, "---"},
695 	{UNKNOWN_BUSTYPE, "---"},
696 	{UNKNOWN_BUSTYPE, "---"},
697 	{UNKNOWN_BUSTYPE, "---"},
698 	{UNKNOWN_BUSTYPE, "---"},
699 	{UNKNOWN_BUSTYPE, "---"},
700 	{PCI, "PCI"},
701 	{UNKNOWN_BUSTYPE, "---"},
702 	{UNKNOWN_BUSTYPE, "---"},
703 	{UNKNOWN_BUSTYPE, "---"},
704 	{UNKNOWN_BUSTYPE, "---"},
705 	{XPRESS, "XPRESS"},
706 	{UNKNOWN_BUSTYPE, "---"}
707 };
708 /* from MP spec v1.4, table 5-1 */
709 static int default_data[7][5] =
710 {
711 /*   nbus, id0, type0, id1, type1 */
712 	{1, 0, ISA, 255, 255},
713 	{1, 0, EISA, 255, 255},
714 	{1, 0, EISA, 255, 255},
715 	{0, 255, 255, 255, 255},/* MCA not supported */
716 	{2, 0, ISA, 1, PCI},
717 	{2, 0, EISA, 1, PCI},
718 	{0, 255, 255, 255, 255}	/* MCA not supported */
719 };
720 
721 
722 /* the bus data */
723 static bus_datum bus_data[NBUS];
724 
725 /* the IO INT data, one entry per possible APIC INTerrupt */
726 static io_int  io_apic_ints[NINTR];
727 
728 static int nintrs;
729 
730 static int processor_entry	__P((proc_entry_ptr entry, int cpu));
731 static int bus_entry		__P((bus_entry_ptr entry, int bus));
732 static int io_apic_entry	__P((io_apic_entry_ptr entry, int apic));
733 static int int_entry		__P((int_entry_ptr entry, int intr));
734 static int lookup_bus_type	__P((char *name));
735 
736 
737 /*
738  * 1st pass on motherboard's Intel MP specification table.
739  *
740  * initializes:
741  *	mp_ncpus = 1
742  *
743  * determines:
744  *	cpu_apic_address (common to all CPUs)
745  *	io_apic_address[N]
746  *	mp_naps
747  *	mp_nbusses
748  *	mp_napics
749  *	nintrs
750  */
751 static int
752 mptable_pass1(void)
753 {
754 	int	x;
755 	mpcth_t	cth;
756 	int	totalSize;
757 	void*	position;
758 	int	count;
759 	int	type;
760 	int	mustpanic;
761 
762 	POSTCODE(MPTABLE_PASS1_POST);
763 
764 	mustpanic = 0;
765 
766 	/* clear various tables */
767 	for (x = 0; x < NAPICID; ++x) {
768 		io_apic_address[x] = ~0;	/* IO APIC address table */
769 	}
770 
771 	/* init everything to empty */
772 	mp_naps = 0;
773 	mp_nbusses = 0;
774 	mp_napics = 0;
775 	nintrs = 0;
776 
777 	/* check for use of 'default' configuration */
778 	if (MPFPS_MPFB1 != 0) {
779 		/* use default addresses */
780 		cpu_apic_address = DEFAULT_APIC_BASE;
781 		io_apic_address[0] = DEFAULT_IO_APIC_BASE;
782 
783 		/* fill in with defaults */
784 		mp_naps = 2;		/* includes BSP */
785 		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
786 #if defined(APIC_IO)
787 		mp_napics = 1;
788 		nintrs = 16;
789 #endif	/* APIC_IO */
790 	}
791 	else {
792 		if ((cth = mpfps->pap) == 0)
793 			panic("MP Configuration Table Header MISSING!");
794 
795 		cpu_apic_address = (vm_offset_t) cth->apic_address;
796 
797 		/* walk the table, recording info of interest */
798 		totalSize = cth->base_table_length - sizeof(struct MPCTH);
799 		position = (u_char *) cth + sizeof(struct MPCTH);
800 		count = cth->entry_count;
801 
802 		while (count--) {
803 			switch (type = *(u_char *) position) {
804 			case 0: /* processor_entry */
805 				if (((proc_entry_ptr)position)->cpu_flags
806 					& PROCENTRY_FLAG_EN)
807 					++mp_naps;
808 				break;
809 			case 1: /* bus_entry */
810 				++mp_nbusses;
811 				break;
812 			case 2: /* io_apic_entry */
813 				if (((io_apic_entry_ptr)position)->apic_flags
814 					& IOAPICENTRY_FLAG_EN)
815 					io_apic_address[mp_napics++] =
816 					    (vm_offset_t)((io_apic_entry_ptr)
817 						position)->apic_address;
818 				break;
819 			case 3: /* int_entry */
820 				++nintrs;
821 				break;
822 			case 4:	/* local int_entry */
823 				break;
824 			default:
825 				panic("mpfps Base Table HOSED!");
826 				/* NOTREACHED */
827 			}
828 
829 			totalSize -= basetable_entry_types[type].length;
830 			position = (u_char *)position + basetable_entry_types[type].length;
831 		}
832 	}
833 
834 	/* qualify the numbers */
835 	if (mp_naps > NCPU) {
836 		printf("Warning: only using %d of %d available CPUs!\n",
837 			NCPU, mp_naps);
838 		mp_naps = NCPU;
839 	}
840 	if (mp_nbusses > NBUS) {
841 		printf("found %d busses, increase NBUS\n", mp_nbusses);
842 		mustpanic = 1;
843 	}
844 	if (mp_napics > NAPIC) {
845 		printf("found %d apics, increase NAPIC\n", mp_napics);
846 		mustpanic = 1;
847 	}
848 	if (nintrs > NINTR) {
849 		printf("found %d intrs, increase NINTR\n", nintrs);
850 		mustpanic = 1;
851 	}
852 
853 	/*
854 	 * Count the BSP.
855 	 * This is also used as a counter while starting the APs.
856 	 */
857 	mp_ncpus = 1;
858 
859 	--mp_naps;	/* subtract the BSP */
860 
861 	return mustpanic;
862 }
863 
864 
865 /*
866  * 2nd pass on motherboard's Intel MP specification table.
867  *
868  * sets:
869  *	boot_cpu_id
870  *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
871  *	CPU_TO_ID(N), logical CPU to APIC ID table
872  *	IO_TO_ID(N), logical IO to APIC ID table
873  *	bus_data[N]
874  *	io_apic_ints[N]
875  */
876 static int
877 mptable_pass2(void)
878 {
879 	int     x;
880 	mpcth_t cth;
881 	int     totalSize;
882 	void*   position;
883 	int     count;
884 	int     type;
885 	int     apic, bus, cpu, intr;
886 
887 	POSTCODE(MPTABLE_PASS2_POST);
888 
889 	/* clear various tables */
890 	for (x = 0; x < NAPICID; ++x) {
891 		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
892 		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
893 		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
894 	}
895 
896 	/* clear bus data table */
897 	for (x = 0; x < NBUS; ++x)
898 		bus_data[x].bus_id = 0xff;
899 
900 	/* clear IO APIC INT table */
901 	for (x = 0; x < NINTR; ++x) {
902 		io_apic_ints[x].int_type = 0xff;
903 		io_apic_ints[x].int_vector = 0xff;
904 	}
905 
906 	/* setup the cpu/apic mapping arrays */
907 	boot_cpu_id = -1;
908 
909 	/* record whether PIC or virtual-wire mode */
910 	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;
911 
912 	/* check for use of 'default' configuration */
913 	if (MPFPS_MPFB1 != 0)
914 		return MPFPS_MPFB1;	/* return default configuration type */
915 
916 	if ((cth = mpfps->pap) == 0)
917 		panic("MP Configuration Table Header MISSING!");
918 
919 	/* walk the table, recording info of interest */
920 	totalSize = cth->base_table_length - sizeof(struct MPCTH);
921 	position = (u_char *) cth + sizeof(struct MPCTH);
922 	count = cth->entry_count;
923 	apic = bus = intr = 0;
924 	cpu = 1;				/* pre-count the BSP */
925 
926 	while (count--) {
927 		switch (type = *(u_char *) position) {
928 		case 0:
929 			if (processor_entry(position, cpu))
930 				++cpu;
931 			break;
932 		case 1:
933 			if (bus_entry(position, bus))
934 				++bus;
935 			break;
936 		case 2:
937 			if (io_apic_entry(position, apic))
938 				++apic;
939 			break;
940 		case 3:
941 			if (int_entry(position, intr))
942 				++intr;
943 			break;
944 		case 4:
945 			/* int_entry(position); */
946 			break;
947 		default:
948 			panic("mpfps Base Table HOSED!");
949 			/* NOTREACHED */
950 		}
951 
952 		totalSize -= basetable_entry_types[type].length;
953 		position = (u_char *)position + basetable_entry_types[type].length;
954 	}
955 
956 	if (boot_cpu_id == -1)
957 		panic("NO BSP found!");
958 
959 	/* report the fact that this is NOT a default configuration */
960 	return 0;
961 }
962 
963 
964 static void
965 assign_apic_irq(int apic, int intpin, int irq)
966 {
967 	int x;
968 
969 	if (int_to_apicintpin[irq].ioapic != -1)
970 		panic("assign_apic_irq: inconsistent table");
971 
972 	int_to_apicintpin[irq].ioapic = apic;
973 	int_to_apicintpin[irq].int_pin = intpin;
974 	int_to_apicintpin[irq].apic_address = ioapic[apic];
975 	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
976 
977 	for (x = 0; x < nintrs; x++) {
978 		if ((io_apic_ints[x].int_type == 0 ||
979 		     io_apic_ints[x].int_type == 3) &&
980 		    io_apic_ints[x].int_vector == 0xff &&
981 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
982 		    io_apic_ints[x].dst_apic_int == intpin)
983 			io_apic_ints[x].int_vector = irq;
984 	}
985 }
986 
987 /*
988  * parse an Intel MP specification table
989  */
990 static void
991 fix_mp_table(void)
992 {
993 	int	x;
994 	int	id;
995 	int	bus_0 = 0;	/* Stop GCC warning */
996 	int	bus_pci = 0;	/* Stop GCC warning */
997 	int	num_pci_bus;
998 
999 	/*
1000 	 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1001 	 * did it wrong.  The MP spec says that when more than 1 PCI bus
1002 	 * exists the BIOS must begin with bus entries for the PCI bus and use
1003 	 * actual PCI bus numbering.  This implies that when only 1 PCI bus
1004 	 * exists the BIOS can choose to ignore this ordering, and indeed many
1005 	 * MP motherboards do ignore it.  This causes a problem when the PCI
1006 	 * sub-system makes requests of the MP sub-system based on PCI bus
1007 	 * numbers.	So here we look for the situation and renumber the
1008 	 * busses and associated INTs in an effort to "make it right".
1009 	 */
1010 
1011 	/* find bus 0, PCI bus, count the number of PCI busses */
1012 	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1013 		if (bus_data[x].bus_id == 0) {
1014 			bus_0 = x;
1015 		}
1016 		if (bus_data[x].bus_type == PCI) {
1017 			++num_pci_bus;
1018 			bus_pci = x;
1019 		}
1020 	}
1021 	/*
1022 	 * bus_0 == slot of bus with ID of 0
1023 	 * bus_pci == slot of last PCI bus encountered
1024 	 */
1025 
1026 	/* check the 1 PCI bus case for sanity */
1027 	if (num_pci_bus == 1) {
1028 
1029 		/* if it is number 0 all is well */
1030 		if (bus_data[bus_pci].bus_id == 0)
1031 			return;
1032 
1033 		/* mis-numbered, swap with whichever bus uses slot 0 */
1034 
1035 		/* swap the bus entry types */
1036 		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1037 		bus_data[bus_0].bus_type = PCI;
1038 
1039 		/* swap each relevant INTerrupt entry */
1040 		id = bus_data[bus_pci].bus_id;
1041 		for (x = 0; x < nintrs; ++x) {
1042 			if (io_apic_ints[x].src_bus_id == id) {
1043 				io_apic_ints[x].src_bus_id = 0;
1044 			}
1045 			else if (io_apic_ints[x].src_bus_id == 0) {
1046 				io_apic_ints[x].src_bus_id = id;
1047 			}
1048 		}
1049 	}
1050 	/* sanity check if more than 1 PCI bus */
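	/* (no renumbering is attempted here; the loop below is only a placeholder) */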
1051 	else if (num_pci_bus > 1) {
1052 		for (x = 0; x < mp_nbusses; ++x) {
1053 			if (bus_data[x].bus_type != PCI)
1054 				continue;
1055 		}
1056 	}
1057 }
1058 
1059 
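/*
 * Build the IRQ -> (IO APIC, pin) map.  Pins on the first IO APIC that carry
 * INT (type 0) or ExtINT (type 3) signals keep their pin number as the IRQ
 * (matching the ISA IRQs); any remaining unassigned pins are then given the
 * lowest free slots in int_to_apicintpin[].
 */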
1060 static void
1061 setup_apic_irq_mapping(void)
1062 {
1063 	int	x;
1064 	int	int_vector;
1065 
1066 	/* Assign low level interrupt handlers */
1067 	for (x = 0; x < APIC_INTMAPSIZE; x++) {
1068 		int_to_apicintpin[x].ioapic = -1;
1069 		int_to_apicintpin[x].int_pin = 0;
1070 		int_to_apicintpin[x].apic_address = NULL;
1071 		int_to_apicintpin[x].redirindex = 0;
1072 	}
1073 	for (x = 0; x < nintrs; x++) {
1074 		if (io_apic_ints[x].dst_apic_int < APIC_INTMAPSIZE &&
1075 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1076 		    io_apic_ints[x].int_vector == 0xff &&
1077 		    (io_apic_ints[x].int_type == 0 ||
1078 		     io_apic_ints[x].int_type == 3)) {
1079 			assign_apic_irq(0,
1080 					io_apic_ints[x].dst_apic_int,
1081 					io_apic_ints[x].dst_apic_int);
1082 		}
1083 	}
1084 	int_vector = 0;
1085 	while (int_vector < APIC_INTMAPSIZE &&
1086 	       int_to_apicintpin[int_vector].ioapic != -1)
1087 		int_vector++;
1088 	for (x = 0; x < nintrs && int_vector < APIC_INTMAPSIZE; x++) {
1089 		if ((io_apic_ints[x].int_type == 0 ||
1090 		     io_apic_ints[x].int_type == 3) &&
1091 		    io_apic_ints[x].int_vector == 0xff) {
1092 			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1093 					io_apic_ints[x].dst_apic_int,
1094 					int_vector);
1095 			int_vector++;
1096 			while (int_vector < APIC_INTMAPSIZE &&
1097 			       int_to_apicintpin[int_vector].ioapic != -1)
1098 				int_vector++;
1099 		}
1100 	}
1101 }
1102 
1103 
1104 static int
1105 processor_entry(proc_entry_ptr entry, int cpu)
1106 {
1107 	/* check for usability */
1108 	if ((cpu >= NCPU) || !(entry->cpu_flags & PROCENTRY_FLAG_EN))
1109 		return 0;
1110 
1111 	/* check for BSP flag */
1112 	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1113 		boot_cpu_id = entry->apic_id;
1114 		CPU_TO_ID(0) = entry->apic_id;
1115 		ID_TO_CPU(entry->apic_id) = 0;
1116 		return 0;	/* it's already been counted */
1117 	}
1118 
1119 	/* add another AP to list, if less than max number of CPUs */
1120 	else {
1121 		CPU_TO_ID(cpu) = entry->apic_id;
1122 		ID_TO_CPU(entry->apic_id) = cpu;
1123 		return 1;
1124 	}
1125 }
1126 
1127 
1128 static int
1129 bus_entry(bus_entry_ptr entry, int bus)
1130 {
1131 	int     x;
1132 	char    c, name[8];
1133 
1134 	/* encode the name into an index */
1135 	for (x = 0; x < 6; ++x) {
1136 		if ((c = entry->bus_type[x]) == ' ')
1137 			break;
1138 		name[x] = c;
1139 	}
1140 	name[x] = '\0';
1141 
1142 	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1143 		panic("unknown bus type: '%s'", name);
1144 
1145 	bus_data[bus].bus_id = entry->bus_id;
1146 	bus_data[bus].bus_type = x;
1147 
1148 	return 1;
1149 }
1150 
1151 
1152 static int
1153 io_apic_entry(io_apic_entry_ptr entry, int apic)
1154 {
1155 	if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1156 		return 0;
1157 
1158 	IO_TO_ID(apic) = entry->apic_id;
1159 	ID_TO_IO(entry->apic_id) = apic;
1160 
1161 	return 1;
1162 }
1163 
1164 
1165 static int
1166 lookup_bus_type(char *name)
1167 {
1168 	int     x;
1169 
1170 	for (x = 0; x < MAX_BUSTYPE; ++x)
1171 		if (strcmp(bus_type_table[x].name, name) == 0)
1172 			return bus_type_table[x].type;
1173 
1174 	return UNKNOWN_BUSTYPE;
1175 }
1176 
1177 
1178 static int
1179 int_entry(int_entry_ptr entry, int intr)
1180 {
1181 	int apic;
1182 
1183 	io_apic_ints[intr].int_type = entry->int_type;
1184 	io_apic_ints[intr].int_flags = entry->int_flags;
1185 	io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1186 	io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1187 	if (entry->dst_apic_id == 255) {
1188 		/* This signal goes to all IO APICs.  Select an IO APIC
1189 		   with a sufficient number of interrupt pins. */
1190 		for (apic = 0; apic < mp_napics; apic++)
1191 			if (((io_apic_read(apic, IOAPIC_VER) &
1192 			      IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1193 			    entry->dst_apic_int)
1194 				break;
1195 		if (apic < mp_napics)
1196 			io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1197 		else
1198 			io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1199 	} else
1200 		io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1201 	io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1202 
1203 	return 1;
1204 }
1205 
1206 
1207 static int
1208 apic_int_is_bus_type(int intr, int bus_type)
1209 {
1210 	int     bus;
1211 
1212 	for (bus = 0; bus < mp_nbusses; ++bus)
1213 		if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1214 		    && ((int) bus_data[bus].bus_type == bus_type))
1215 			return 1;
1216 
1217 	return 0;
1218 }
1219 
1220 
1221 /*
1222  * Given a traditional ISA INT mask, return an APIC mask.
1223  */
1224 u_int
1225 isa_apic_mask(u_int isa_mask)
1226 {
1227 	int isa_irq;
1228 	int apic_pin;
1229 
1230 #if defined(SKIP_IRQ15_REDIRECT)
1231 	if (isa_mask == (1 << 15)) {
1232 		printf("skipping ISA IRQ15 redirect\n");
1233 		return isa_mask;
1234 	}
1235 #endif  /* SKIP_IRQ15_REDIRECT */
1236 
1237 	isa_irq = ffs(isa_mask);		/* find its bit position */
1238 	if (isa_irq == 0)			/* doesn't exist */
1239 		return 0;
1240 	--isa_irq;				/* make it zero based */
1241 
1242 	apic_pin = isa_apic_irq(isa_irq);	/* look for APIC connection */
1243 	if (apic_pin == -1)
1244 		return 0;
1245 
1246 	return (1 << apic_pin);			/* convert pin# to a mask */
1247 }
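/*
 * Example usage (hypothetical values): find the APIC pin mask that the
 * ISA IRQ12 mask maps to:
 *
 *	u_int apic_mask = isa_apic_mask(1 << 12);
 */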
1248 
1249 
1250 /*
1251  * Determine which APIC pin an ISA/EISA INT is attached to.
1252  */
1253 #define INTTYPE(I)	(io_apic_ints[(I)].int_type)
1254 #define INTPIN(I)	(io_apic_ints[(I)].dst_apic_int)
1255 #define INTIRQ(I)	(io_apic_ints[(I)].int_vector)
1256 #define INTAPIC(I)	(ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1257 
1258 #define SRCBUSIRQ(I)	(io_apic_ints[(I)].src_bus_irq)
1259 int
1260 isa_apic_irq(int isa_irq)
1261 {
1262 	int     intr;
1263 
1264 	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
1265 		if (INTTYPE(intr) == 0) {		/* standard INT */
1266 			if (SRCBUSIRQ(intr) == isa_irq) {
1267 				if (apic_int_is_bus_type(intr, ISA) ||
1268 			            apic_int_is_bus_type(intr, EISA))
1269 					return INTIRQ(intr);	/* found */
1270 			}
1271 		}
1272 	}
1273 	return -1;					/* NOT found */
1274 }
1275 
1276 
1277 /*
1278  * Determine which APIC pin a PCI INT is attached to.
1279  */
1280 #define SRCBUSID(I)	(io_apic_ints[(I)].src_bus_id)
1281 #define SRCBUSDEVICE(I)	((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1282 #define SRCBUSLINE(I)	(io_apic_ints[(I)].src_bus_irq & 0x03)
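/* For PCI sources the MP spec packs the device number into bits 2..6 of
   src_bus_irq and the INT line (INTA#..INTD#) into bits 0..1, hence the
   two macros above. */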
1283 int
1284 pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1285 {
1286 	int     intr;
1287 
1288 	--pciInt;					/* zero based */
1289 
1290 	for (intr = 0; intr < nintrs; ++intr)		/* check each record */
1291 		if ((INTTYPE(intr) == 0)		/* standard INT */
1292 		    && (SRCBUSID(intr) == pciBus)
1293 		    && (SRCBUSDEVICE(intr) == pciDevice)
1294 		    && (SRCBUSLINE(intr) == pciInt))	/* a candidate IRQ */
1295 			if (apic_int_is_bus_type(intr, PCI))
1296 				return INTIRQ(intr);	/* exact match */
1297 
1298 	return -1;					/* NOT found */
1299 }
1300 
1301 int
1302 next_apic_irq(int irq)
1303 {
1304 	int intr, ointr;
1305 	int bus, bustype;
1306 
1307 	bus = 0;
1308 	bustype = 0;
1309 	for (intr = 0; intr < nintrs; intr++) {
1310 		if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1311 			continue;
1312 		bus = SRCBUSID(intr);
1313 		bustype = apic_bus_type(bus);
1314 		if (bustype != ISA &&
1315 		    bustype != EISA &&
1316 		    bustype != PCI)
1317 			continue;
1318 		break;
1319 	}
1320 	if (intr >= nintrs) {
1321 		return -1;
1322 	}
1323 	for (ointr = intr + 1; ointr < nintrs; ointr++) {
1324 		if (INTTYPE(ointr) != 0)
1325 			continue;
1326 		if (bus != SRCBUSID(ointr))
1327 			continue;
1328 		if (bustype == PCI) {
1329 			if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1330 				continue;
1331 			if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1332 				continue;
1333 		}
1334 		if (bustype == ISA || bustype == EISA) {
1335 			if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1336 				continue;
1337 		}
1338 		if (INTPIN(intr) == INTPIN(ointr))
1339 			continue;
1340 		break;
1341 	}
1342 	if (ointr >= nintrs) {
1343 		return -1;
1344 	}
1345 	return INTIRQ(ointr);
1346 }
1347 #undef SRCBUSLINE
1348 #undef SRCBUSDEVICE
1349 #undef SRCBUSID
1350 #undef SRCBUSIRQ
1351 
1352 #undef INTPIN
1353 #undef INTIRQ
1354 #undef INTAPIC
1355 #undef INTTYPE
1356 
1357 
1358 /*
1359  * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1360  *
1361  * XXX FIXME:
1362  *  Exactly what this means is unclear at this point.  It is a solution
1363  *  for motherboards that redirect the MBIRQ0 pin.  Generically a motherboard
1364  *  could route any of the ISA INTs to upper (>15) IRQ values.  But most would
1365  *  NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1366  *  option.
1367  */
1368 int
1369 undirect_isa_irq(int rirq)
1370 {
1371 #if defined(READY)
1372 	if (bootverbose)
1373 	    printf("Freeing redirected ISA irq %d.\n", rirq);
1374 	/** FIXME: tickle the MB redirector chip */
1375 	return ???;
1376 #else
1377 	if (bootverbose)
1378 	    printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1379 	return 0;
1380 #endif  /* READY */
1381 }
1382 
1383 
1384 /*
1385  * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1386  */
1387 int
1388 undirect_pci_irq(int rirq)
1389 {
1390 #if defined(READY)
1391 	if (bootverbose)
1392 		printf("Freeing redirected PCI irq %d.\n", rirq);
1393 
1394 	/** FIXME: tickle the MB redirector chip */
1395 	return ???;
1396 #else
1397 	if (bootverbose)
1398 		printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1399 		       rirq);
1400 	return 0;
1401 #endif  /* READY */
1402 }
1403 
1404 
1405 /*
1406  * given a bus ID, return:
1407  *  the bus type if found
1408  *  -1 if NOT found
1409  */
1410 int
1411 apic_bus_type(int id)
1412 {
1413 	int     x;
1414 
1415 	for (x = 0; x < mp_nbusses; ++x)
1416 		if (bus_data[x].bus_id == id)
1417 			return bus_data[x].bus_type;
1418 
1419 	return -1;
1420 }
1421 
1422 
1423 /*
1424  * given a LOGICAL APIC# and pin#, return:
1425  *  the associated src bus ID if found
1426  *  -1 if NOT found
1427  */
1428 int
1429 apic_src_bus_id(int apic, int pin)
1430 {
1431 	int     x;
1432 
1433 	/* search each of the possible INTerrupt sources */
1434 	for (x = 0; x < nintrs; ++x)
1435 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1436 		    (pin == io_apic_ints[x].dst_apic_int))
1437 			return (io_apic_ints[x].src_bus_id);
1438 
1439 	return -1;		/* NOT found */
1440 }
1441 
1442 
1443 /*
1444  * given a LOGICAL APIC# and pin#, return:
1445  *  the associated src bus IRQ if found
1446  *  -1 if NOT found
1447  */
1448 int
1449 apic_src_bus_irq(int apic, int pin)
1450 {
1451 	int     x;
1452 
1453 	for (x = 0; x < nintrs; x++)
1454 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1455 		    (pin == io_apic_ints[x].dst_apic_int))
1456 			return (io_apic_ints[x].src_bus_irq);
1457 
1458 	return -1;		/* NOT found */
1459 }
1460 
1461 
1462 /*
1463  * given a LOGICAL APIC# and pin#, return:
1464  *  the associated INTerrupt type if found
1465  *  -1 if NOT found
1466  */
1467 int
1468 apic_int_type(int apic, int pin)
1469 {
1470 	int     x;
1471 
1472 	/* search each of the possible INTerrupt sources */
1473 	for (x = 0; x < nintrs; ++x)
1474 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1475 		    (pin == io_apic_ints[x].dst_apic_int))
1476 			return (io_apic_ints[x].int_type);
1477 
1478 	return -1;		/* NOT found */
1479 }
1480 
1481 int
1482 apic_irq(int apic, int pin)
1483 {
1484 	int x;
1485 	int res;
1486 
1487 	for (x = 0; x < nintrs; ++x)
1488 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1489 		    (pin == io_apic_ints[x].dst_apic_int)) {
1490 			res = io_apic_ints[x].int_vector;
1491 			if (res == 0xff)
1492 				return -1;
1493 			if (apic != int_to_apicintpin[res].ioapic)
1494 				panic("apic_irq: inconsistent table");
1495 			if (pin != int_to_apicintpin[res].int_pin)
1496 				panic("apic_irq inconsistent table (2)");
1497 			return res;
1498 		}
1499 	return -1;
1500 }
1501 
1502 
1503 /*
1504  * given a LOGICAL APIC# and pin#, return:
1505  *  the associated trigger mode if found
1506  *  -1 if NOT found
1507  */
1508 int
1509 apic_trigger(int apic, int pin)
1510 {
1511 	int     x;
1512 
1513 	/* search each of the possible INTerrupt sources */
1514 	for (x = 0; x < nintrs; ++x)
1515 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1516 		    (pin == io_apic_ints[x].dst_apic_int))
1517 			return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1518 
1519 	return -1;		/* NOT found */
1520 }
1521 
1522 
1523 /*
1524  * given a LOGICAL APIC# and pin#, return:
1525  *  the associated 'active' level if found
1526  *  -1 if NOT found
1527  */
1528 int
1529 apic_polarity(int apic, int pin)
1530 {
1531 	int     x;
1532 
1533 	/* search each of the possible INTerrupt sources */
1534 	for (x = 0; x < nintrs; ++x)
1535 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1536 		    (pin == io_apic_ints[x].dst_apic_int))
1537 			return (io_apic_ints[x].int_flags & 0x03);
1538 
1539 	return -1;		/* NOT found */
1540 }
1541 
1542 
1543 /*
1544  * set data according to MP defaults
1545  * FIXME: probably not complete yet...
1546  */
1547 static void
1548 default_mp_table(int type)
1549 {
1550 	int     ap_cpu_id;
1551 #if defined(APIC_IO)
1552 	u_int32_t ux;
1553 	int     io_apic_id;
1554 	int     pin;
1555 #endif	/* APIC_IO */
1556 
1557 #if 0
1558 	printf("  MP default config type: %d\n", type);
1559 	switch (type) {
1560 	case 1:
1561 		printf("   bus: ISA, APIC: 82489DX\n");
1562 		break;
1563 	case 2:
1564 		printf("   bus: EISA, APIC: 82489DX\n");
1565 		break;
1566 	case 3:
1567 		printf("   bus: EISA, APIC: 82489DX\n");
1568 		break;
1569 	case 4:
1570 		printf("   bus: MCA, APIC: 82489DX\n");
1571 		break;
1572 	case 5:
1573 		printf("   bus: ISA+PCI, APIC: Integrated\n");
1574 		break;
1575 	case 6:
1576 		printf("   bus: EISA+PCI, APIC: Integrated\n");
1577 		break;
1578 	case 7:
1579 		printf("   bus: MCA+PCI, APIC: Integrated\n");
1580 		break;
1581 	default:
1582 		printf("   future type\n");
1583 		break;
1584 		/* NOTREACHED */
1585 	}
1586 #endif	/* 0 */
1587 
1588 	boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
1589 	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1590 
1591 	/* BSP */
1592 	CPU_TO_ID(0) = boot_cpu_id;
1593 	ID_TO_CPU(boot_cpu_id) = 0;
1594 
1595 	/* one and only AP */
1596 	CPU_TO_ID(1) = ap_cpu_id;
1597 	ID_TO_CPU(ap_cpu_id) = 1;
1598 
1599 #if defined(APIC_IO)
1600 	/* one and only IO APIC */
1601 	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1602 
1603 	/*
1604 	 * sanity check, refer to MP spec section 3.6.6, last paragraph
1605 	 * necessary as some hardware isn't properly setting up the IO APIC
1606 	 */
1607 #if defined(REALLY_ANAL_IOAPICID_VALUE)
1608 	if (io_apic_id != 2) {
1609 #else
1610 	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1611 #endif	/* REALLY_ANAL_IOAPICID_VALUE */
1612 		ux = io_apic_read(0, IOAPIC_ID);	/* get current contents */
1613 		ux &= ~APIC_ID_MASK;	/* clear the ID field */
1614 		ux |= 0x02000000;	/* set it to '2' */
1615 		io_apic_write(0, IOAPIC_ID, ux);	/* write new value */
1616 		ux = io_apic_read(0, IOAPIC_ID);	/* re-read && test */
1617 		if ((ux & APIC_ID_MASK) != 0x02000000)
1618 			panic("can't control IO APIC ID, reg: 0x%08x", ux);
1619 		io_apic_id = 2;
1620 	}
1621 	IO_TO_ID(0) = io_apic_id;
1622 	ID_TO_IO(io_apic_id) = 0;
1623 #endif	/* APIC_IO */
1624 
1625 	/* fill out bus entries */
1626 	switch (type) {
1627 	case 1:
1628 	case 2:
1629 	case 3:
1630 	case 5:
1631 	case 6:
1632 		bus_data[0].bus_id = default_data[type - 1][1];
1633 		bus_data[0].bus_type = default_data[type - 1][2];
1634 		bus_data[1].bus_id = default_data[type - 1][3];
1635 		bus_data[1].bus_type = default_data[type - 1][4];
1636 		break;
1637 
1638 	/* case 4: case 7:		   MCA NOT supported */
1639 	default:		/* illegal/reserved */
1640 		panic("BAD default MP config: %d", type);
1641 		/* NOTREACHED */
1642 	}
1643 
1644 #if defined(APIC_IO)
1645 	/* general cases from MP v1.4, table 5-2 */
1646 	for (pin = 0; pin < 16; ++pin) {
1647 		io_apic_ints[pin].int_type = 0;
1648 		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
1649 		io_apic_ints[pin].src_bus_id = 0;
1650 		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
1651 		io_apic_ints[pin].dst_apic_id = io_apic_id;
1652 		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
1653 	}
1654 
1655 	/* special cases from MP v1.4, table 5-2 */
1656 	if (type == 2) {
1657 		io_apic_ints[2].int_type = 0xff;	/* N/C */
1658 		io_apic_ints[13].int_type = 0xff;	/* N/C */
1659 #if !defined(APIC_MIXED_MODE)
1660 		/** FIXME: ??? */
1661 		panic("sorry, can't support type 2 default yet");
1662 #endif	/* APIC_MIXED_MODE */
1663 	}
1664 	else
1665 		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */
1666 
1667 	if (type == 7)
1668 		io_apic_ints[0].int_type = 0xff;	/* N/C */
1669 	else
1670 		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
1671 #endif	/* APIC_IO */
1672 }
1673 
1674 
1675 /*
1676  * initialize all the SMP locks
1677  */
1678 
1679 /* critical region around IO APIC, apic_imen */
1680 struct simplelock	imen_lock;
1681 
1682 /* critical region around splxx(), cpl, cml, cil, ipending */
1683 struct simplelock	cpl_lock;
1684 
1685 /* Make FAST_INTR() routines sequential */
1686 struct simplelock	fast_intr_lock;
1687 
1688 /* critical region around INTR() routines */
1689 struct simplelock	intr_lock;
1690 
1691 /* lock regions protected in UP kernel via cli/sti */
1692 struct simplelock	mpintr_lock;
1693 
1694 /* lock region used by kernel profiling */
1695 struct simplelock	mcount_lock;
1696 
1697 #ifdef USE_COMLOCK
1698 /* locks com (tty) data/hardware accesses: a FASTINTR() */
1699 struct simplelock	com_lock;
1700 #endif /* USE_COMLOCK */
1701 
1702 #ifdef USE_CLOCKLOCK
1703 /* lock regions around the clock hardware */
1704 struct simplelock	clock_lock;
1705 #endif /* USE_CLOCKLOCK */
1706 
1707 /* lock around the MP rendezvous */
1708 static struct simplelock smp_rv_lock;
1709 
1710 static void
1711 init_locks(void)
1712 {
1713 	/*
1714 	 * Get the initial mp_lock with a count of 1 for the BSP.
1715 	 * This uses a LOGICAL cpu ID, ie BSP == 0.
1716 	 */
1717 	mp_lock = 0x00000001;
1718 
1719 	/* ISR uses its own "giant lock" */
1720 	isr_lock = FREE_LOCK;
1721 
1722 #if defined(APIC_INTR_DIAGNOSTIC) && defined(APIC_INTR_DIAGNOSTIC_IRQ)
1723 	s_lock_init((struct simplelock*)&apic_itrace_debuglock);
1724 #endif
1725 
1726 	s_lock_init((struct simplelock*)&mpintr_lock);
1727 
1728 	s_lock_init((struct simplelock*)&mcount_lock);
1729 
1730 	s_lock_init((struct simplelock*)&fast_intr_lock);
1731 	s_lock_init((struct simplelock*)&intr_lock);
1732 	s_lock_init((struct simplelock*)&imen_lock);
1733 	s_lock_init((struct simplelock*)&cpl_lock);
1734 	s_lock_init(&smp_rv_lock);
1735 
1736 #ifdef USE_COMLOCK
1737 	s_lock_init((struct simplelock*)&com_lock);
1738 #endif /* USE_COMLOCK */
1739 #ifdef USE_CLOCKLOCK
1740 	s_lock_init((struct simplelock*)&clock_lock);
1741 #endif /* USE_CLOCKLOCK */
1742 }
1743 
1744 
1745 /* Wait for all APs to be fully initialized */
1746 extern int wait_ap(unsigned int);
1747 
1748 /*
1749  * start each AP in our list
1750  */
1751 static int
1752 start_all_aps(u_int boot_addr)
1753 {
1754 	int     x, i, pg;
1755 	u_char  mpbiosreason;
1756 	u_long  mpbioswarmvec;
1757 	struct globaldata *gd;
1758 	char *stack;
1759 
1760 	POSTCODE(START_ALL_APS_POST);
1761 
1762 	/* initialize BSP's local APIC */
1763 	apic_initialize();
1764 	bsp_apic_ready = 1;
1765 
1766 	/* install the AP 1st level boot code */
1767 	install_ap_tramp(boot_addr);
1768 
1769 
1770 	/* save the current value of the warm-start vector */
1771 	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
1772 #ifndef PC98
1773 	outb(CMOS_REG, BIOS_RESET);
1774 	mpbiosreason = inb(CMOS_DATA);
1775 #endif
1776 
1777 	/* record BSP in CPU map */
1778 	all_cpus = 1;
1779 
1780 	/* set up 0 -> 4MB P==V mapping for AP boot */
1781 	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
1782 	invltlb();
1783 
1784 	/* start each AP */
1785 	for (x = 1; x <= mp_naps; ++x) {
1786 
1787 		/* This is a bit verbose, it will go away soon.  */
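		/*
		 * Per-CPU private space layout in SMPpt: page 0 = globaldata,
		 * pages 1-4 = the private CMAP1-3/PMAP1 PTEs, pages 5 and up =
		 * the idle stack (UPAGES pages).
		 */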
1788 
1789 		/* first page of AP's private space */
1790 		pg = x * i386_btop(sizeof(struct privatespace));
1791 
1792 		/* allocate a new private data page */
1793 		gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
1794 
1795 		/* wire it into the private page table page */
1796 		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
1797 
1798 		/* allocate and set up an idle stack data page */
1799 		stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
1800 		for (i = 0; i < UPAGES; i++)
1801 			SMPpt[pg + 5 + i] = (pt_entry_t)
1802 			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1803 
1804 		SMPpt[pg + 1] = 0;		/* *prv_CMAP1 */
1805 		SMPpt[pg + 2] = 0;		/* *prv_CMAP2 */
1806 		SMPpt[pg + 3] = 0;		/* *prv_CMAP3 */
1807 		SMPpt[pg + 4] = 0;		/* *prv_PMAP1 */
1808 
1809 		/* prime data page for it to use */
1810 		gd->gd_cpuid = x;
1811 		gd->gd_cpu_lockid = x << 24;
1812 		gd->gd_prv_CMAP1 = &SMPpt[pg + 1];
1813 		gd->gd_prv_CMAP2 = &SMPpt[pg + 2];
1814 		gd->gd_prv_CMAP3 = &SMPpt[pg + 3];
1815 		gd->gd_prv_PMAP1 = &SMPpt[pg + 4];
1816 		gd->gd_prv_CADDR1 = SMP_prvspace[x].CPAGE1;
1817 		gd->gd_prv_CADDR2 = SMP_prvspace[x].CPAGE2;
1818 		gd->gd_prv_CADDR3 = SMP_prvspace[x].CPAGE3;
1819 		gd->gd_prv_PADDR1 = (unsigned *)SMP_prvspace[x].PPAGE1;
1820 
1821 		/* setup a vector to our boot code */
1822 		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
1823 		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
1824 #ifndef PC98
1825 		outb(CMOS_REG, BIOS_RESET);
1826 		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
1827 #endif
1828 
1829 		bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
1830 		bootAP = x;
1831 
1832 		/* attempt to start the Application Processor */
1833 		CHECK_INIT(99);	/* setup checkpoints */
1834 		if (!start_ap(x, boot_addr)) {
1835 			printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
1836 			CHECK_PRINT("trace");	/* show checkpoints */
1837 			/* better panic as the AP may be running loose */
1838 			printf("panic y/n? [y] ");
1839 			if (cngetc() != 'n')
1840 				panic("bye-bye");
1841 		}
1842 		CHECK_PRINT("trace");		/* show checkpoints */
1843 
1844 		/* record its version info */
1845 		cpu_apic_versions[x] = cpu_apic_versions[0];
1846 
1847 		all_cpus |= (1 << x);		/* record AP in CPU map */
1848 	}
1849 
1850 	/* build our map of 'other' CPUs */
1851 	other_cpus = all_cpus & ~(1 << cpuid);
1852 
1853 	/* fill in our (BSP) APIC version */
1854 	cpu_apic_versions[0] = lapic.version;
1855 
1856 	/* restore the warmstart vector */
1857 	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
1858 #ifndef PC98
1859 	outb(CMOS_REG, BIOS_RESET);
1860 	outb(CMOS_DATA, mpbiosreason);
1861 #endif
1862 
1863 	/*
1864 	 * Set up the idle context for the BSP.  Similar to above except
1865 	 * that some was done by locore, some by pmap.c and some is implicit
1866 	 * because the BSP is cpu#0 and the page is initially zero, and also
1867 	 * because we can refer to variables by name on the BSP.
1868 	 */
1869 
1870 	/* Allocate and setup BSP idle stack */
1871 	stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
1872 	for (i = 0; i < UPAGES; i++)
1873 		SMPpt[5 + i] = (pt_entry_t)
1874 		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1875 
1876 	*(int *)PTD = 0;
1877 	pmap_set_opt();
1878 
1879 	/* number of APs actually started */
1880 	return mp_ncpus - 1;
1881 }
1882 
1883 
1884 /*
1885  * load the 1st level AP boot code into base memory.
1886  */
1887 
1888 /* targets for relocation */
1889 extern void bigJump(void);
1890 extern void bootCodeSeg(void);
1891 extern void bootDataSeg(void);
1892 extern void MPentry(void);
1893 extern u_int MP_GDT;
1894 extern u_int mp_gdtbase;
1895 
1896 static void
1897 install_ap_tramp(u_int boot_addr)
1898 {
1899 	int     x;
1900 	int     size = *(int *) ((u_long) & bootMP_size);
1901 	u_char *src = (u_char *) ((u_long) bootMP);
1902 	u_char *dst = (u_char *) boot_addr + KERNBASE;
1903 	u_int   boot_base = (u_int) bootMP;
1904 	u_int8_t *dst8;
1905 	u_int16_t *dst16;
1906 	u_int32_t *dst32;
1907 
1908 	POSTCODE(INSTALL_AP_TRAMP_POST);
1909 
1910 	for (x = 0; x < size; ++x)
1911 		*dst++ = *src++;
1912 
1913 	/*
1914 	 * modify addresses in code we just moved to basemem. unfortunately we
1915 	 * need fairly detailed info about mpboot.s for this to work.  changes
1916 	 * to mpboot.s might require changes here.
1917 	 */
1918 
1919 	/* boot code is located in KERNEL space */
1920 	dst = (u_char *) boot_addr + KERNBASE;
1921 
1922 	/* modify the lgdt arg */
1923 	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
1924 	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
1925 
1926 	/* modify the ljmp target for MPentry() */
1927 	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
1928 	*dst32 = ((u_int) MPentry - KERNBASE);
1929 
1930 	/* modify the target for boot code segment */
1931 	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
1932 	dst8 = (u_int8_t *) (dst16 + 1);
1933 	*dst16 = (u_int) boot_addr & 0xffff;
1934 	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
1935 
1936 	/* modify the target for boot data segment */
1937 	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
1938 	dst8 = (u_int8_t *) (dst16 + 1);
1939 	*dst16 = (u_int) boot_addr & 0xffff;
1940 	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
1941 }
1942 
1943 
1944 /*
1945  * this function starts the AP (application processor) identified by its
1946  * logical CPU number (CPU_TO_ID() gives the APIC ID).  It does a "song and dance"
1947  * to accomplish this.  This is necessary because of the nuances
1948  * of the different hardware we might encounter.  It ain't pretty,
1949  * but it seems to work.
1950  */
1951 static int
1952 start_ap(int logical_cpu, u_int boot_addr)
1953 {
1954 	int     physical_cpu;
1955 	int     vector;
1956 	int     cpus;
1957 	u_long  icr_lo, icr_hi;
1958 
1959 	POSTCODE(START_AP_POST);
1960 
1961 	/* get the PHYSICAL APIC ID# */
1962 	physical_cpu = CPU_TO_ID(logical_cpu);
1963 
1964 	/* calculate the vector */
1965 	vector = (boot_addr >> 12) & 0xff;
1966 
1967 	/* used as a watchpoint to signal AP startup */
1968 	cpus = mp_ncpus;
1969 
1970 	/*
1971 	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
1972 	 * resetting and running the target CPU, OR it might be latched
1973 	 * (P5 bug), leaving the CPU waiting for a STARTUP IPI, OR it might
1974 	 * simply be ignored.
1975 	 */
1976 
1977 	/* setup the address for the target AP */
1978 	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
1979 	icr_hi |= (physical_cpu << 24);
1980 	lapic.icr_hi = icr_hi;
1981 
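	/*
	 * ICR low-word encodings used below:
	 *   0x0000c500 = INIT delivery mode, level triggered, level asserted
	 *   0x00008500 = INIT delivery mode, level triggered, level deasserted
	 *   0x00000600 = STARTUP delivery mode, ORed with the page vector
	 */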
1982 	/* do an INIT IPI: assert RESET */
1983 	icr_lo = lapic.icr_lo & 0xfff00000;
1984 	lapic.icr_lo = icr_lo | 0x0000c500;
1985 
1986 	/* wait for pending status end */
1987 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
1988 		 /* spin */ ;
1989 
1990 	/* do an INIT IPI: deassert RESET */
1991 	lapic.icr_lo = icr_lo | 0x00008500;
1992 
1993 	/* wait for pending status end */
1994 	u_sleep(10000);		/* wait ~10ms */
1995 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
1996 		 /* spin */ ;
1997 
1998 	/*
1999 	 * next we do a STARTUP IPI: the previous INIT IPI might still be
2000 	 * latched (P5 bug), in which case this 1st STARTUP would terminate
2001 	 * immediately and the previously started INIT IPI would continue, OR
2002 	 * the previous INIT IPI has already run and this STARTUP IPI will
2003 	 * run, OR the previous INIT IPI was ignored and this STARTUP IPI
2004 	 * will run.
2005 	 */
2006 
2007 	/* do a STARTUP IPI */
2008 	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2009 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2010 		 /* spin */ ;
2011 	u_sleep(200);		/* wait ~200us */
2012 
2013 	/*
2014 	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
2015 	 * the previous STARTUP IPI was cancelled by a latched INIT IPI, OR
2016 	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
2017 	 * recognized after hardware RESET or INIT IPI.
2018 	 */
2019 
2020 	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2021 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2022 		 /* spin */ ;
2023 	u_sleep(200);		/* wait ~200us */
2024 
2025 	/* wait for it to start */
2026 	set_apic_timer(5000000);	/* == 5 seconds */
2027 	while (read_apic_timer())
2028 		if (mp_ncpus > cpus)
2029 			return 1;	/* return SUCCESS */
2030 
2031 	return 0;		/* return FAILURE */
2032 }
2033 
2034 
2035 /*
2036  * Flush the TLB on all other CPUs
2037  *
2038  * XXX: Needs to handshake and wait for completion before proceeding.
2039  */
2040 void
2041 smp_invltlb(void)
2042 {
2043 #if defined(APIC_IO)
2044 	if (smp_started && invltlb_ok)
2045 		all_but_self_ipi(XINVLTLB_OFFSET);
2046 #endif  /* APIC_IO */
2047 }
2048 
2049 void
2050 invlpg(u_int addr)
2051 {
2052 	__asm   __volatile("invlpg (%0)"::"r"(addr):"memory");
2053 
2054 	/* send a message to the other CPUs */
2055 	smp_invltlb();
2056 }
2057 
2058 void
2059 invltlb(void)
2060 {
2061 	u_long  temp;
2062 
2063 	/*
2064 	 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
2065 	 * inlined.
2066 	 */
2067 	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");
2068 
2069 	/* send a message to the other CPUs */
2070 	smp_invltlb();
2071 }
2072 
2073 
2074 /*
2075  * When called, the executing CPU will send an IPI to all other CPUs
2076  *  requesting that they halt execution.
2077  *
2078  * Usually (but not necessarily) called with 'other_cpus' as its arg.
2079  *
2080  *  - Signals all CPUs in map to stop.
2081  *  - Waits for each to stop.
2082  *
2083  * Returns:
2084  *  -1: error
2085  *   0: NA
2086  *   1: ok
2087  *
2088  * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
2089  *            from executing at same time.
2090  */
2091 int
2092 stop_cpus(u_int map)
2093 {
2094 	if (!smp_started)
2095 		return 0;
2096 
2097 	/* send the Xcpustop IPI to all CPUs in map */
2098 	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2099 
2100 	while ((stopped_cpus & map) != map)
2101 		/* spin */ ;
2102 
2103 	return 1;
2104 }
2105 
2106 
2107 /*
2108  * Called by a CPU to restart stopped CPUs.
2109  *
2110  * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2111  *
2112  *  - Signals all CPUs in map to restart.
2113  *  - Waits for each to restart.
2114  *
2115  * Returns:
2116  *  -1: error
2117  *   0: NA
2118  *   1: ok
2119  */
2120 int
2121 restart_cpus(u_int map)
2122 {
2123 	if (!smp_started)
2124 		return 0;
2125 
2126 	started_cpus = map;		/* signal other cpus to restart */
2127 
2128 	while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
2129 		/* spin */ ;
2130 
2131 	return 1;
2132 }
2133 
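/*
 * A minimal (hypothetical) usage sketch for the two routines above, e.g.
 * from code that must quiesce the other CPUs while it pokes at global state:
 *
 *	if (stop_cpus(other_cpus)) {
 *		... touch global state while the other CPUs spin ...
 *		restart_cpus(stopped_cpus);
 *	}
 */
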
2134 int smp_active = 0;	/* are the APs allowed to run? */
2135 SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
2136 
2137 /* XXX maybe should be hw.ncpu */
2138 static int smp_cpus = 1;	/* how many cpus are running */
2139 SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
2140 
2141 int invltlb_ok = 0;	/* throttle smp_invltlb() till safe */
2142 SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
2143 
2144 /* Warning: Do not staticize.  Used from swtch.s */
2145 int do_page_zero_idle = 1; /* bzero pages for fun and profit in idleloop */
2146 SYSCTL_INT(_machdep, OID_AUTO, do_page_zero_idle, CTLFLAG_RW,
2147 	   &do_page_zero_idle, 0, "");
2148 
2149 /* Is forwarding of an interrupt to the CPU holding the ISR lock enabled? */
2150 int forward_irq_enabled = 1;
2151 SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW,
2152 	   &forward_irq_enabled, 0, "");
2153 
2154 /* Enable forwarding of a signal to a process running on a different CPU */
2155 static int forward_signal_enabled = 1;
2156 SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
2157 	   &forward_signal_enabled, 0, "");
2158 
2159 /* Enable forwarding of roundrobin to all other cpus */
2160 static int forward_roundrobin_enabled = 1;
2161 SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
2162 	   &forward_roundrobin_enabled, 0, "");
2163 
2164 /*
2165  * This is called once the rest of the system is up and running and we're
2166  * ready to let the APs out of the pen.
2167  */
2168 void ap_init(void);
2169 
2170 void
2171 ap_init()
2172 {
2173 	u_int	apic_id;
2174 
2175 	/* BSP may have changed PTD while we're waiting for the lock */
2176 	cpu_invltlb();
2177 
2178 	smp_cpus++;
2179 
2180 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2181 	lidt(&r_idt);
2182 #endif
2183 
2184 	/* Build our map of 'other' CPUs. */
2185 	other_cpus = all_cpus & ~(1 << cpuid);
2186 
2187 	printf("SMP: AP CPU #%d Launched!\n", cpuid);
2188 
2189 	/* XXX FIXME: i386 specific, and redundant: Setup the FPU. */
2190 	load_cr0((rcr0() & ~CR0_EM) | CR0_MP | CR0_NE | CR0_TS);
2191 
2192 	/* A quick check from sanity claus */
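	/* the hardware APIC ID is taken from bits 24..27 of the local APIC ID register */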
2193 	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2194 	if (cpuid != apic_id) {
2195 		printf("SMP: cpuid = %d\n", cpuid);
2196 		printf("SMP: apic_id = %d\n", apic_id);
2197 		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2198 		panic("cpuid mismatch! boom!!");
2199 	}
2200 
2201 	/* Init local apic for irq's */
2202 	apic_initialize();
2203 
2204 	/* Set memory range attributes for this CPU to match the BSP */
2205 	mem_range_AP_init();
2206 
2207 	/*
2208 	 * Activate smp_invltlb, although strictly speaking, this isn't
2209 	 * quite correct yet.  We should have a bitfield for cpus willing
2210 	 * to accept TLB flush IPIs or something and sync them.
2211 	 */
2212 	if (smp_cpus == mp_ncpus) {
2213 		invltlb_ok = 1;
2214 		smp_started = 1; /* enable IPIs, tlb shootdown, freezes etc. */
2215 		smp_active = 1;	 /* historic */
2216 	}
2217 }
2218 
2219 #ifdef BETTER_CLOCK
2220 
2221 #define CHECKSTATE_USER	0
2222 #define CHECKSTATE_SYS	1
2223 #define CHECKSTATE_INTR	2
2224 
2225 /* Do not staticize.  Used from apic_vector.s */
2226 struct proc*	checkstate_curproc[NCPU];
2227 int		checkstate_cpustate[NCPU];
2228 u_long		checkstate_pc[NCPU];
2229 
2230 extern long	cp_time[CPUSTATES];
2231 
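/*
 * Convert a profiled PC into an even-numbered index into the profiling
 * buffer, using the profile's 16-bit fixed point scale (the same
 * calculation addupc_intr() performs).
 */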
2232 #define PC_TO_INDEX(pc, prof)				\
2233         ((int)(((u_quad_t)((pc) - (prof)->pr_off) *	\
2234             (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
2235 
2236 static void
2237 addupc_intr_forwarded(struct proc *p, int id, int *astmap)
2238 {
2239 	int i;
2240 	struct uprof *prof;
2241 	u_long pc;
2242 
2243 	pc = checkstate_pc[id];
2244 	prof = &p->p_stats->p_prof;
2245 	if (pc >= prof->pr_off &&
2246 	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
2247 		if ((p->p_flag & P_OWEUPC) == 0) {
2248 			prof->pr_addr = pc;
2249 			prof->pr_ticks = 1;
2250 			p->p_flag |= P_OWEUPC;
2251 		}
2252 		*astmap |= (1 << id);
2253 	}
2254 }
2255 
2256 static void
2257 forwarded_statclock(int id, int pscnt, int *astmap)
2258 {
2259 	struct pstats *pstats;
2260 	long rss;
2261 	struct rusage *ru;
2262 	struct vmspace *vm;
2263 	int cpustate;
2264 	struct proc *p;
2265 #ifdef GPROF
2266 	register struct gmonparam *g;
2267 	int i;
2268 #endif
2269 
2270 	p = checkstate_curproc[id];
2271 	cpustate = checkstate_cpustate[id];
2272 
2273 	switch (cpustate) {
2274 	case CHECKSTATE_USER:
2275 		if (p->p_flag & P_PROFIL)
2276 			addupc_intr_forwarded(p, id, astmap);
2277 		if (pscnt > 1)
2278 			return;
2279 		p->p_uticks++;
2280 		if (p->p_nice > NZERO)
2281 			cp_time[CP_NICE]++;
2282 		else
2283 			cp_time[CP_USER]++;
2284 		break;
2285 	case CHECKSTATE_SYS:
2286 #ifdef GPROF
2287 		/*
2288 		 * Kernel statistics are just like addupc_intr, only easier.
2289 		 */
2290 		g = &_gmonparam;
2291 		if (g->state == GMON_PROF_ON) {
2292 			i = checkstate_pc[id] - g->lowpc;
2293 			if (i < g->textsize) {
2294 				i /= HISTFRACTION * sizeof(*g->kcount);
2295 				g->kcount[i]++;
2296 			}
2297 		}
2298 #endif
2299 		if (pscnt > 1)
2300 			return;
2301 
2302 		if (!p)
2303 			cp_time[CP_IDLE]++;
2304 		else {
2305 			p->p_sticks++;
2306 			cp_time[CP_SYS]++;
2307 		}
2308 		break;
2309 	case CHECKSTATE_INTR:
2310 	default:
2311 #ifdef GPROF
2312 		/*
2313 		 * Kernel statistics are just like addupc_intr, only easier.
2314 		 */
2315 		g = &_gmonparam;
2316 		if (g->state == GMON_PROF_ON) {
2317 			i = checkstate_pc[id] - g->lowpc;
2318 			if (i < g->textsize) {
2319 				i /= HISTFRACTION * sizeof(*g->kcount);
2320 				g->kcount[i]++;
2321 			}
2322 		}
2323 #endif
2324 		if (pscnt > 1)
2325 			return;
2326 		if (p)
2327 			p->p_iticks++;
2328 		cp_time[CP_INTR]++;
2329 	}
2330 	if (p != NULL) {
2331 		p->p_cpticks++;
2332 		if (++p->p_estcpu == 0)
2333 			p->p_estcpu--;
2334 		if ((p->p_estcpu & 3) == 0) {
2335 			resetpriority(p);
2336 			if (p->p_priority >= PUSER)
2337 				p->p_priority = p->p_usrpri;
2338 		}
2339 
2340 		/* Update resource usage integrals and maximums. */
2341 		if ((pstats = p->p_stats) != NULL &&
2342 		    (ru = &pstats->p_ru) != NULL &&
2343 		    (vm = p->p_vmspace) != NULL) {
2344 			ru->ru_ixrss += pgtok(vm->vm_tsize);
2345 			ru->ru_idrss += pgtok(vm->vm_dsize);
2346 			ru->ru_isrss += pgtok(vm->vm_ssize);
2347 			rss = pgtok(vmspace_resident_count(vm));
2348 			if (ru->ru_maxrss < rss)
2349 				ru->ru_maxrss = rss;
2350         	}
2351 	}
2352 }
2353 
2354 void
2355 forward_statclock(int pscnt)
2356 {
2357 	int map;
2358 	int id;
2359 	int i;
2360 
2361 	/* Kludge. We don't yet have separate locks for the interrupts
2362 	 * and the kernel. This means that we cannot let the other processors
2363 	 * handle complex interrupts while inhibiting them from entering
2364 	 * the kernel in a non-interrupt context.
2365 	 *
2366 	 * What we can do, without changing the locking mechanisms yet,
2367 	 * is to let the other processors handle a very simple interrupt
2368 	 * (which determines the processor states), and to do the main
2369 	 * work ourselves.
2370 	 */
2371 
2372 	if (!smp_started || !invltlb_ok || cold || panicstr)
2373 		return;
2374 
2375 	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle ) */
2376 
2377 	map = other_cpus & ~stopped_cpus ;
2378 	checkstate_probed_cpus = 0;
2379 	if (map != 0)
2380 		selected_apic_ipi(map,
2381 				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2382 
2383 	i = 0;
2384 	while (checkstate_probed_cpus != map) {
2385 		/* spin */
2386 		i++;
2387 		if (i == 100000) {
2388 #ifdef BETTER_CLOCK_DIAGNOSTIC
2389 			printf("forward_statclock: checkstate %x\n",
2390 			       checkstate_probed_cpus);
2391 #endif
2392 			break;
2393 		}
2394 	}
2395 
2396 	/*
2397 	 * Step 2: walk through the other processors' processes, update ticks and
2398 	 * profiling info.
2399 	 */
2400 
2401 	map = 0;
2402 	for (id = 0; id < mp_ncpus; id++) {
2403 		if (id == cpuid)
2404 			continue;
2405 		if (((1 << id) & checkstate_probed_cpus) == 0)
2406 			continue;
2407 		forwarded_statclock(id, pscnt, &map);
2408 	}
2409 	if (map != 0) {
2410 		checkstate_need_ast |= map;
2411 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2412 		i = 0;
2413 		while ((checkstate_need_ast & map) != 0) {
2414 			/* spin */
2415 			i++;
2416 			if (i > 100000) {
2417 #ifdef BETTER_CLOCK_DIAGNOSTIC
2418 				printf("forward_statclock: dropped ast 0x%x\n",
2419 				       checkstate_need_ast & map);
2420 #endif
2421 				break;
2422 			}
2423 		}
2424 	}
2425 }
2426 
2427 void
2428 forward_hardclock(int pscnt)
2429 {
2430 	int map;
2431 	int id;
2432 	struct proc *p;
2433 	struct pstats *pstats;
2434 	int i;
2435 
2436 	/* Kludge. We don't yet have separate locks for the interrupts
2437 	 * and the kernel. This means that we cannot let the other processors
2438 	 * handle complex interrupts while inhibiting them from entering
2439 	 * the kernel in a non-interrupt context.
2440 	 *
2441 	 * What we can do, without changing the locking mechanisms yet,
2442 	 * is to let the other processors handle a very simple interrupt
2443 	 * (which determines the processor states), and to do the main
2444 	 * work ourselves.
2445 	 */
2446 
2447 	if (!smp_started || !invltlb_ok || cold || panicstr)
2448 		return;
2449 
2450 	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2451 
2452 	map = other_cpus & ~stopped_cpus ;
2453 	checkstate_probed_cpus = 0;
2454 	if (map != 0)
2455 		selected_apic_ipi(map,
2456 				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2457 
2458 	i = 0;
2459 	while (checkstate_probed_cpus != map) {
2460 		/* spin */
2461 		i++;
2462 		if (i == 100000) {
2463 #ifdef BETTER_CLOCK_DIAGNOSTIC
2464 			printf("forward_hardclock: checkstate %x\n",
2465 			       checkstate_probed_cpus);
2466 #endif
2467 			break;
2468 		}
2469 	}
2470 
2471 	/*
2472 	 * Step 2: walk through the other processors' processes, update virtual
2473 	 * timer and profiling timer. If stathz == 0, also update ticks and
2474 	 * profiling info.
2475 	 */
2476 
2477 	map = 0;
2478 	for (id = 0; id < mp_ncpus; id++) {
2479 		if (id == cpuid)
2480 			continue;
2481 		if (((1 << id) & checkstate_probed_cpus) == 0)
2482 			continue;
2483 		p = checkstate_curproc[id];
2484 		if (p) {
2485 			pstats = p->p_stats;
2486 			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
2487 			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
2488 			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
2489 				psignal(p, SIGVTALRM);
2490 				map |= (1 << id);
2491 			}
2492 			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
2493 			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
2494 				psignal(p, SIGPROF);
2495 				map |= (1 << id);
2496 			}
2497 		}
2498 		if (stathz == 0) {
2499 			forwarded_statclock(id, pscnt, &map);
2500 		}
2501 	}
2502 	if (map != 0) {
2503 		checkstate_need_ast |= map;
2504 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2505 		i = 0;
2506 		while ((checkstate_need_ast & map) != 0) {
2507 			/* spin */
2508 			i++;
2509 			if (i > 100000) {
2510 #ifdef BETTER_CLOCK_DIAGNOSTIC
2511 				printf("forward_hardclock: dropped ast 0x%x\n",
2512 				       checkstate_need_ast & map);
2513 #endif
2514 				break;
2515 			}
2516 		}
2517 	}
2518 }
2519 
2520 #endif /* BETTER_CLOCK */
2521 
2522 void
2523 forward_signal(struct proc *p)
2524 {
2525 	int map;
2526 	int id;
2527 	int i;
2528 
2529 	/* Kludge. We don't yet have separate locks for the interrupts
2530 	 * and the kernel. This means that we cannot let the other processors
2531 	 * handle complex interrupts while inhibiting them from entering
2532 	 * the kernel in a non-interrupt context.
2533 	 *
2534 	 * What we can do, without changing the locking mechanisms yet,
2535 	 * is to let the other processors handle a very simple interrupt
2536 	 * (which determines the processor states), and to do the main
2537 	 * work ourselves.
2538 	 */
2539 
2540 	if (!smp_started || !invltlb_ok || cold || panicstr)
2541 		return;
2542 	if (!forward_signal_enabled)
2543 		return;
2544 	while (1) {
2545 		if (p->p_stat != SRUN)
2546 			return;
2547 		id = p->p_oncpu;
2548 		if (id == 0xff)
2549 			return;
2550 		map = (1<<id);
2551 		checkstate_need_ast |= map;
2552 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2553 		i = 0;
2554 		while ((checkstate_need_ast & map) != 0) {
2555 			/* spin */
2556 			i++;
2557 			if (i > 100000) {
2558 #if 0
2559 				printf("forward_signal: dropped ast 0x%x\n",
2560 				       checkstate_need_ast & map);
2561 #endif
2562 				break;
2563 			}
2564 		}
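		/* done unless the process migrated to another cpu while we waited */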
2565 		if (id == p->p_oncpu)
2566 			return;
2567 	}
2568 }
2569 
2570 void
2571 forward_roundrobin(void)
2572 {
2573 	u_int map;
2574 	int i;
2575 
2576 	if (!smp_started || !invltlb_ok || cold || panicstr)
2577 		return;
2578 	if (!forward_roundrobin_enabled)
2579 		return;
2580 	resched_cpus |= other_cpus;
2581 	map = other_cpus & ~stopped_cpus ;
2582 #if 1
2583 	selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2584 #else
2585 	(void) all_but_self_ipi(XCPUAST_OFFSET);
2586 #endif
2587 	i = 0;
2588 	while ((checkstate_need_ast & map) != 0) {
2589 		/* spin */
2590 		i++;
2591 		if (i > 100000) {
2592 #if 0
2593 			printf("forward_roundrobin: dropped ast 0x%x\n",
2594 			       checkstate_need_ast & map);
2595 #endif
2596 			break;
2597 		}
2598 	}
2599 }
2600 
2601 
2602 #ifdef APIC_INTR_REORDER
2603 /*
2604  *	Maintain mapping from softintr vector to isr bit in local apic.
2605  */
2606 void
2607 set_lapic_isrloc(int intr, int vector)
2608 {
2609 	if (intr < 0 || intr > 32)
2610 		panic("set_lapic_isrloc: bad intr argument: %d", intr);
2611 	if (vector < ICU_OFFSET || vector > 255)
2612 		panic("set_lapic_isrloc: bad vector argument: %d", vector);
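	/*
	 * The local APIC ISR is eight 32-bit registers spaced 16 bytes apart:
	 * (vector >> 5) selects the register and the << 2 scales that to the
	 * u_int-sized fields of the lapic structure.
	 */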
2613 	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
2614 	apic_isrbit_location[intr].bit = (1<<(vector & 31));
2615 }
2616 #endif
2617 
2618 /*
2619  * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
2620  * (if specified), rendezvous, execute the action function (if specified),
2621  * rendezvous again, execute the teardown function (if specified), and then
2622  * resume.
2623  *
2624  * Note that the supplied external functions _must_ be reentrant and aware
2625  * that they are running in parallel and in an unknown lock context.
2626  */
2627 static void (*smp_rv_setup_func)(void *arg);
2628 static void (*smp_rv_action_func)(void *arg);
2629 static void (*smp_rv_teardown_func)(void *arg);
2630 static void *smp_rv_func_arg;
2631 static volatile int smp_rv_waiters[2];
2632 
2633 void
2634 smp_rendezvous_action(void)
2635 {
2636 	/* setup function */
2637 	if (smp_rv_setup_func != NULL)
2638 		smp_rv_setup_func(smp_rv_func_arg);
2639 	/* spin on entry rendezvous */
2640 	atomic_add_int(&smp_rv_waiters[0], 1);
2641 	while (smp_rv_waiters[0] < mp_ncpus)
2642 		;
2643 	/* action function */
2644 	if (smp_rv_action_func != NULL)
2645 		smp_rv_action_func(smp_rv_func_arg);
2646 	/* spin on exit rendezvous */
2647 	atomic_add_int(&smp_rv_waiters[1], 1);
2648 	while (smp_rv_waiters[1] < mp_ncpus)
2649 		;
2650 	/* teardown function */
2651 	if (smp_rv_teardown_func != NULL)
2652 		smp_rv_teardown_func(smp_rv_func_arg);
2653 }
2654 
2655 void
2656 smp_rendezvous(void (* setup_func)(void *),
2657 	       void (* action_func)(void *),
2658 	       void (* teardown_func)(void *),
2659 	       void *arg)
2660 {
2661 	u_int	efl;
2662 
2663 	/* obtain rendezvous lock */
2664 	s_lock(&smp_rv_lock);		/* XXX sleep here? NOWAIT flag? */
2665 
2666 	/* set static function pointers */
2667 	smp_rv_setup_func = setup_func;
2668 	smp_rv_action_func = action_func;
2669 	smp_rv_teardown_func = teardown_func;
2670 	smp_rv_func_arg = arg;
2671 	smp_rv_waiters[0] = 0;
2672 	smp_rv_waiters[1] = 0;
2673 
2674 	/* disable interrupts on this CPU, save interrupt status */
2675 	efl = read_eflags();
2676 	write_eflags(efl & ~PSL_I);
2677 
2678 	/* signal other processors, which will enter the IPI with interrupts off */
2679 	all_but_self_ipi(XRENDEZVOUS_OFFSET);
2680 
2681 	/* call executor function */
2682 	smp_rendezvous_action();
2683 
2684 	/* restore interrupt flag */
2685 	write_eflags(efl);
2686 
2687 	/* release lock */
2688 	s_unlock(&smp_rv_lock);
2689 }
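
/*
 * A minimal (hypothetical) example of using the rendezvous: flush the CPU
 * caches on every processor and wait until all of them have done so:
 *
 *	static void
 *	flush_action(void *dummy)
 *	{
 *		wbinvd();
 *	}
 *
 *	smp_rendezvous(NULL, flush_action, NULL, NULL);
 */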
2690