xref: /freebsd/sys/kern/subr_smp.c (revision 1d66272a85cde1c8a69c58f4b5dd649babd6eca6)
1 /*
2  * Copyright (c) 1996, by Steve Passe
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. The name of the developer may NOT be used to endorse or promote products
11  *    derived from this software without specific prior written permission.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include "opt_cpu.h"
29 #include "opt_user_ldt.h"
30 
31 #ifdef SMP
32 #include <machine/smptests.h>
33 #else
34 #error "this file requires the SMP kernel option"
35 #endif
36 
37 #include <sys/param.h>
38 #include <sys/bus.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/proc.h>
42 #include <sys/sysctl.h>
43 #include <sys/malloc.h>
44 #include <sys/memrange.h>
45 #include <sys/mutex.h>
46 #ifdef BETTER_CLOCK
47 #include <sys/dkstat.h>
48 #endif
49 #include <sys/cons.h>	/* cngetc() */
50 
51 #include <vm/vm.h>
52 #include <vm/vm_param.h>
53 #include <vm/pmap.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vm_extern.h>
56 #ifdef BETTER_CLOCK
57 #include <sys/lock.h>
58 #include <vm/vm_map.h>
59 #include <sys/user.h>
60 #ifdef GPROF
61 #include <sys/gmon.h>
62 #endif
63 #endif
64 
65 #include <machine/smp.h>
66 #include <machine/apic.h>
67 #include <machine/atomic.h>
68 #include <machine/cpufunc.h>
69 #include <machine/mpapic.h>
70 #include <machine/psl.h>
71 #include <machine/segments.h>
72 #include <machine/smptests.h>	/** TEST_DEFAULT_CONFIG, TEST_TEST1 */
73 #include <machine/tss.h>
74 #include <machine/specialreg.h>
75 #include <machine/globaldata.h>
76 
77 #if defined(APIC_IO)
78 #include <machine/md_var.h>		/* setidt() */
79 #include <i386/isa/icu.h>		/* IPIs */
80 #include <i386/isa/intr_machdep.h>	/* IPIs */
81 #endif	/* APIC_IO */
82 
83 #if defined(TEST_DEFAULT_CONFIG)
84 #define MPFPS_MPFB1	TEST_DEFAULT_CONFIG
85 #else
86 #define MPFPS_MPFB1	mpfps->mpfb1
87 #endif  /* TEST_DEFAULT_CONFIG */
88 
89 #define WARMBOOT_TARGET		0
90 #define WARMBOOT_OFF		(KERNBASE + 0x0467)
91 #define WARMBOOT_SEG		(KERNBASE + 0x0469)
92 
93 #ifdef PC98
94 #define BIOS_BASE		(0xe8000)
95 #define BIOS_SIZE		(0x18000)
96 #else
97 #define BIOS_BASE		(0xf0000)
98 #define BIOS_SIZE		(0x10000)
99 #endif
100 #define BIOS_COUNT		(BIOS_SIZE/4)
101 
102 #define CMOS_REG		(0x70)
103 #define CMOS_DATA		(0x71)
104 #define BIOS_RESET		(0x0f)
105 #define BIOS_WARM		(0x0a)
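/*
 * Rough sketch of how the constants above are used by start_all_aps():
 * the real-mode warm-boot vector at 0040:0067 (mapped here as
 * WARMBOOT_OFF/WARMBOOT_SEG) is pointed at the AP trampoline, and
 * BIOS_WARM is written to CMOS shutdown-status register BIOS_RESET so
 * that a processor coming out of reset is sent through that vector by
 * the BIOS instead of running the full POST.
 */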
106 
107 #define PROCENTRY_FLAG_EN	0x01
108 #define PROCENTRY_FLAG_BP	0x02
109 #define IOAPICENTRY_FLAG_EN	0x01
110 
111 
112 /* MP Floating Pointer Structure */
113 typedef struct MPFPS {
114 	char    signature[4];
115 	void   *pap;
116 	u_char  length;
117 	u_char  spec_rev;
118 	u_char  checksum;
119 	u_char  mpfb1;
120 	u_char  mpfb2;
121 	u_char  mpfb3;
122 	u_char  mpfb4;
123 	u_char  mpfb5;
124 }      *mpfps_t;
125 
126 /* MP Configuration Table Header */
127 typedef struct MPCTH {
128 	char    signature[4];
129 	u_short base_table_length;
130 	u_char  spec_rev;
131 	u_char  checksum;
132 	u_char  oem_id[8];
133 	u_char  product_id[12];
134 	void   *oem_table_pointer;
135 	u_short oem_table_size;
136 	u_short entry_count;
137 	void   *apic_address;
138 	u_short extended_table_length;
139 	u_char  extended_table_checksum;
140 	u_char  reserved;
141 }      *mpcth_t;
142 
143 
144 typedef struct PROCENTRY {
145 	u_char  type;
146 	u_char  apic_id;
147 	u_char  apic_version;
148 	u_char  cpu_flags;
149 	u_long  cpu_signature;
150 	u_long  feature_flags;
151 	u_long  reserved1;
152 	u_long  reserved2;
153 }      *proc_entry_ptr;
154 
155 typedef struct BUSENTRY {
156 	u_char  type;
157 	u_char  bus_id;
158 	char    bus_type[6];
159 }      *bus_entry_ptr;
160 
161 typedef struct IOAPICENTRY {
162 	u_char  type;
163 	u_char  apic_id;
164 	u_char  apic_version;
165 	u_char  apic_flags;
166 	void   *apic_address;
167 }      *io_apic_entry_ptr;
168 
169 typedef struct INTENTRY {
170 	u_char  type;
171 	u_char  int_type;
172 	u_short int_flags;
173 	u_char  src_bus_id;
174 	u_char  src_bus_irq;
175 	u_char  dst_apic_id;
176 	u_char  dst_apic_int;
177 }      *int_entry_ptr;
178 
179 /* descriptions of MP basetable entries */
180 typedef struct BASETABLE_ENTRY {
181 	u_char  type;
182 	u_char  length;
183 	char    name[16];
184 }       basetable_entry;
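/*
 * Rough map of how these structures hang together: mp_probe() scans the
 * EBDA, the top of base memory and the BIOS ROM for the "_MP_" floating
 * pointer structure (MPFPS).  If its mpfb1 byte is zero, pap points at a
 * configuration table header (MPCTH) followed by entry_count
 * variable-length entries of types 0..4 (lengths 20/8/8/8/8 bytes, see
 * basetable_entry_types[] below); a non-zero mpfb1 instead selects one of
 * the "default" configurations handled via default_data[].
 */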
185 
186 /*
187  * this code MUST be enabled both here and in mpboot.s.
188  * it tracks the very early stages of AP boot by placing values in CMOS ram.
189  * it is NORMALLY never needed, hence the primitive method of enabling it.
190  *
191 #define CHECK_POINTS
192  */
193 
194 #if defined(CHECK_POINTS) && !defined(PC98)
195 #define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
196 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
197 
198 #define CHECK_INIT(D);				\
199 	CHECK_WRITE(0x34, (D));			\
200 	CHECK_WRITE(0x35, (D));			\
201 	CHECK_WRITE(0x36, (D));			\
202 	CHECK_WRITE(0x37, (D));			\
203 	CHECK_WRITE(0x38, (D));			\
204 	CHECK_WRITE(0x39, (D));
205 
206 #define CHECK_PRINT(S);				\
207 	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
208 	   (S),					\
209 	   CHECK_READ(0x34),			\
210 	   CHECK_READ(0x35),			\
211 	   CHECK_READ(0x36),			\
212 	   CHECK_READ(0x37),			\
213 	   CHECK_READ(0x38),			\
214 	   CHECK_READ(0x39));
215 
216 #else				/* CHECK_POINTS */
217 
218 #define CHECK_INIT(D)
219 #define CHECK_PRINT(S)
220 
221 #endif				/* CHECK_POINTS */
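/*
 * Usage sketch: start_all_aps() calls CHECK_INIT(99) to seed six CMOS
 * bytes before kicking an AP and CHECK_PRINT("trace") to dump them
 * afterwards; with CHECK_POINTS enabled, mpboot.s updates those bytes as
 * the AP progresses, so the dump shows roughly how far a wedged AP got.
 */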
222 
223 /*
224  * Values to send to the POST hardware.
225  */
226 #define MP_BOOTADDRESS_POST	0x10
227 #define MP_PROBE_POST		0x11
228 #define MPTABLE_PASS1_POST	0x12
229 
230 #define MP_START_POST		0x13
231 #define MP_ENABLE_POST		0x14
232 #define MPTABLE_PASS2_POST	0x15
233 
234 #define START_ALL_APS_POST	0x16
235 #define INSTALL_AP_TRAMP_POST	0x17
236 #define START_AP_POST		0x18
237 
238 #define MP_ANNOUNCE_POST	0x19
239 
240 /* used to hold the AP's until we are ready to release them */
241 struct simplelock	ap_boot_lock;
242 
243 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
244 int	current_postcode;
245 
246 /** XXX FIXME: what system files declare these??? */
247 extern struct region_descriptor r_gdt, r_idt;
248 
249 int	bsp_apic_ready = 0;	/* flags usability of the BSP apic */
250 int	mp_ncpus;		/* # of CPUs, including BSP */
251 int	mp_naps;		/* # of Application Processors (APs) */
252 int	mp_nbusses;		/* # of busses */
253 int	mp_napics;		/* # of IO APICs */
254 int	boot_cpu_id;		/* designated BSP */
255 vm_offset_t cpu_apic_address;
256 vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
257 extern	int nkpt;
258 
259 u_int32_t cpu_apic_versions[MAXCPU];
260 u_int32_t *io_apic_versions;
261 
262 #ifdef APIC_INTR_DIAGNOSTIC
263 int apic_itrace_enter[32];
264 int apic_itrace_tryisrlock[32];
265 int apic_itrace_gotisrlock[32];
266 int apic_itrace_active[32];
267 int apic_itrace_masked[32];
268 int apic_itrace_noisrlock[32];
269 int apic_itrace_masked2[32];
270 int apic_itrace_unmask[32];
271 int apic_itrace_noforward[32];
272 int apic_itrace_leave[32];
273 int apic_itrace_enter2[32];
274 int apic_itrace_doreti[32];
275 int apic_itrace_splz[32];
276 int apic_itrace_eoi[32];
277 #ifdef APIC_INTR_DIAGNOSTIC_IRQ
278 unsigned short apic_itrace_debugbuffer[32768];
279 int apic_itrace_debugbuffer_idx;
280 struct simplelock apic_itrace_debuglock;
281 #endif
282 #endif
283 
284 #ifdef APIC_INTR_REORDER
285 struct {
286 	volatile int *location;
287 	int bit;
288 } apic_isrbit_location[32];
289 #endif
290 
291 struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];
292 
293 /*
294  * APIC ID logical/physical mapping structures.
295  * We oversize these to simplify boot-time config.
296  */
297 int     cpu_num_to_apic_id[NAPICID];
298 int     io_num_to_apic_id[NAPICID];
299 int     apic_id_to_logical[NAPICID];
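/*
 * The CPU_TO_ID()/IO_TO_ID() and ID_TO_CPU()/ID_TO_IO() macros used
 * throughout this file index these arrays, mapping logical CPU or IO APIC
 * numbers to physical APIC IDs and back.  All entries are reset to -1 at
 * the top of mptable_pass2() and are only meaningful once the MP table
 * has been parsed.
 */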
300 
301 
302 /* Bitmap of all available CPUs */
303 u_int	all_cpus;
304 
305 /* AP uses this during bootstrap.  Do not staticize.  */
306 char *bootSTK;
307 static int bootAP;
308 
309 /* Hotwire a 0->4MB V==P mapping */
310 extern pt_entry_t *KPTphys;
311 
312 /* SMP page table page */
313 extern pt_entry_t *SMPpt;
314 
315 struct pcb stoppcbs[MAXCPU];
316 
317 int smp_started;		/* has the system started? */
318 
319 /*
320  * Local data and functions.
321  */
322 
323 static int	mp_capable;
324 static u_int	boot_address;
325 static u_int	base_memory;
326 
327 static int	picmode;		/* 0: virtual wire mode, 1: PIC mode */
328 static mpfps_t	mpfps;
329 static int	search_for_sig(u_int32_t target, int count);
330 static void	mp_enable(u_int boot_addr);
331 
332 static void	mptable_pass1(void);
333 static int	mptable_pass2(void);
334 static void	default_mp_table(int type);
335 static void	fix_mp_table(void);
336 static void	setup_apic_irq_mapping(void);
337 static void	init_locks(void);
338 static int	start_all_aps(u_int boot_addr);
339 static void	install_ap_tramp(u_int boot_addr);
340 static int	start_ap(int logicalCpu, u_int boot_addr);
341 static int	apic_int_is_bus_type(int intr, int bus_type);
342 static void	release_aps(void *dummy);
343 
344 /*
345  * Calculate usable address in base memory for AP trampoline code.
346  */
347 u_int
348 mp_bootaddress(u_int basemem)
349 {
350 	POSTCODE(MP_BOOTADDRESS_POST);
351 
352 	base_memory = basemem * 1024;	/* convert to bytes */
353 
354 	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
355 	if ((base_memory - boot_address) < bootMP_size)
356 		boot_address -= 4096;	/* not enough, lower by 4k */
357 
358 	return boot_address;
359 }
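/*
 * Worked example: with 640K of base memory, base_memory is 0xa0000 and
 * the round-down leaves no room below it, so boot_address drops to
 * 0x9f000 (assuming bootMP_size is no larger than 4K).
 */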
360 
361 
362 /*
363  * Look for an Intel MP spec table (ie, SMP capable hardware).
364  */
365 int
366 mp_probe(void)
367 {
368 	int     x;
369 	u_long  segment;
370 	u_int32_t target;
371 
372 	POSTCODE(MP_PROBE_POST);
373 
374 	/* see if EBDA exists */
375 	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
376 		/* search first 1K of EBDA */
377 		target = (u_int32_t) (segment << 4);
378 		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
379 			goto found;
380 	} else {
381 		/* last 1K of base memory, effective 'top of base' passed in */
382 		target = (u_int32_t) (base_memory - 0x400);
383 		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
384 			goto found;
385 	}
386 
387 	/* search the BIOS */
388 	target = (u_int32_t) BIOS_BASE;
389 	if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
390 		goto found;
391 
392 	/* nothing found */
393 	mpfps = (mpfps_t)0;
394 	mp_capable = 0;
395 	return 0;
396 
397 found:
398 	/* calculate needed resources */
399 	mpfps = (mpfps_t)x;
400 	mptable_pass1();
401 
402 	/* flag fact that we are running multiple processors */
403 	mp_capable = 1;
404 	return 1;
405 }
406 
407 
408 /*
409  * Initialize the SMP hardware and the APIC and start up the AP's.
410  */
411 void
412 mp_start(void)
413 {
414 	POSTCODE(MP_START_POST);
415 
416 	/* look for MP capable motherboard */
417 	if (mp_capable)
418 		mp_enable(boot_address);
419 	else
420 		panic("MP hardware not found!");
421 }
422 
423 
424 /*
425  * Print various information about the SMP system hardware and setup.
426  */
427 void
428 mp_announce(void)
429 {
430 	int     x;
431 
432 	POSTCODE(MP_ANNOUNCE_POST);
433 
434 	printf("FreeBSD/SMP: Multiprocessor motherboard\n");
435 	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
436 	printf(", version: 0x%08x", cpu_apic_versions[0]);
437 	printf(", at 0x%08x\n", cpu_apic_address);
438 	for (x = 1; x <= mp_naps; ++x) {
439 		printf(" cpu%d (AP):  apic id: %2d", x, CPU_TO_ID(x));
440 		printf(", version: 0x%08x", cpu_apic_versions[x]);
441 		printf(", at 0x%08x\n", cpu_apic_address);
442 	}
443 
444 #if defined(APIC_IO)
445 	for (x = 0; x < mp_napics; ++x) {
446 		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
447 		printf(", version: 0x%08x", io_apic_versions[x]);
448 		printf(", at 0x%08x\n", io_apic_address[x]);
449 	}
450 #else
451 	printf(" Warning: APIC I/O disabled\n");
452 #endif	/* APIC_IO */
453 }
454 
455 /*
456  * AP CPUs call this to sync up protected mode.
457  */
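/*
 * Each AP gets its own NGDT-descriptor slice of gdt[] and its own TSS;
 * GPRIV_SEL is pointed at the AP's slot in SMP_prvspace[] so that the
 * PCPU_GET()/PCPU_SET() per-cpu accessors resolve to this CPU's data.
 */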
458 void
459 init_secondary(void)
460 {
461 	int	gsel_tss;
462 	int	x, myid = bootAP;
463 
464 	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
465 	gdt_segs[GPROC0_SEL].ssd_base =
466 		(int) &SMP_prvspace[myid].globaldata.gd_common_tss;
467 	SMP_prvspace[myid].globaldata.gd_prvspace =
468 		&SMP_prvspace[myid].globaldata;
469 
470 	for (x = 0; x < NGDT; x++) {
471 		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
472 	}
473 
474 	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
475 	r_gdt.rd_base = (int) &gdt[myid * NGDT];
476 	lgdt(&r_gdt);			/* does magic intra-segment return */
477 
478 	lidt(&r_idt);
479 
480 	lldt(_default_ldt);
481 #ifdef USER_LDT
482 	PCPU_SET(currentldt, _default_ldt);
483 #endif
484 
485 	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
486 	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
487 	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
488 	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
489 	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
490 	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
491 	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
492 	ltr(gsel_tss);
493 
494 	pmap_set_opt();
495 }
496 
497 
498 #if defined(APIC_IO)
499 /*
500  * Final configuration of the BSP's local APIC:
501  *  - disable 'pic mode'.
502  *  - disable 'virtual wire mode'.
503  *  - enable NMI.
504  */
505 void
506 bsp_apic_configure(void)
507 {
508 	u_char		byte;
509 	u_int32_t	temp;
510 
511 	/* leave 'pic mode' if necessary */
512 	if (picmode) {
513 		outb(0x22, 0x70);	/* select IMCR */
514 		byte = inb(0x23);	/* current contents */
515 		byte |= 0x01;		/* mask external INTR */
516 		outb(0x23, byte);	/* disconnect 8259s/NMI */
517 	}
518 
519 	/* mask lint0 (the 8259 'virtual wire' connection) */
520 	temp = lapic.lvt_lint0;
521 	temp |= APIC_LVT_M;		/* set the mask */
522 	lapic.lvt_lint0 = temp;
523 
524 	/* setup lint1 to handle NMI */
525 	temp = lapic.lvt_lint1;
526 	temp &= ~APIC_LVT_M;		/* clear the mask */
527 	lapic.lvt_lint1 = temp;
528 
529 	if (bootverbose)
530 		apic_dump("bsp_apic_configure()");
531 }
532 #endif  /* APIC_IO */
533 
534 
535 /*******************************************************************
536  * local functions and data
537  */
538 
539 /*
540  * start the SMP system
541  */
542 static void
543 mp_enable(u_int boot_addr)
544 {
545 	int     x;
546 #if defined(APIC_IO)
547 	int     apic;
548 	u_int   ux;
549 #endif	/* APIC_IO */
550 
551 	POSTCODE(MP_ENABLE_POST);
552 
553 	/* turn on 4MB of V == P addressing so we can get to MP table */
554 	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
555 	invltlb();
556 
557 	/* examine the MP table for needed info, uses physical addresses */
558 	x = mptable_pass2();
559 
560 	*(int *)PTD = 0;
561 	invltlb();
562 
563 	/* can't process default configs till the CPU APIC is pmapped */
564 	if (x)
565 		default_mp_table(x);
566 
567 	/* post scan cleanup */
568 	fix_mp_table();
569 	setup_apic_irq_mapping();
570 
571 #if defined(APIC_IO)
572 
573 	/* fill the LOGICAL io_apic_versions table */
574 	for (apic = 0; apic < mp_napics; ++apic) {
575 		ux = io_apic_read(apic, IOAPIC_VER);
576 		io_apic_versions[apic] = ux;
577 		io_apic_set_id(apic, IO_TO_ID(apic));
578 	}
579 
580 	/* program each IO APIC in the system */
581 	for (apic = 0; apic < mp_napics; ++apic)
582 		if (io_apic_setup(apic) < 0)
583 			panic("IO APIC setup failure");
584 
585 	/* install a 'Spurious INTerrupt' vector */
586 	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
587 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
588 
589 	/* install an inter-CPU IPI for TLB invalidation */
590 	setidt(XINVLTLB_OFFSET, Xinvltlb,
591 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
592 
593 #ifdef BETTER_CLOCK
594 	/* install an inter-CPU IPI for reading processor state */
595 	setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
596 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
597 #endif
598 
599 	/* install an inter-CPU IPI for all-CPU rendezvous */
600 	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
601 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
602 
603 	/* install an inter-CPU IPI for forcing an additional software trap */
604 	setidt(XCPUAST_OFFSET, Xcpuast,
605 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
606 
607 	/* install an inter-CPU IPI for interrupt forwarding */
608 	setidt(XFORWARD_IRQ_OFFSET, Xforward_irq,
609 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
610 
611 	/* install an inter-CPU IPI for CPU stop/restart */
612 	setidt(XCPUSTOP_OFFSET, Xcpustop,
613 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
614 
615 #if defined(TEST_TEST1)
616 	/* install a "fake hardware INTerrupt" vector */
617 	setidt(XTEST1_OFFSET, Xtest1,
618 	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
619 #endif  /** TEST_TEST1 */
620 
621 #endif	/* APIC_IO */
622 
623 	/* initialize all SMP locks */
624 	init_locks();
625 
626 	/* obtain the ap_boot_lock */
627 	s_lock(&ap_boot_lock);
628 
629 	/* start each Application Processor */
630 	start_all_aps(boot_addr);
631 }
632 
633 
634 /*
635  * look for the MP spec signature
636  */
637 
638 /* string defined by the Intel MP Spec as identifying the MP table */
639 #define MP_SIG		0x5f504d5f	/* _MP_ */
640 #define NEXT(X)		((X) += 4)
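/*
 * Note: 'count' is in 32-bit words and NEXT() advances four of them, so
 * the signature is only tested on 16-byte boundaries, which is where the
 * MP spec says the floating pointer structure must begin.
 */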
641 static int
642 search_for_sig(u_int32_t target, int count)
643 {
644 	int     x;
645 	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
646 
647 	for (x = 0; x < count; NEXT(x))
648 		if (addr[x] == MP_SIG)
649 			/* make array index a byte index */
650 			return (target + (x * sizeof(u_int32_t)));
651 
652 	return -1;
653 }
654 
655 
656 static basetable_entry basetable_entry_types[] =
657 {
658 	{0, 20, "Processor"},
659 	{1, 8, "Bus"},
660 	{2, 8, "I/O APIC"},
661 	{3, 8, "I/O INT"},
662 	{4, 8, "Local INT"}
663 };
664 
665 typedef struct BUSDATA {
666 	u_char  bus_id;
667 	enum busTypes bus_type;
668 }       bus_datum;
669 
670 typedef struct INTDATA {
671 	u_char  int_type;
672 	u_short int_flags;
673 	u_char  src_bus_id;
674 	u_char  src_bus_irq;
675 	u_char  dst_apic_id;
676 	u_char  dst_apic_int;
677 	u_char	int_vector;
678 }       io_int, local_int;
679 
680 typedef struct BUSTYPENAME {
681 	u_char  type;
682 	char    name[7];
683 }       bus_type_name;
684 
685 static bus_type_name bus_type_table[] =
686 {
687 	{CBUS, "CBUS"},
688 	{CBUSII, "CBUSII"},
689 	{EISA, "EISA"},
690 	{MCA, "MCA"},
691 	{UNKNOWN_BUSTYPE, "---"},
692 	{ISA, "ISA"},
693 	{MCA, "MCA"},
694 	{UNKNOWN_BUSTYPE, "---"},
695 	{UNKNOWN_BUSTYPE, "---"},
696 	{UNKNOWN_BUSTYPE, "---"},
697 	{UNKNOWN_BUSTYPE, "---"},
698 	{UNKNOWN_BUSTYPE, "---"},
699 	{PCI, "PCI"},
700 	{UNKNOWN_BUSTYPE, "---"},
701 	{UNKNOWN_BUSTYPE, "---"},
702 	{UNKNOWN_BUSTYPE, "---"},
703 	{UNKNOWN_BUSTYPE, "---"},
704 	{XPRESS, "XPRESS"},
705 	{UNKNOWN_BUSTYPE, "---"}
706 };
707 /* from MP spec v1.4, table 5-1 */
708 static int default_data[7][5] =
709 {
710 /*   nbus, id0, type0, id1, type1 */
711 	{1, 0, ISA, 255, 255},
712 	{1, 0, EISA, 255, 255},
713 	{1, 0, EISA, 255, 255},
714 	{1, 0, MCA, 255, 255},
715 	{2, 0, ISA, 1, PCI},
716 	{2, 0, EISA, 1, PCI},
717 	{2, 0, MCA, 1, PCI}
718 };
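/*
 * Rows are indexed by (default config type - 1).  For example, type 5
 * (ISA + PCI, integrated APIC) yields two busses: bus id 0 of type ISA
 * and bus id 1 of type PCI.
 */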
719 
720 
721 /* the bus data */
722 static bus_datum *bus_data;
723 
724 /* the IO INT data, one entry per possible APIC INTerrupt */
725 static io_int  *io_apic_ints;
726 
727 static int nintrs;
728 
729 static int processor_entry	__P((proc_entry_ptr entry, int cpu));
730 static int bus_entry		__P((bus_entry_ptr entry, int bus));
731 static int io_apic_entry	__P((io_apic_entry_ptr entry, int apic));
732 static int int_entry		__P((int_entry_ptr entry, int intr));
733 static int lookup_bus_type	__P((char *name));
734 
735 
736 /*
737  * 1st pass on motherboard's Intel MP specification table.
738  *
739  * initializes:
740  *	mp_ncpus = 1
741  *
742  * determines:
743  *	cpu_apic_address (common to all CPUs)
744  *	io_apic_address[N]
745  *	mp_naps
746  *	mp_nbusses
747  *	mp_napics
748  *	nintrs
749  */
750 static void
751 mptable_pass1(void)
752 {
753 	int	x;
754 	mpcth_t	cth;
755 	int	totalSize;
756 	void*	position;
757 	int	count;
758 	int	type;
759 
760 	POSTCODE(MPTABLE_PASS1_POST);
761 
762 	/* clear various tables */
763 	for (x = 0; x < NAPICID; ++x) {
764 		io_apic_address[x] = ~0;	/* IO APIC address table */
765 	}
766 
767 	/* init everything to empty */
768 	mp_naps = 0;
769 	mp_nbusses = 0;
770 	mp_napics = 0;
771 	nintrs = 0;
772 
773 	/* check for use of 'default' configuration */
774 	if (MPFPS_MPFB1 != 0) {
775 		/* use default addresses */
776 		cpu_apic_address = DEFAULT_APIC_BASE;
777 		io_apic_address[0] = DEFAULT_IO_APIC_BASE;
778 
779 		/* fill in with defaults */
780 		mp_naps = 2;		/* includes BSP */
781 		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
782 #if defined(APIC_IO)
783 		mp_napics = 1;
784 		nintrs = 16;
785 #endif	/* APIC_IO */
786 	}
787 	else {
788 		if ((cth = mpfps->pap) == 0)
789 			panic("MP Configuration Table Header MISSING!");
790 
791 		cpu_apic_address = (vm_offset_t) cth->apic_address;
792 
793 		/* walk the table, recording info of interest */
794 		totalSize = cth->base_table_length - sizeof(struct MPCTH);
795 		position = (u_char *) cth + sizeof(struct MPCTH);
796 		count = cth->entry_count;
797 
798 		while (count--) {
799 			switch (type = *(u_char *) position) {
800 			case 0: /* processor_entry */
801 				if (((proc_entry_ptr)position)->cpu_flags
802 					& PROCENTRY_FLAG_EN)
803 					++mp_naps;
804 				break;
805 			case 1: /* bus_entry */
806 				++mp_nbusses;
807 				break;
808 			case 2: /* io_apic_entry */
809 				if (((io_apic_entry_ptr)position)->apic_flags
810 					& IOAPICENTRY_FLAG_EN)
811 					io_apic_address[mp_napics++] =
812 					    (vm_offset_t)((io_apic_entry_ptr)
813 						position)->apic_address;
814 				break;
815 			case 3: /* int_entry */
816 				++nintrs;
817 				break;
818 			case 4:	/* local int entry */
819 				break;
820 			default:
821 				panic("mpfps Base Table HOSED!");
822 				/* NOTREACHED */
823 			}
824 
825 			totalSize -= basetable_entry_types[type].length;
826 			(u_char*)position += basetable_entry_types[type].length;
827 		}
828 	}
829 
830 	/* qualify the numbers */
831 	if (mp_naps > MAXCPU) {
832 		printf("Warning: only using %d of %d available CPUs!\n",
833 			MAXCPU, mp_naps);
834 		mp_naps = MAXCPU;
835 	}
836 
837 	/*
838 	 * Count the BSP.
839 	 * This is also used as a counter while starting the APs.
840 	 */
841 	mp_ncpus = 1;
842 
843 	--mp_naps;	/* subtract the BSP */
844 }
845 
846 
847 /*
848  * 2nd pass on motherboard's Intel MP specification table.
849  *
850  * sets:
851  *	boot_cpu_id
852  *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
853  *	CPU_TO_ID(N), logical CPU to APIC ID table
854  *	IO_TO_ID(N), logical IO to APIC ID table
855  *	bus_data[N]
856  *	io_apic_ints[N]
857  */
858 static int
859 mptable_pass2(void)
860 {
861 	int     x;
862 	mpcth_t cth;
863 	int     totalSize;
864 	void*   position;
865 	int     count;
866 	int     type;
867 	int     apic, bus, cpu, intr;
868 	int	i, j;
869 	int	pgeflag;
870 
871 	POSTCODE(MPTABLE_PASS2_POST);
872 
873 	pgeflag = 0;		/* XXX - Not used under SMP yet.  */
874 
875 	MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
876 	    M_DEVBUF, M_WAITOK);
877 	MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
878 	    M_DEVBUF, M_WAITOK);
879 	MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + 1),
880 	    M_DEVBUF, M_WAITOK);
881 	MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
882 	    M_DEVBUF, M_WAITOK);
883 
884 	bzero(ioapic, sizeof(ioapic_t *) * mp_napics);
885 
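	/*
	 * Map each IO APIC's register window into kernel virtual space by
	 * borrowing page table slots from the top of the SMP page table
	 * page (SMPpt[NPTEPG-2], SMPpt[NPTEPG-3], ...), sharing a slot
	 * whenever two IO APICs live in the same page frame.
	 */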
886 	for (i = 0; i < mp_napics; i++) {
887 		for (j = 0; j < mp_napics; j++) {
888 			/* same page frame as a previous IO apic? */
889 			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) ==
890 			    (io_apic_address[i] & PG_FRAME)) {
891 				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
892 					+ (NPTEPG-2-j) * PAGE_SIZE
893 					+ (io_apic_address[i] & PAGE_MASK));
894 				break;
895 			}
896 			/* use this slot if available */
897 			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) {
898 				SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW |
899 				    pgeflag | (io_apic_address[i] & PG_FRAME));
900 				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
901 					+ (NPTEPG-2-j) * PAGE_SIZE
902 					+ (io_apic_address[i] & PAGE_MASK));
903 				break;
904 			}
905 		}
906 	}
907 
908 	/* clear various tables */
909 	for (x = 0; x < NAPICID; ++x) {
910 		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
911 		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
912 		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
913 	}
914 
915 	/* clear bus data table */
916 	for (x = 0; x < mp_nbusses; ++x)
917 		bus_data[x].bus_id = 0xff;
918 
919 	/* clear IO APIC INT table */
920 	for (x = 0; x < (nintrs + 1); ++x) {
921 		io_apic_ints[x].int_type = 0xff;
922 		io_apic_ints[x].int_vector = 0xff;
923 	}
924 
925 	/* setup the cpu/apic mapping arrays */
926 	boot_cpu_id = -1;
927 
928 	/* record whether PIC or virtual-wire mode */
929 	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;
930 
931 	/* check for use of 'default' configuration */
932 	if (MPFPS_MPFB1 != 0)
933 		return MPFPS_MPFB1;	/* return default configuration type */
934 
935 	if ((cth = mpfps->pap) == 0)
936 		panic("MP Configuration Table Header MISSING!");
937 
938 	/* walk the table, recording info of interest */
939 	totalSize = cth->base_table_length - sizeof(struct MPCTH);
940 	position = (u_char *) cth + sizeof(struct MPCTH);
941 	count = cth->entry_count;
942 	apic = bus = intr = 0;
943 	cpu = 1;				/* pre-count the BSP */
944 
945 	while (count--) {
946 		switch (type = *(u_char *) position) {
947 		case 0:
948 			if (processor_entry(position, cpu))
949 				++cpu;
950 			break;
951 		case 1:
952 			if (bus_entry(position, bus))
953 				++bus;
954 			break;
955 		case 2:
956 			if (io_apic_entry(position, apic))
957 				++apic;
958 			break;
959 		case 3:
960 			if (int_entry(position, intr))
961 				++intr;
962 			break;
963 		case 4:
964 			/* int_entry(position); */
965 			break;
966 		default:
967 			panic("mpfps Base Table HOSED!");
968 			/* NOTREACHED */
969 		}
970 
971 		totalSize -= basetable_entry_types[type].length;
972 		(u_char *) position += basetable_entry_types[type].length;
973 	}
974 
975 	if (boot_cpu_id == -1)
976 		panic("NO BSP found!");
977 
978 	/* report the fact that it's NOT a default configuration */
979 	return 0;
980 }
981 
982 
983 void
984 assign_apic_irq(int apic, int intpin, int irq)
985 {
986 	int x;
987 
988 	if (int_to_apicintpin[irq].ioapic != -1)
989 		panic("assign_apic_irq: inconsistent table");
990 
991 	int_to_apicintpin[irq].ioapic = apic;
992 	int_to_apicintpin[irq].int_pin = intpin;
993 	int_to_apicintpin[irq].apic_address = ioapic[apic];
994 	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
995 
996 	for (x = 0; x < nintrs; x++) {
997 		if ((io_apic_ints[x].int_type == 0 ||
998 		     io_apic_ints[x].int_type == 3) &&
999 		    io_apic_ints[x].int_vector == 0xff &&
1000 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
1001 		    io_apic_ints[x].dst_apic_int == intpin)
1002 			io_apic_ints[x].int_vector = irq;
1003 	}
1004 }
1005 
1006 void
1007 revoke_apic_irq(int irq)
1008 {
1009 	int x;
1010 	int oldapic;
1011 	int oldintpin;
1012 
1013 	if (int_to_apicintpin[irq].ioapic == -1)
1014 		panic("revoke_apic_irq: inconsistent table");
1015 
1016 	oldapic = int_to_apicintpin[irq].ioapic;
1017 	oldintpin = int_to_apicintpin[irq].int_pin;
1018 
1019 	int_to_apicintpin[irq].ioapic = -1;
1020 	int_to_apicintpin[irq].int_pin = 0;
1021 	int_to_apicintpin[irq].apic_address = NULL;
1022 	int_to_apicintpin[irq].redirindex = 0;
1023 
1024 	for (x = 0; x < nintrs; x++) {
1025 		if ((io_apic_ints[x].int_type == 0 ||
1026 		     io_apic_ints[x].int_type == 3) &&
1027 		    io_apic_ints[x].int_vector == 0xff &&
1028 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1029 		    io_apic_ints[x].dst_apic_int == oldintpin)
1030 			io_apic_ints[x].int_vector = 0xff;
1031 	}
1032 }
1033 
1034 
1035 
1036 static void
1037 swap_apic_id(int apic, int oldid, int newid)
1038 {
1039 	int x;
1040 	int oapic;
1041 
1042 
1043 	if (oldid == newid)
1044 		return;			/* Nothing to do */
1045 
1046 	printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1047 	       apic, oldid, newid);
1048 
1049 	/* Swap physical APIC IDs in interrupt entries */
1050 	for (x = 0; x < nintrs; x++) {
1051 		if (io_apic_ints[x].dst_apic_id == oldid)
1052 			io_apic_ints[x].dst_apic_id = newid;
1053 		else if (io_apic_ints[x].dst_apic_id == newid)
1054 			io_apic_ints[x].dst_apic_id = oldid;
1055 	}
1056 
1057 	/* Swap physical APIC IDs in IO_TO_ID mappings */
1058 	for (oapic = 0; oapic < mp_napics; oapic++)
1059 		if (IO_TO_ID(oapic) == newid)
1060 			break;
1061 
1062 	if (oapic < mp_napics) {
1063 		printf("Changing APIC ID for IO APIC #%d from "
1064 		       "%d to %d in MP table\n",
1065 		       oapic, newid, oldid);
1066 		IO_TO_ID(oapic) = oldid;
1067 	}
1068 	IO_TO_ID(apic) = newid;
1069 }
1070 
1071 
1072 static void
1073 fix_id_to_io_mapping(void)
1074 {
1075 	int x;
1076 
1077 	for (x = 0; x < NAPICID; x++)
1078 		ID_TO_IO(x) = -1;
1079 
1080 	for (x = 0; x <= mp_naps; x++)
1081 		if (CPU_TO_ID(x) < NAPICID)
1082 			ID_TO_IO(CPU_TO_ID(x)) = x;
1083 
1084 	for (x = 0; x < mp_napics; x++)
1085 		if (IO_TO_ID(x) < NAPICID)
1086 			ID_TO_IO(IO_TO_ID(x)) = x;
1087 }
1088 
1089 
1090 static int
1091 first_free_apic_id(void)
1092 {
1093 	int freeid, x;
1094 
1095 	for (freeid = 0; freeid < NAPICID; freeid++) {
1096 		for (x = 0; x <= mp_naps; x++)
1097 			if (CPU_TO_ID(x) == freeid)
1098 				break;
1099 		if (x <= mp_naps)
1100 			continue;
1101 		for (x = 0; x < mp_napics; x++)
1102 			if (IO_TO_ID(x) == freeid)
1103 				break;
1104 		if (x < mp_napics)
1105 			continue;
1106 		return freeid;
1107 	}
1108 	return freeid;
1109 }
1110 
1111 
1112 static int
1113 io_apic_id_acceptable(int apic, int id)
1114 {
1115 	int cpu;		/* Logical CPU number */
1116 	int oapic;		/* Logical IO APIC number for other IO APIC */
1117 
1118 	if (id >= NAPICID)
1119 		return 0;	/* Out of range */
1120 
1121 	for (cpu = 0; cpu <= mp_naps; cpu++)
1122 		if (CPU_TO_ID(cpu) == id)
1123 			return 0;	/* Conflict with CPU */
1124 
1125 	for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1126 		if (IO_TO_ID(oapic) == id)
1127 			return 0;	/* Conflict with other APIC */
1128 
1129 	return 1;		/* ID is acceptable for IO APIC */
1130 }
1131 
1132 
1133 /*
1134  * parse an Intel MP specification table
1135  */
1136 static void
1137 fix_mp_table(void)
1138 {
1139 	int	x;
1140 	int	id;
1141 	int	bus_0 = 0;	/* Stop GCC warning */
1142 	int	bus_pci = 0;	/* Stop GCC warning */
1143 	int	num_pci_bus;
1144 	int	apic;		/* IO APIC unit number */
1145 	int     freeid;		/* Free physical APIC ID */
1146 	int	physid;		/* Current physical IO APIC ID */
1147 
1148 	/*
1149 	 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1150 	 * did it wrong.  The MP spec says that when more than 1 PCI bus
1151 	 * exists the BIOS must begin with bus entries for the PCI bus and use
1152 	 * actual PCI bus numbering.  This implies that when only 1 PCI bus
1153 	 * exists the BIOS can choose to ignore this ordering, and indeed many
1154 	 * MP motherboards do ignore it.  This causes a problem when the PCI
1155 	 * sub-system makes requests of the MP sub-system based on PCI bus
1156 	 * numbers.	So here we look for the situation and renumber the
1157 	 * busses and associated INTs in an effort to "make it right".
1158 	 */
1159 
1160 	/* find bus 0, PCI bus, count the number of PCI busses */
1161 	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1162 		if (bus_data[x].bus_id == 0) {
1163 			bus_0 = x;
1164 		}
1165 		if (bus_data[x].bus_type == PCI) {
1166 			++num_pci_bus;
1167 			bus_pci = x;
1168 		}
1169 	}
1170 	/*
1171 	 * bus_0 == slot of bus with ID of 0
1172 	 * bus_pci == slot of last PCI bus encountered
1173 	 */
1174 
1175 	/* check the 1 PCI bus case for sanity */
1176 	/* if it is number 0 all is well */
1177 	if (num_pci_bus == 1 &&
1178 	    bus_data[bus_pci].bus_id != 0) {
1179 
1180 		/* mis-numbered, swap with whichever bus uses slot 0 */
1181 
1182 		/* swap the bus entry types */
1183 		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1184 		bus_data[bus_0].bus_type = PCI;
1185 
1186 		/* swap each relevant INTerrupt entry */
1187 		id = bus_data[bus_pci].bus_id;
1188 		for (x = 0; x < nintrs; ++x) {
1189 			if (io_apic_ints[x].src_bus_id == id) {
1190 				io_apic_ints[x].src_bus_id = 0;
1191 			}
1192 			else if (io_apic_ints[x].src_bus_id == 0) {
1193 				io_apic_ints[x].src_bus_id = id;
1194 			}
1195 		}
1196 	}
1197 
1198 	/* Assign IO APIC IDs.
1199 	 *
1200 	 * First try the existing ID. If a conflict is detected, try
1201 	 * the ID in the MP table.  If a conflict is still detected, find
1202 	 * a free id.
1203 	 *
1204 	 * We cannot use the ID_TO_IO table before all conflicts have been
1205 	 * resolved and the table has been corrected.
1206 	 */
1207 	for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1208 
1209 		/* First try to use the value set by the BIOS */
1210 		physid = io_apic_get_id(apic);
1211 		if (io_apic_id_acceptable(apic, physid)) {
1212 			if (IO_TO_ID(apic) != physid)
1213 				swap_apic_id(apic, IO_TO_ID(apic), physid);
1214 			continue;
1215 		}
1216 
1217 		/* Then check if the value in the MP table is acceptable */
1218 		if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1219 			continue;
1220 
1221 		/* Last resort, find a free APIC ID and use it */
1222 		freeid = first_free_apic_id();
1223 		if (freeid >= NAPICID)
1224 			panic("No free physical APIC IDs found");
1225 
1226 		if (io_apic_id_acceptable(apic, freeid)) {
1227 			swap_apic_id(apic, IO_TO_ID(apic), freeid);
1228 			continue;
1229 		}
1230 		panic("Free physical APIC ID not usable");
1231 	}
1232 	fix_id_to_io_mapping();
1233 
1234 	/* detect and fix broken Compaq MP table */
1235 	if (apic_int_type(0, 0) == -1) {
1236 		printf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
1237 		io_apic_ints[nintrs].int_type = 3;	/* ExtInt */
1238 		io_apic_ints[nintrs].int_vector = 0xff;	/* Unassigned */
1239 		/* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
1240 		io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
1241 		io_apic_ints[nintrs].dst_apic_int = 0;	/* Pin 0 */
1242 		nintrs++;
1243 	}
1244 }
1245 
1246 
1247 /* Assign low level interrupt handlers */
1248 static void
1249 setup_apic_irq_mapping(void)
1250 {
1251 	int	x;
1252 	int	int_vector;
1253 
1254 	/* Clear array */
1255 	for (x = 0; x < APIC_INTMAPSIZE; x++) {
1256 		int_to_apicintpin[x].ioapic = -1;
1257 		int_to_apicintpin[x].int_pin = 0;
1258 		int_to_apicintpin[x].apic_address = NULL;
1259 		int_to_apicintpin[x].redirindex = 0;
1260 	}
1261 
1262 	/* First assign ISA/EISA interrupts */
1263 	for (x = 0; x < nintrs; x++) {
1264 		int_vector = io_apic_ints[x].src_bus_irq;
1265 		if (int_vector < APIC_INTMAPSIZE &&
1266 		    io_apic_ints[x].int_vector == 0xff &&
1267 		    int_to_apicintpin[int_vector].ioapic == -1 &&
1268 		    (apic_int_is_bus_type(x, ISA) ||
1269 		     apic_int_is_bus_type(x, EISA)) &&
1270 		    io_apic_ints[x].int_type == 0) {
1271 			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1272 					io_apic_ints[x].dst_apic_int,
1273 					int_vector);
1274 		}
1275 	}
1276 
1277 	/* Assign first set of interrupts to intpins on IOAPIC #0 */
1278 	for (x = 0; x < nintrs; x++) {
1279 		int_vector = io_apic_ints[x].dst_apic_int;
1280 		if (int_vector < APIC_INTMAPSIZE &&
1281 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1282 		    io_apic_ints[x].int_vector == 0xff &&
1283 		    int_to_apicintpin[int_vector].ioapic == -1 &&
1284 		    (io_apic_ints[x].int_type == 0 ||
1285 		     io_apic_ints[x].int_type == 3)) {
1286 			assign_apic_irq(0,
1287 					io_apic_ints[x].dst_apic_int,
1288 					int_vector);
1289 		}
1290 	}
1291 	/*
1292 	 * Assign interrupts for remaining intpins.
1293 	 * Skip IOAPIC #0 intpin 0 if the type is ExtInt, since this indicates
1294 	 * that an entry for ISA/EISA IRQ 0 exists, and that a fallback to mixed
1295 	 * mode (taken when 8254 interrupts are not being delivered) can reuse
1296 	 * that low level interrupt handler.
1297 	 */
1298 	int_vector = 0;
1299 	while (int_vector < APIC_INTMAPSIZE &&
1300 	       int_to_apicintpin[int_vector].ioapic != -1)
1301 		int_vector++;
1302 	for (x = 0; x < nintrs && int_vector < APIC_INTMAPSIZE; x++) {
1303 		if ((io_apic_ints[x].int_type == 0 ||
1304 		     (io_apic_ints[x].int_type == 3 &&
1305 		      (io_apic_ints[x].dst_apic_id != IO_TO_ID(0) ||
1306 		       io_apic_ints[x].dst_apic_int != 0))) &&
1307 		    io_apic_ints[x].int_vector == 0xff) {
1308 			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1309 					io_apic_ints[x].dst_apic_int,
1310 					int_vector);
1311 			int_vector++;
1312 			while (int_vector < APIC_INTMAPSIZE &&
1313 			       int_to_apicintpin[int_vector].ioapic != -1)
1314 				int_vector++;
1315 		}
1316 	}
1317 }
1318 
1319 
1320 static int
1321 processor_entry(proc_entry_ptr entry, int cpu)
1322 {
1323 	/* check for usability */
1324 	if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1325 		return 0;
1326 
1327 	if (entry->apic_id >= NAPICID)
1328 		panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1329 	/* check for BSP flag */
1330 	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1331 		boot_cpu_id = entry->apic_id;
1332 		CPU_TO_ID(0) = entry->apic_id;
1333 		ID_TO_CPU(entry->apic_id) = 0;
1334 		return 0;	/* it's already been counted */
1335 	}
1336 
1337 	/* add another AP to list, if less than max number of CPUs */
1338 	else if (cpu < MAXCPU) {
1339 		CPU_TO_ID(cpu) = entry->apic_id;
1340 		ID_TO_CPU(entry->apic_id) = cpu;
1341 		return 1;
1342 	}
1343 
1344 	return 0;
1345 }
1346 
1347 
1348 static int
1349 bus_entry(bus_entry_ptr entry, int bus)
1350 {
1351 	int     x;
1352 	char    c, name[8];
1353 
1354 	/* encode the name into an index */
1355 	for (x = 0; x < 6; ++x) {
1356 		if ((c = entry->bus_type[x]) == ' ')
1357 			break;
1358 		name[x] = c;
1359 	}
1360 	name[x] = '\0';
1361 
1362 	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1363 		panic("unknown bus type: '%s'", name);
1364 
1365 	bus_data[bus].bus_id = entry->bus_id;
1366 	bus_data[bus].bus_type = x;
1367 
1368 	return 1;
1369 }
1370 
1371 
1372 static int
1373 io_apic_entry(io_apic_entry_ptr entry, int apic)
1374 {
1375 	if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1376 		return 0;
1377 
1378 	IO_TO_ID(apic) = entry->apic_id;
1379 	if (entry->apic_id < NAPICID)
1380 		ID_TO_IO(entry->apic_id) = apic;
1381 
1382 	return 1;
1383 }
1384 
1385 
1386 static int
1387 lookup_bus_type(char *name)
1388 {
1389 	int     x;
1390 
1391 	for (x = 0; x < MAX_BUSTYPE; ++x)
1392 		if (strcmp(bus_type_table[x].name, name) == 0)
1393 			return bus_type_table[x].type;
1394 
1395 	return UNKNOWN_BUSTYPE;
1396 }
1397 
1398 
1399 static int
1400 int_entry(int_entry_ptr entry, int intr)
1401 {
1402 	int apic;
1403 
1404 	io_apic_ints[intr].int_type = entry->int_type;
1405 	io_apic_ints[intr].int_flags = entry->int_flags;
1406 	io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1407 	io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1408 	if (entry->dst_apic_id == 255) {
1409 		/* This signal goes to all IO APICs.  Select an IO APIC
1410 		   with a sufficient number of interrupt pins */
1411 		for (apic = 0; apic < mp_napics; apic++)
1412 			if (((io_apic_read(apic, IOAPIC_VER) &
1413 			      IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1414 			    entry->dst_apic_int)
1415 				break;
1416 		if (apic < mp_napics)
1417 			io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1418 		else
1419 			io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1420 	} else
1421 		io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1422 	io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1423 
1424 	return 1;
1425 }
1426 
1427 
1428 static int
1429 apic_int_is_bus_type(int intr, int bus_type)
1430 {
1431 	int     bus;
1432 
1433 	for (bus = 0; bus < mp_nbusses; ++bus)
1434 		if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1435 		    && ((int) bus_data[bus].bus_type == bus_type))
1436 			return 1;
1437 
1438 	return 0;
1439 }
1440 
1441 
1442 /*
1443  * Given a traditional ISA INT mask, return an APIC mask.
1444  */
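/*
 * For example, isa_apic_mask(1 << 5) looks up ISA IRQ 5; if that IRQ has
 * an IO APIC redirection entry the corresponding APIC interrupt bit is
 * returned as a mask, otherwise 0.
 */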
1445 u_int
1446 isa_apic_mask(u_int isa_mask)
1447 {
1448 	int isa_irq;
1449 	int apic_pin;
1450 
1451 #if defined(SKIP_IRQ15_REDIRECT)
1452 	if (isa_mask == (1 << 15)) {
1453 		printf("skipping ISA IRQ15 redirect\n");
1454 		return isa_mask;
1455 	}
1456 #endif  /* SKIP_IRQ15_REDIRECT */
1457 
1458 	isa_irq = ffs(isa_mask);		/* find its bit position */
1459 	if (isa_irq == 0)			/* doesn't exist */
1460 		return 0;
1461 	--isa_irq;				/* make it zero based */
1462 
1463 	apic_pin = isa_apic_irq(isa_irq);	/* look for APIC connection */
1464 	if (apic_pin == -1)
1465 		return 0;
1466 
1467 	return (1 << apic_pin);			/* convert pin# to a mask */
1468 }
1469 
1470 
1471 /*
1472  * Determine which APIC pin an ISA/EISA INT is attached to.
1473  */
1474 #define INTTYPE(I)	(io_apic_ints[(I)].int_type)
1475 #define INTPIN(I)	(io_apic_ints[(I)].dst_apic_int)
1476 #define INTIRQ(I)	(io_apic_ints[(I)].int_vector)
1477 #define INTAPIC(I)	(ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1478 
1479 #define SRCBUSIRQ(I)	(io_apic_ints[(I)].src_bus_irq)
1480 int
1481 isa_apic_irq(int isa_irq)
1482 {
1483 	int     intr;
1484 
1485 	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
1486 		if (INTTYPE(intr) == 0) {		/* standard INT */
1487 			if (SRCBUSIRQ(intr) == isa_irq) {
1488 				if (apic_int_is_bus_type(intr, ISA) ||
1489 			            apic_int_is_bus_type(intr, EISA))
1490 					return INTIRQ(intr);	/* found */
1491 			}
1492 		}
1493 	}
1494 	return -1;					/* NOT found */
1495 }
1496 
1497 
1498 /*
1499  * Determine which APIC pin a PCI INT is attached to.
1500  */
1501 #define SRCBUSID(I)	(io_apic_ints[(I)].src_bus_id)
1502 #define SRCBUSDEVICE(I)	((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1503 #define SRCBUSLINE(I)	(io_apic_ints[(I)].src_bus_irq & 0x03)
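/*
 * For PCI entries the MP table packs the source into src_bus_irq:
 * bits 6:2 hold the PCI device number and bits 1:0 the INT line
 * (0 = INTA# .. 3 = INTD#), e.g. device 9 INTB# is (9 << 2) | 1.
 */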
1504 int
1505 pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1506 {
1507 	int     intr;
1508 
1509 	--pciInt;					/* zero based */
1510 
1511 	for (intr = 0; intr < nintrs; ++intr)		/* check each record */
1512 		if ((INTTYPE(intr) == 0)		/* standard INT */
1513 		    && (SRCBUSID(intr) == pciBus)
1514 		    && (SRCBUSDEVICE(intr) == pciDevice)
1515 		    && (SRCBUSLINE(intr) == pciInt))	/* a candidate IRQ */
1516 			if (apic_int_is_bus_type(intr, PCI))
1517 				return INTIRQ(intr);	/* exact match */
1518 
1519 	return -1;					/* NOT found */
1520 }
1521 
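/*
 * Given an assigned IRQ, return the IRQ of another IO APIC pin wired to
 * the same source (same bus and, for PCI, the same device and INT line;
 * for ISA/EISA, the same bus IRQ), or -1 if there is none.
 */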
1522 int
1523 next_apic_irq(int irq)
1524 {
1525 	int intr, ointr;
1526 	int bus, bustype;
1527 
1528 	bus = 0;
1529 	bustype = 0;
1530 	for (intr = 0; intr < nintrs; intr++) {
1531 		if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1532 			continue;
1533 		bus = SRCBUSID(intr);
1534 		bustype = apic_bus_type(bus);
1535 		if (bustype != ISA &&
1536 		    bustype != EISA &&
1537 		    bustype != PCI)
1538 			continue;
1539 		break;
1540 	}
1541 	if (intr >= nintrs) {
1542 		return -1;
1543 	}
1544 	for (ointr = intr + 1; ointr < nintrs; ointr++) {
1545 		if (INTTYPE(ointr) != 0)
1546 			continue;
1547 		if (bus != SRCBUSID(ointr))
1548 			continue;
1549 		if (bustype == PCI) {
1550 			if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1551 				continue;
1552 			if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1553 				continue;
1554 		}
1555 		if (bustype == ISA || bustype == EISA) {
1556 			if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1557 				continue;
1558 		}
1559 		if (INTPIN(intr) == INTPIN(ointr))
1560 			continue;
1561 		break;
1562 	}
1563 	if (ointr >= nintrs) {
1564 		return -1;
1565 	}
1566 	return INTIRQ(ointr);
1567 }
1568 #undef SRCBUSLINE
1569 #undef SRCBUSDEVICE
1570 #undef SRCBUSID
1571 #undef SRCBUSIRQ
1572 
1573 #undef INTPIN
1574 #undef INTIRQ
1575 #undef INTAPIC
1576 #undef INTTYPE
1577 
1578 
1579 /*
1580  * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1581  *
1582  * XXX FIXME:
1583  *  Exactly what this means is unclear at this point.  It is a solution
1584  *  for motherboards that redirect the MBIRQ0 pin.  Generically a motherboard
1585  *  could route any of the ISA INTs to upper (>15) IRQ values.  But most would
1586  *  NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1587  *  option.
1588  */
1589 int
1590 undirect_isa_irq(int rirq)
1591 {
1592 #if defined(READY)
1593 	if (bootverbose)
1594 	    printf("Freeing redirected ISA irq %d.\n", rirq);
1595 	/** FIXME: tickle the MB redirector chip */
1596 	return ???;
1597 #else
1598 	if (bootverbose)
1599 	    printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1600 	return 0;
1601 #endif  /* READY */
1602 }
1603 
1604 
1605 /*
1606  * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1607  */
1608 int
1609 undirect_pci_irq(int rirq)
1610 {
1611 #if defined(READY)
1612 	if (bootverbose)
1613 		printf("Freeing redirected PCI irq %d.\n", rirq);
1614 
1615 	/** FIXME: tickle the MB redirector chip */
1616 	return ???;
1617 #else
1618 	if (bootverbose)
1619 		printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1620 		       rirq);
1621 	return 0;
1622 #endif  /* READY */
1623 }
1624 
1625 
1626 /*
1627  * given a bus ID, return:
1628  *  the bus type if found
1629  *  -1 if NOT found
1630  */
1631 int
1632 apic_bus_type(int id)
1633 {
1634 	int     x;
1635 
1636 	for (x = 0; x < mp_nbusses; ++x)
1637 		if (bus_data[x].bus_id == id)
1638 			return bus_data[x].bus_type;
1639 
1640 	return -1;
1641 }
1642 
1643 
1644 /*
1645  * given a LOGICAL APIC# and pin#, return:
1646  *  the associated src bus ID if found
1647  *  -1 if NOT found
1648  */
1649 int
1650 apic_src_bus_id(int apic, int pin)
1651 {
1652 	int     x;
1653 
1654 	/* search each of the possible INTerrupt sources */
1655 	for (x = 0; x < nintrs; ++x)
1656 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1657 		    (pin == io_apic_ints[x].dst_apic_int))
1658 			return (io_apic_ints[x].src_bus_id);
1659 
1660 	return -1;		/* NOT found */
1661 }
1662 
1663 
1664 /*
1665  * given a LOGICAL APIC# and pin#, return:
1666  *  the associated src bus IRQ if found
1667  *  -1 if NOT found
1668  */
1669 int
1670 apic_src_bus_irq(int apic, int pin)
1671 {
1672 	int     x;
1673 
1674 	for (x = 0; x < nintrs; x++)
1675 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1676 		    (pin == io_apic_ints[x].dst_apic_int))
1677 			return (io_apic_ints[x].src_bus_irq);
1678 
1679 	return -1;		/* NOT found */
1680 }
1681 
1682 
1683 /*
1684  * given a LOGICAL APIC# and pin#, return:
1685  *  the associated INTerrupt type if found
1686  *  -1 if NOT found
1687  */
1688 int
1689 apic_int_type(int apic, int pin)
1690 {
1691 	int     x;
1692 
1693 	/* search each of the possible INTerrupt sources */
1694 	for (x = 0; x < nintrs; ++x)
1695 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1696 		    (pin == io_apic_ints[x].dst_apic_int))
1697 			return (io_apic_ints[x].int_type);
1698 
1699 	return -1;		/* NOT found */
1700 }
1701 
1702 int
1703 apic_irq(int apic, int pin)
1704 {
1705 	int x;
1706 	int res;
1707 
1708 	for (x = 0; x < nintrs; ++x)
1709 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1710 		    (pin == io_apic_ints[x].dst_apic_int)) {
1711 			res = io_apic_ints[x].int_vector;
1712 			if (res == 0xff)
1713 				return -1;
1714 			if (apic != int_to_apicintpin[res].ioapic)
1715 				panic("apic_irq: inconsistent table");
1716 			if (pin != int_to_apicintpin[res].int_pin)
1717 				panic("apic_irq inconsistent table (2)");
1718 			return res;
1719 		}
1720 	return -1;
1721 }
1722 
1723 
1724 /*
1725  * given a LOGICAL APIC# and pin#, return:
1726  *  the associated trigger mode if found
1727  *  -1 if NOT found
1728  */
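/*
 * The 2-bit value extracted below is the MP spec "EL" field: 00 means
 * conforms to the bus default, 01 edge-triggered, 11 level-triggered.
 * The low two bits returned by apic_polarity() below are the "PO" field:
 * 00 conforms, 01 active-high, 11 active-low.
 */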
1729 int
1730 apic_trigger(int apic, int pin)
1731 {
1732 	int     x;
1733 
1734 	/* search each of the possible INTerrupt sources */
1735 	for (x = 0; x < nintrs; ++x)
1736 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1737 		    (pin == io_apic_ints[x].dst_apic_int))
1738 			return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1739 
1740 	return -1;		/* NOT found */
1741 }
1742 
1743 
1744 /*
1745  * given a LOGICAL APIC# and pin#, return:
1746  *  the associated 'active' level if found
1747  *  -1 if NOT found
1748  */
1749 int
1750 apic_polarity(int apic, int pin)
1751 {
1752 	int     x;
1753 
1754 	/* search each of the possible INTerrupt sources */
1755 	for (x = 0; x < nintrs; ++x)
1756 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1757 		    (pin == io_apic_ints[x].dst_apic_int))
1758 			return (io_apic_ints[x].int_flags & 0x03);
1759 
1760 	return -1;		/* NOT found */
1761 }
1762 
1763 
1764 /*
1765  * set data according to MP defaults
1766  * FIXME: probably not complete yet...
1767  */
1768 static void
1769 default_mp_table(int type)
1770 {
1771 	int     ap_cpu_id;
1772 #if defined(APIC_IO)
1773 	int     io_apic_id;
1774 	int     pin;
1775 #endif	/* APIC_IO */
1776 
1777 #if 0
1778 	printf("  MP default config type: %d\n", type);
1779 	switch (type) {
1780 	case 1:
1781 		printf("   bus: ISA, APIC: 82489DX\n");
1782 		break;
1783 	case 2:
1784 		printf("   bus: EISA, APIC: 82489DX\n");
1785 		break;
1786 	case 3:
1787 		printf("   bus: EISA, APIC: 82489DX\n");
1788 		break;
1789 	case 4:
1790 		printf("   bus: MCA, APIC: 82489DX\n");
1791 		break;
1792 	case 5:
1793 		printf("   bus: ISA+PCI, APIC: Integrated\n");
1794 		break;
1795 	case 6:
1796 		printf("   bus: EISA+PCI, APIC: Integrated\n");
1797 		break;
1798 	case 7:
1799 		printf("   bus: MCA+PCI, APIC: Integrated\n");
1800 		break;
1801 	default:
1802 		printf("   future type\n");
1803 		break;
1804 		/* NOTREACHED */
1805 	}
1806 #endif	/* 0 */
1807 
1808 	boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
1809 	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1810 
1811 	/* BSP */
1812 	CPU_TO_ID(0) = boot_cpu_id;
1813 	ID_TO_CPU(boot_cpu_id) = 0;
1814 
1815 	/* one and only AP */
1816 	CPU_TO_ID(1) = ap_cpu_id;
1817 	ID_TO_CPU(ap_cpu_id) = 1;
1818 
1819 #if defined(APIC_IO)
1820 	/* one and only IO APIC */
1821 	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1822 
1823 	/*
1824 	 * sanity check, refer to MP spec section 3.6.6, last paragraph
1825 	 * necessary as some hardware isn't properly setting up the IO APIC
1826 	 */
1827 #if defined(REALLY_ANAL_IOAPICID_VALUE)
1828 	if (io_apic_id != 2) {
1829 #else
1830 	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1831 #endif	/* REALLY_ANAL_IOAPICID_VALUE */
1832 		io_apic_set_id(0, 2);
1833 		io_apic_id = 2;
1834 	}
1835 	IO_TO_ID(0) = io_apic_id;
1836 	ID_TO_IO(io_apic_id) = 0;
1837 #endif	/* APIC_IO */
1838 
1839 	/* fill out bus entries */
1840 	switch (type) {
1841 	case 1:
1842 	case 2:
1843 	case 3:
1844 	case 4:
1845 	case 5:
1846 	case 6:
1847 	case 7:
1848 		bus_data[0].bus_id = default_data[type - 1][1];
1849 		bus_data[0].bus_type = default_data[type - 1][2];
1850 		bus_data[1].bus_id = default_data[type - 1][3];
1851 		bus_data[1].bus_type = default_data[type - 1][4];
1852 		break;
1853 
1854 	/* XXX types 4 and 7 use MCA, which is NOT really supported */
1855 	default:		/* illegal/reserved */
1856 		panic("BAD default MP config: %d", type);
1857 		/* NOTREACHED */
1858 	}
1859 
1860 #if defined(APIC_IO)
1861 	/* general cases from MP v1.4, table 5-2 */
1862 	for (pin = 0; pin < 16; ++pin) {
1863 		io_apic_ints[pin].int_type = 0;
1864 		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
1865 		io_apic_ints[pin].src_bus_id = 0;
1866 		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
1867 		io_apic_ints[pin].dst_apic_id = io_apic_id;
1868 		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
1869 	}
1870 
1871 	/* special cases from MP v1.4, table 5-2 */
1872 	if (type == 2) {
1873 		io_apic_ints[2].int_type = 0xff;	/* N/C */
1874 		io_apic_ints[13].int_type = 0xff;	/* N/C */
1875 #if !defined(APIC_MIXED_MODE)
1876 		/** FIXME: ??? */
1877 		panic("sorry, can't support type 2 default yet");
1878 #endif	/* APIC_MIXED_MODE */
1879 	}
1880 	else
1881 		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */
1882 
1883 	if (type == 7)
1884 		io_apic_ints[0].int_type = 0xff;	/* N/C */
1885 	else
1886 		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
1887 #endif	/* APIC_IO */
1888 }
1889 
1890 
1891 /*
1892  * initialize all the SMP locks
1893  */
1894 
1895 /* critical region around IO APIC, apic_imen */
1896 struct simplelock	imen_lock;
1897 
1898 /* critical region around splxx(), cpl, cml, cil, ipending */
1899 struct simplelock	cpl_lock;
1900 
1901 /* Make FAST_INTR() routines sequential */
1902 struct simplelock	fast_intr_lock;
1903 
1904 /* critical region around INTR() routines */
1905 struct simplelock	intr_lock;
1906 
1907 /* lock region used by kernel profiling */
1908 struct simplelock	mcount_lock;
1909 
1910 #ifdef USE_COMLOCK
1911 /* locks com (tty) data/hardware accesses: a FASTINTR() */
1912 struct simplelock	com_lock;
1913 #endif /* USE_COMLOCK */
1914 
1915 /* lock around the MP rendezvous */
1916 static struct simplelock smp_rv_lock;
1917 
1918 /* only 1 CPU can panic at a time :) */
1919 struct simplelock	panic_lock;
1920 
1921 static void
1922 init_locks(void)
1923 {
1924 #if defined(APIC_INTR_DIAGNOSTIC) && defined(APIC_INTR_DIAGNOSTIC_IRQ)
1925 	s_lock_init((struct simplelock*)&apic_itrace_debuglock);
1926 #endif
1927 
1928 	s_lock_init((struct simplelock*)&mcount_lock);
1929 
1930 	s_lock_init((struct simplelock*)&fast_intr_lock);
1931 	s_lock_init((struct simplelock*)&intr_lock);
1932 	s_lock_init((struct simplelock*)&imen_lock);
1933 	s_lock_init((struct simplelock*)&cpl_lock);
1934 	s_lock_init(&smp_rv_lock);
1935 	s_lock_init(&panic_lock);
1936 
1937 #ifdef USE_COMLOCK
1938 	s_lock_init((struct simplelock*)&com_lock);
1939 #endif /* USE_COMLOCK */
1940 
1941 	s_lock_init(&ap_boot_lock);
1942 }
1943 
1944 /*
1945  * start each AP in our list
1946  */
1947 static int
1948 start_all_aps(u_int boot_addr)
1949 {
1950 	int     x, i, pg;
1951 	u_char  mpbiosreason;
1952 	u_long  mpbioswarmvec;
1953 	struct globaldata *gd;
1954 	char *stack;
1955 
1956 	POSTCODE(START_ALL_APS_POST);
1957 
1958 	/* initialize BSP's local APIC */
1959 	apic_initialize();
1960 	bsp_apic_ready = 1;
1961 
1962 	/* install the AP 1st level boot code */
1963 	install_ap_tramp(boot_addr);
1964 
1965 
1966 	/* save the current value of the warm-start vector */
1967 	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
1968 #ifndef PC98
1969 	outb(CMOS_REG, BIOS_RESET);
1970 	mpbiosreason = inb(CMOS_DATA);
1971 #endif
1972 
1973 	/* record BSP in CPU map */
1974 	all_cpus = 1;
1975 
1976 	/* set up 0 -> 4MB P==V mapping for AP boot */
1977 	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
1978 	invltlb();
1979 
1980 	/* start each AP */
1981 	for (x = 1; x <= mp_naps; ++x) {
1982 
1983 		/* This is a bit verbose; it will go away soon. */
1984 
1985 		/* first page of AP's private space */
1986 		pg = x * i386_btop(sizeof(struct privatespace));
1987 
1988 		/* allocate a new private data page */
1989 		gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);
1990 
1991 		/* wire it into the private page table page */
1992 		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));
1993 
1994 		/* allocate and set up an idle stack data page */
1995 		stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
1996 		for (i = 0; i < UPAGES; i++)
1997 			SMPpt[pg + 5 + i] = (pt_entry_t)
1998 			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
1999 
2000 		SMPpt[pg + 1] = 0;		/* *prv_CMAP1 */
2001 		SMPpt[pg + 2] = 0;		/* *prv_CMAP2 */
2002 		SMPpt[pg + 3] = 0;		/* *prv_CMAP3 */
2003 		SMPpt[pg + 4] = 0;		/* *prv_PMAP1 */
2004 
2005 		/* prime data page for it to use */
2006 		SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
2007 		gd->gd_cpuid = x;
2008 		gd->gd_cpu_lockid = x << 24;
2009 		gd->gd_prv_CMAP1 = &SMPpt[pg + 1];
2010 		gd->gd_prv_CMAP2 = &SMPpt[pg + 2];
2011 		gd->gd_prv_CMAP3 = &SMPpt[pg + 3];
2012 		gd->gd_prv_PMAP1 = &SMPpt[pg + 4];
2013 		gd->gd_prv_CADDR1 = SMP_prvspace[x].CPAGE1;
2014 		gd->gd_prv_CADDR2 = SMP_prvspace[x].CPAGE2;
2015 		gd->gd_prv_CADDR3 = SMP_prvspace[x].CPAGE3;
2016 		gd->gd_prv_PADDR1 = (unsigned *)SMP_prvspace[x].PPAGE1;
2017 
2018 		/* setup a vector to our boot code */
2019 		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
2020 		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
2021 #ifndef PC98
2022 		outb(CMOS_REG, BIOS_RESET);
2023 		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
2024 #endif
2025 
2026 		bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
2027 		bootAP = x;
2028 
2029 		/* attempt to start the Application Processor */
2030 		CHECK_INIT(99);	/* setup checkpoints */
2031 		if (!start_ap(x, boot_addr)) {
2032 			printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2033 			CHECK_PRINT("trace");	/* show checkpoints */
2034 			/* better panic as the AP may be running loose */
2035 			printf("panic y/n? [y] ");
2036 			if (cngetc() != 'n')
2037 				panic("bye-bye");
2038 		}
2039 		CHECK_PRINT("trace");		/* show checkpoints */
2040 
2041 		/* record its version info */
2042 		cpu_apic_versions[x] = cpu_apic_versions[0];
2043 
2044 		all_cpus |= (1 << x);		/* record AP in CPU map */
2045 	}
2046 
2047 	/* build our map of 'other' CPUs */
2048 	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2049 
2050 	/* fill in our (BSP) APIC version */
2051 	cpu_apic_versions[0] = lapic.version;
2052 
2053 	/* restore the warmstart vector */
2054 	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
2055 #ifndef PC98
2056 	outb(CMOS_REG, BIOS_RESET);
2057 	outb(CMOS_DATA, mpbiosreason);
2058 #endif
2059 
2060 	/*
2061 	 * Set up the idle context for the BSP.  Similar to above except
2062 	 * that some of it was done by locore, some by pmap.c, and some is
2063 	 * implicit because the BSP is cpu#0, the page is initially zero, and
2064 	 * we can refer to variables by name on the BSP.
2065 	 */
2066 
2067 	/* Allocate and setup BSP idle stack */
2068 	stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
2069 	for (i = 0; i < UPAGES; i++)
2070 		SMPpt[5 + i] = (pt_entry_t)
2071 		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));
2072 
2073 	*(int *)PTD = 0;
2074 	pmap_set_opt();
2075 
2076 	/* number of APs actually started */
2077 	return mp_ncpus - 1;
2078 }
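
/*
 * For reference, the layout of each AP's private page table (SMPpt) slots
 * established above, relative to that AP's base index 'pg':
 *
 *	pg + 0				globaldata page
 *	pg + 1 .. pg + 3		*prv_CMAP1 .. *prv_CMAP3 (initially unmapped)
 *	pg + 4				*prv_PMAP1 (initially unmapped)
 *	pg + 5 .. pg + 5 + UPAGES - 1	idle stack pages
 */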
2079 
2080 
2081 /*
2082  * load the 1st level AP boot code into base memory.
2083  */
2084 
2085 /* targets for relocation */
2086 extern void bigJump(void);
2087 extern void bootCodeSeg(void);
2088 extern void bootDataSeg(void);
2089 extern void MPentry(void);
2090 extern u_int MP_GDT;
2091 extern u_int mp_gdtbase;
2092 
2093 static void
2094 install_ap_tramp(u_int boot_addr)
2095 {
2096 	int     x;
2097 	int     size = *(int *) ((u_long) & bootMP_size);
2098 	u_char *src = (u_char *) ((u_long) bootMP);
2099 	u_char *dst = (u_char *) boot_addr + KERNBASE;
2100 	u_int   boot_base = (u_int) bootMP;
2101 	u_int8_t *dst8;
2102 	u_int16_t *dst16;
2103 	u_int32_t *dst32;
2104 
2105 	POSTCODE(INSTALL_AP_TRAMP_POST);
2106 
2107 	for (x = 0; x < size; ++x)
2108 		*dst++ = *src++;
2109 
2110 	/*
2111 	 * modify addresses in code we just moved to basemem. unfortunately we
2112 	 * need fairly detailed info about mpboot.s for this to work.  changes
2113 	 * to mpboot.s might require changes here.
2114 	 */
2115 
2116 	/* boot code is located in KERNEL space */
2117 	dst = (u_char *) boot_addr + KERNBASE;
2118 
2119 	/* modify the lgdt arg */
2120 	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2121 	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2122 
2123 	/* modify the ljmp target for MPentry() */
2124 	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2125 	*dst32 = ((u_int) MPentry - KERNBASE);
2126 
2127 	/* modify the target for boot code segment */
2128 	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2129 	dst8 = (u_int8_t *) (dst16 + 1);
2130 	*dst16 = (u_int) boot_addr & 0xffff;
2131 	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2132 
2133 	/* modify the target for boot data segment */
2134 	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2135 	dst8 = (u_int8_t *) (dst16 + 1);
2136 	*dst16 = (u_int) boot_addr & 0xffff;
2137 	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2138 }
2139 
2140 
2141 /*
2142  * this function starts the AP (application processor) given by the
2143  * logical CPU number 'logical_cpu'.  It does quite a "song and dance"
2144  * to accomplish this.  This is necessary because of the nuances
2145  * of the different hardware we might encounter.  It ain't pretty,
2146  * but it seems to work.
2147  */
2148 static int
2149 start_ap(int logical_cpu, u_int boot_addr)
2150 {
2151 	int     physical_cpu;
2152 	int     vector;
2153 	int     cpus;
2154 	u_long  icr_lo, icr_hi;
2155 
2156 	POSTCODE(START_AP_POST);
2157 
2158 	/* get the PHYSICAL APIC ID# */
2159 	physical_cpu = CPU_TO_ID(logical_cpu);
2160 
2161 	/* calculate the vector */
2162 	vector = (boot_addr >> 12) & 0xff;
2163 
2164 	/* used as a watchpoint to signal AP startup */
2165 	cpus = mp_ncpus;
2166 
2167 	/*
2168 	 * First we do an INIT/RESET IPI.  This INIT IPI might be run, resetting
2169 	 * and starting the target CPU; or it might be latched (P5 bug), leaving
2170 	 * the CPU waiting for a STARTUP IPI; or it might simply be
2171 	 * ignored.
2172 	 */
2173 
2174 	/* setup the address for the target AP */
2175 	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
2176 	icr_hi |= (physical_cpu << 24);
2177 	lapic.icr_hi = icr_hi;
2178 
2179 	/* do an INIT IPI: assert RESET */
2180 	icr_lo = lapic.icr_lo & 0xfff00000;
2181 	lapic.icr_lo = icr_lo | 0x0000c500;
2182 
2183 	/* wait for pending status end */
2184 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2185 		 /* spin */ ;
2186 
2187 	/* do an INIT IPI: deassert RESET */
2188 	lapic.icr_lo = icr_lo | 0x00008500;
2189 
2190 	/* wait for pending status end */
2191 	u_sleep(10000);		/* wait ~10mS */
2192 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2193 		 /* spin */ ;
2194 
2195 	/*
2196 	 * Next we do a STARTUP IPI.  The previous INIT IPI might still be
2197 	 * latched (P5 bug), in which case this 1st STARTUP would terminate
2198 	 * immediately and the previously started INIT IPI would continue; or
2199 	 * the previous INIT IPI has already run and this STARTUP IPI will
2200 	 * run; or the previous INIT IPI was ignored and this STARTUP IPI
2201 	 * will run.
2202 	 */
2203 
2204 	/* do a STARTUP IPI */
2205 	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2206 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2207 		 /* spin */ ;
2208 	u_sleep(200);		/* wait ~200uS */
2209 
2210 	/*
2211 	 * Finally we do a 2nd STARTUP IPI.  This 2nd STARTUP IPI should run if
2212 	 * the previous STARTUP IPI was cancelled by a latched INIT IPI;
2213 	 * otherwise it will be ignored, as only ONE STARTUP IPI is
2214 	 * recognized after a hardware RESET or INIT IPI.
2215 	 */
2216 
2217 	lapic.icr_lo = icr_lo | 0x00000600 | vector;
2218 	while (lapic.icr_lo & APIC_DELSTAT_MASK)
2219 		 /* spin */ ;
2220 	u_sleep(200);		/* wait ~200uS */
2221 
2222 	/* wait for it to start */
2223 	set_apic_timer(5000000);/* == 5 seconds */
2224 	while (read_apic_timer())
2225 		if (mp_ncpus > cpus)
2226 			return 1;	/* return SUCCESS */
2227 
2228 	return 0;		/* return FAILURE */
2229 }
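
/*
 * For reference, the ICR_LO values used above decode as follows (local APIC
 * interrupt command register, low word):
 *
 *	0x0000c500	INIT delivery mode, level triggered, level asserted
 *	0x00008500	INIT delivery mode, level triggered, level deasserted
 *	0x00000600	STARTUP (SIPI) delivery mode; the low byte holds the
 *			vector, so the AP begins execution at vector << 12,
 *			i.e. at boot_addr.
 */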
2230 
2231 /*
2232  * Flush the TLB on all other CPU's
2233  *
2234  * XXX: Needs to handshake and wait for completion before proceeding.
2235  */
2236 void
2237 smp_invltlb(void)
2238 {
2239 #if defined(APIC_IO)
2240 	if (smp_started && invltlb_ok)
2241 		all_but_self_ipi(XINVLTLB_OFFSET);
2242 #endif  /* APIC_IO */
2243 }
2244 
2245 void
2246 invlpg(u_int addr)
2247 {
2248 	__asm   __volatile("invlpg (%0)"::"r"(addr):"memory");
2249 
2250 	/* send a message to the other CPUs */
2251 	smp_invltlb();
2252 }
2253 
2254 void
2255 invltlb(void)
2256 {
2257 	u_long  temp;
2258 
2259 	/*
2260 	 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
2261 	 * inlined.
2262 	 */
2263 	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");
2264 
2265 	/* send a message to the other CPUs */
2266 	smp_invltlb();
2267 }
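
/*
 * Illustrative sketch (not compiled): pmap-style code simply uses the
 * wrappers above and the cross-CPU shootdown IPI is sent as a side effect.
 * The helper and its arguments are hypothetical.
 */
#if 0
static void
example_update_pte(pt_entry_t *pte, pt_entry_t newval, u_int va)
{
	*pte = newval;
	invlpg(va);		/* local INVLPG plus smp_invltlb() broadcast */
}
#endif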
2268 
2269 
2270 /*
2271  * When called, the executing CPU will send an IPI to all other CPUs
2272  *  requesting that they halt execution.
2273  *
2274  * Usually (but not necessarily) called with 'other_cpus' as its arg.
2275  *
2276  *  - Signals all CPUs in map to stop.
2277  *  - Waits for each to stop.
2278  *
2279  * Returns:
2280  *  -1: error
2281  *   0: NA
2282  *   1: ok
2283  *
2284  * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
2285  *            from executing at same time.
2286  */
2287 int
2288 stop_cpus(u_int map)
2289 {
2290 	int count = 0;
2291 
2292 	if (!smp_started)
2293 		return 0;
2294 
2295 	/* send the Xcpustop IPI to all CPUs in map */
2296 	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2297 
2298 	while (count++ < 100000 && (stopped_cpus & map) != map)
2299 		/* spin */ ;
2300 
2301 #ifdef DIAGNOSTIC
2302 	if ((stopped_cpus & map) != map)
2303 		printf("Warning: CPUs 0x%x did not stop!\n",
2304 		    (~(stopped_cpus & map)) & map);
2305 #endif
2306 
2307 	return 1;
2308 }
2309 
2310 
2311 /*
2312  * Called by a CPU to restart stopped CPUs.
2313  *
2314  * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2315  *
2316  *  - Signals all CPUs in map to restart.
2317  *  - Waits for each to restart.
2318  *
2319  * Returns:
2320  *  -1: error
2321  *   0: NA
2322  *   1: ok
2323  */
2324 int
2325 restart_cpus(u_int map)
2326 {
2327 	int count = 0;
2328 
2329 	if (!smp_started)
2330 		return 0;
2331 
2332 	started_cpus = map;		/* signal other cpus to restart */
2333 
2334 	/* wait for each to clear its bit */
2335 	while (count++ < 100000 && (stopped_cpus & map) != 0)
2336 		/* spin */ ;
2337 
2338 #ifdef DIAGNOSTIC
2339 	if ((stopped_cpus & map) != 0)
2340 		printf("Warning: CPUs 0x%x did not restart!\n",
2341 		    (~(stopped_cpus & map)) & map);
2342 #endif
2343 
2344 	return 1;
2345 }
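
/*
 * Illustrative sketch (not compiled): the usual pairing of stop_cpus() and
 * restart_cpus(), as a debugger-style caller might use them.  The caller
 * below is hypothetical and omits error handling.
 */
#if 0
static void
example_freeze_others(void)
{
	stop_cpus(PCPU_GET(other_cpus));	/* park all other CPUs */
	/* ... inspect or modify global state single-threaded ... */
	restart_cpus(stopped_cpus);		/* let them resume */
}
#endif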
2346 
2347 int smp_active = 0;	/* are the APs allowed to run? */
2348 SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");
2349 
2350 /* XXX maybe should be hw.ncpu */
2351 static int smp_cpus = 1;	/* how many cpu's running */
2352 SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");
2353 
2354 int invltlb_ok = 0;	/* throttle smp_invltlb() till safe */
2355 SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
2356 
2357 /* Warning: Do not staticize.  Used from swtch.s */
2358 int do_page_zero_idle = 1; /* bzero pages for fun and profit in idleloop */
2359 SYSCTL_INT(_machdep, OID_AUTO, do_page_zero_idle, CTLFLAG_RW,
2360 	   &do_page_zero_idle, 0, "");
2361 
2362 /* Is forwarding of an interrupt to the CPU holding the ISR lock enabled? */
2363 int forward_irq_enabled = 1;
2364 SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW,
2365 	   &forward_irq_enabled, 0, "");
2366 
2367 /* Enable forwarding of a signal to a process running on a different CPU */
2368 static int forward_signal_enabled = 1;
2369 SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
2370 	   &forward_signal_enabled, 0, "");
2371 
2372 /* Enable forwarding of roundrobin to all other cpus */
2373 static int forward_roundrobin_enabled = 1;
2374 SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
2375 	   &forward_roundrobin_enabled, 0, "");
2376 
2377 /*
2378  * This is called once the rest of the system is up and running and we're
2379  * ready to let the AP's out of the pen.
2380  */
2381 void ap_init(void);
2382 
2383 void
2384 ap_init(void)
2385 {
2386 	u_int	apic_id;
2387 
2388 	/* lock against other AP's that are waking up */
2389 	s_lock(&ap_boot_lock);
2390 
2391 	/* BSP may have changed PTD while we're waiting for the lock */
2392 	cpu_invltlb();
2393 
2394 	smp_cpus++;
2395 
2396 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2397 	lidt(&r_idt);
2398 #endif
2399 
2400 	/* Build our map of 'other' CPUs. */
2401 	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
2402 
2403 	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
2404 
2405 	/* set up CPU registers and state */
2406 	cpu_setregs();
2407 
2408 	/* set up FPU state on the AP */
2409 	npxinit(__INITIAL_NPXCW__);
2410 
2411 	/* A quick check from sanity claus */
2412 	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
2413 	if (PCPU_GET(cpuid) != apic_id) {
2414 		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
2415 		printf("SMP: apic_id = %d\n", apic_id);
2416 		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2417 		panic("cpuid mismatch! boom!!");
2418 	}
2419 
2420 	/* Init local apic for irq's */
2421 	apic_initialize();
2422 
2423 	/* Set memory range attributes for this CPU to match the BSP */
2424 	mem_range_AP_init();
2425 
2426 	/*
2427 	 * Activate smp_invltlb, although strictly speaking, this isn't
2428 	 * quite correct yet.  We should have a bitfield for cpus willing
2429 	 * to accept TLB flush IPI's or something and sync them.
2430 	 */
2431 	if (smp_cpus == mp_ncpus) {
2432 		invltlb_ok = 1;
2433 		smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
2434 		smp_active = 1;	 /* historic */
2435 	}
2436 
2437 	/* let other AP's wake up now */
2438 	s_unlock(&ap_boot_lock);
2439 
2440 	/* wait until all the AP's are up */
2441 	while (smp_started == 0)
2442 		; /* nothing */
2443 
2444 	/*
2445 	 * Set curproc to our per-cpu idleproc so that mutexes have
2446 	 * something unique to lock with.
2447 	 */
2448 	PCPU_SET(curproc, PCPU_GET(idleproc));
2449 
2450 	microuptime(PCPU_PTR(switchtime));
2451 	PCPU_SET(switchticks, ticks);
2452 
2453 	/* ok, now grab sched_lock and enter the scheduler */
2454 	enable_intr();
2455 	mtx_enter(&sched_lock, MTX_SPIN);
2456 	cpu_throw();	/* doesn't return */
2457 
2458 	panic("scheduler returned us to ap_init");
2459 }
2460 
2461 #ifdef BETTER_CLOCK
2462 
2463 #define CHECKSTATE_USER	0
2464 #define CHECKSTATE_SYS	1
2465 #define CHECKSTATE_INTR	2
2466 
2467 /* Do not staticize.  Used from apic_vector.s */
2468 struct proc*	checkstate_curproc[MAXCPU];
2469 int		checkstate_cpustate[MAXCPU];
2470 u_long		checkstate_pc[MAXCPU];
2471 
2472 #define PC_TO_INDEX(pc, prof)				\
2473         ((int)(((u_quad_t)((pc) - (prof)->pr_off) *	\
2474             (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
2475 
2476 static void
2477 addupc_intr_forwarded(struct proc *p, int id, int *astmap)
2478 {
2479 	int i;
2480 	struct uprof *prof;
2481 	u_long pc;
2482 
2483 	pc = checkstate_pc[id];
2484 	prof = &p->p_stats->p_prof;
2485 	if (pc >= prof->pr_off &&
2486 	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
2487 		if ((p->p_flag & P_OWEUPC) == 0) {
2488 			prof->pr_addr = pc;
2489 			prof->pr_ticks = 1;
2490 			p->p_flag |= P_OWEUPC;
2491 		}
2492 		*astmap |= (1 << id);
2493 	}
2494 }
2495 
2496 static void
2497 forwarded_statclock(int id, int pscnt, int *astmap)
2498 {
2499 	struct pstats *pstats;
2500 	long rss;
2501 	struct rusage *ru;
2502 	struct vmspace *vm;
2503 	int cpustate;
2504 	struct proc *p;
2505 #ifdef GPROF
2506 	register struct gmonparam *g;
2507 	int i;
2508 #endif
2509 
2510 	p = checkstate_curproc[id];
2511 	cpustate = checkstate_cpustate[id];
2512 
2513 	/* XXX */
2514 	if (p->p_ithd)
2515 		cpustate = CHECKSTATE_INTR;
2516 	else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2517 		cpustate = CHECKSTATE_SYS;
2518 
2519 	switch (cpustate) {
2520 	case CHECKSTATE_USER:
2521 		if (p->p_flag & P_PROFIL)
2522 			addupc_intr_forwarded(p, id, astmap);
2523 		if (pscnt > 1)
2524 			return;
2525 		p->p_uticks++;
2526 		if (p->p_nice > NZERO)
2527 			cp_time[CP_NICE]++;
2528 		else
2529 			cp_time[CP_USER]++;
2530 		break;
2531 	case CHECKSTATE_SYS:
2532 #ifdef GPROF
2533 		/*
2534 		 * Kernel statistics are just like addupc_intr, only easier.
2535 		 */
2536 		g = &_gmonparam;
2537 		if (g->state == GMON_PROF_ON) {
2538 			i = checkstate_pc[id] - g->lowpc;
2539 			if (i < g->textsize) {
2540 				i /= HISTFRACTION * sizeof(*g->kcount);
2541 				g->kcount[i]++;
2542 			}
2543 		}
2544 #endif
2545 		if (pscnt > 1)
2546 			return;
2547 
2548 		p->p_sticks++;
2549 		if (p == SMP_prvspace[id].globaldata.gd_idleproc)
2550 			cp_time[CP_IDLE]++;
2551 		else
2552 			cp_time[CP_SYS]++;
2553 		break;
2554 	case CHECKSTATE_INTR:
2555 	default:
2556 #ifdef GPROF
2557 		/*
2558 		 * Kernel statistics are just like addupc_intr, only easier.
2559 		 */
2560 		g = &_gmonparam;
2561 		if (g->state == GMON_PROF_ON) {
2562 			i = checkstate_pc[id] - g->lowpc;
2563 			if (i < g->textsize) {
2564 				i /= HISTFRACTION * sizeof(*g->kcount);
2565 				g->kcount[i]++;
2566 			}
2567 		}
2568 #endif
2569 		if (pscnt > 1)
2570 			return;
2571 		if (p)
2572 			p->p_iticks++;
2573 		cp_time[CP_INTR]++;
2574 	}
2575 	schedclock(p);
2576 
2577 	/* Update resource usage integrals and maximums. */
2578 	if ((pstats = p->p_stats) != NULL &&
2579 	    (ru = &pstats->p_ru) != NULL &&
2580 	    (vm = p->p_vmspace) != NULL) {
2581 		ru->ru_ixrss += pgtok(vm->vm_tsize);
2582 		ru->ru_idrss += pgtok(vm->vm_dsize);
2583 		ru->ru_isrss += pgtok(vm->vm_ssize);
2584 		rss = pgtok(vmspace_resident_count(vm));
2585 		if (ru->ru_maxrss < rss)
2586 			ru->ru_maxrss = rss;
2587 	}
2588 }
2589 
2590 void
2591 forward_statclock(int pscnt)
2592 {
2593 	int map;
2594 	int id;
2595 	int i;
2596 
2597 	/* Kludge. We don't yet have separate locks for the interrupts
2598 	 * and the kernel. This means that we cannot let the other processors
2599 	 * handle complex interrupts while inhibiting them from entering
2600 	 * the kernel in a non-interrupt context.
2601 	 *
2602 	 * What we can do, without changing the locking mechanisms yet,
2603 	 * is let the other processors handle a very simple interrupt
2604 	 * (which determines the processor states) and do the main
2605 	 * work ourselves.
2606 	 */
2607 
2608 	if (!smp_started || !invltlb_ok || cold || panicstr)
2609 		return;
2610 
2611 	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle ) */
2612 	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2613 	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2614 	checkstate_probed_cpus = 0;
2615 	if (map != 0)
2616 		selected_apic_ipi(map,
2617 				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2618 
2619 	i = 0;
2620 	while (checkstate_probed_cpus != map) {
2621 		/* spin */
2622 		i++;
2623 		if (i == 100000) {
2624 #ifdef BETTER_CLOCK_DIAGNOSTIC
2625 			printf("forward_statclock: checkstate %x\n",
2626 			       checkstate_probed_cpus);
2627 #endif
2628 			break;
2629 		}
2630 	}
2631 
2632 	/*
2633 	 * Step 2: walk through other processors' processes, update ticks and
2634 	 * profiling info.
2635 	 */
2636 
2637 	map = 0;
2638 	for (id = 0; id < mp_ncpus; id++) {
2639 		if (id == PCPU_GET(cpuid))
2640 			continue;
2641 		if (((1 << id) & checkstate_probed_cpus) == 0)
2642 			continue;
2643 		forwarded_statclock(id, pscnt, &map);
2644 	}
2645 	if (map != 0) {
2646 		checkstate_need_ast |= map;
2647 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2648 		i = 0;
2649 		while ((checkstate_need_ast & map) != 0) {
2650 			/* spin */
2651 			i++;
2652 			if (i > 100000) {
2653 #ifdef BETTER_CLOCK_DIAGNOSTIC
2654 				printf("forward_statclock: dropped ast 0x%x\n",
2655 				       checkstate_need_ast & map);
2656 #endif
2657 				break;
2658 			}
2659 		}
2660 	}
2661 }
2662 
2663 void
2664 forward_hardclock(int pscnt)
2665 {
2666 	int map;
2667 	int id;
2668 	struct proc *p;
2669 	struct pstats *pstats;
2670 	int i;
2671 
2672 	/* Kludge. We don't yet have separate locks for the interrupts
2673 	 * and the kernel. This means that we cannot let the other processors
2674 	 * handle complex interrupts while inhibiting them from entering
2675 	 * the kernel in a non-interrupt context.
2676 	 *
2677 	 * What we can do, without changing the locking mechanisms yet,
2678 	 * is let the other processors handle a very simple interrupt
2679 	 * (which determines the processor states) and do the main
2680 	 * work ourselves.
2681 	 */
2682 
2683 	if (!smp_started || !invltlb_ok || cold || panicstr)
2684 		return;
2685 
2686 	/* Step 1: Probe state   (user, cpu, interrupt, spinlock, idle) */
2687 
2688 	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2689 	checkstate_probed_cpus = 0;
2690 	if (map != 0)
2691 		selected_apic_ipi(map,
2692 				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);
2693 
2694 	i = 0;
2695 	while (checkstate_probed_cpus != map) {
2696 		/* spin */
2697 		i++;
2698 		if (i == 100000) {
2699 #ifdef BETTER_CLOCK_DIAGNOSTIC
2700 			printf("forward_hardclock: checkstate %x\n",
2701 			       checkstate_probed_cpus);
2702 #endif
2703 			break;
2704 		}
2705 	}
2706 
2707 	/*
2708 	 * Step 2: walk through other processors' processes, update virtual
2709 	 * timer and profiling timer. If stathz == 0, also update ticks and
2710 	 * profiling info.
2711 	 */
2712 
2713 	map = 0;
2714 	for (id = 0; id < mp_ncpus; id++) {
2715 		if (id == PCPU_GET(cpuid))
2716 			continue;
2717 		if (((1 << id) & checkstate_probed_cpus) == 0)
2718 			continue;
2719 		p = checkstate_curproc[id];
2720 		if (p) {
2721 			pstats = p->p_stats;
2722 			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
2723 			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
2724 			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
2725 				psignal(p, SIGVTALRM);
2726 				map |= (1 << id);
2727 			}
2728 			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
2729 			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
2730 				psignal(p, SIGPROF);
2731 				map |= (1 << id);
2732 			}
2733 		}
2734 		if (stathz == 0) {
2735 			forwarded_statclock( id, pscnt, &map);
2736 		}
2737 	}
2738 	if (map != 0) {
2739 		checkstate_need_ast |= map;
2740 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2741 		i = 0;
2742 		while ((checkstate_need_ast & map) != 0) {
2743 			/* spin */
2744 			i++;
2745 			if (i > 100000) {
2746 #ifdef BETTER_CLOCK_DIAGNOSTIC
2747 				printf("forward_hardclock: dropped ast 0x%x\n",
2748 				       checkstate_need_ast & map);
2749 #endif
2750 				break;
2751 			}
2752 		}
2753 	}
2754 }
2755 
2756 #endif /* BETTER_CLOCK */
2757 
2758 void
2759 forward_signal(struct proc *p)
2760 {
2761 	int map;
2762 	int id;
2763 	int i;
2764 
2765 	/* Kludge. We don't yet have separate locks for the interrupts
2766 	 * and the kernel. This means that we cannot let the other processors
2767 	 * handle complex interrupts while inhibiting them from entering
2768 	 * the kernel in a non-interrupt context.
2769 	 *
2770 	 * What we can do, without changing the locking mechanisms yet,
2771 	 * is let the other processors handle a very simple interrupt
2772 	 * (which determines the processor states) and do the main
2773 	 * work ourselves.
2774 	 */
2775 
2776 	if (!smp_started || !invltlb_ok || cold || panicstr)
2777 		return;
2778 	if (!forward_signal_enabled)
2779 		return;
2780 	while (1) {
2781 		if (p->p_stat != SRUN)
2782 			return;
2783 		id = p->p_oncpu;
2784 		if (id == 0xff)
2785 			return;
2786 		map = (1<<id);
2787 		checkstate_need_ast |= map;
2788 		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2789 		i = 0;
2790 		while ((checkstate_need_ast & map) != 0) {
2791 			/* spin */
2792 			i++;
2793 			if (i > 100000) {
2794 #if 0
2795 				printf("forward_signal: dropped ast 0x%x\n",
2796 				       checkstate_need_ast & map);
2797 #endif
2798 				break;
2799 			}
2800 		}
2801 		if (id == p->p_oncpu)
2802 			return;
2803 	}
2804 }
2805 
2806 void
2807 forward_roundrobin(void)
2808 {
2809 	u_int map;
2810 	int i;
2811 
2812 	if (!smp_started || !invltlb_ok || cold || panicstr)
2813 		return;
2814 	if (!forward_roundrobin_enabled)
2815 		return;
2816 	resched_cpus |= PCPU_GET(other_cpus);
2817 	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
2818 #if 1
2819 	selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
2820 #else
2821 	(void) all_but_self_ipi(XCPUAST_OFFSET);
2822 #endif
2823 	i = 0;
2824 	while ((checkstate_need_ast & map) != 0) {
2825 		/* spin */
2826 		i++;
2827 		if (i > 100000) {
2828 #if 0
2829 			printf("forward_roundrobin: dropped ast 0x%x\n",
2830 			       checkstate_need_ast & map);
2831 #endif
2832 			break;
2833 		}
2834 	}
2835 }
2836 
2837 
2838 #ifdef APIC_INTR_REORDER
2839 /*
2840  *	Maintain mapping from softintr vector to isr bit in local apic.
2841  */
2842 void
2843 set_lapic_isrloc(int intr, int vector)
2844 {
2845 	if (intr < 0 || intr > 32)
2846 		panic("set_lapic_isrloc: bad intr argument: %d",intr);
2847 	if (vector < ICU_OFFSET || vector > 255)
2848 		panic("set_lapic_isrloc: bad vector argument: %d",vector);
2849 	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
2850 	apic_isrbit_location[intr].bit = (1<<(vector & 31));
2851 }
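
/*
 * Illustrative sketch (not compiled): how the recorded location/bit pair
 * might be consulted to test whether the vector for 'intr' is currently
 * in-service in the local APIC ISR.  The consumer below is hypothetical.
 */
#if 0
static int
example_intr_in_service(int intr)
{
	return ((*apic_isrbit_location[intr].location &
	    apic_isrbit_location[intr].bit) != 0);
}
#endif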
2852 #endif
2853 
2854 /*
2855  * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
2856  * (if specified), rendezvous, execute the action function (if specified),
2857  * rendezvous again, execute the teardown function (if specified), and then
2858  * resume.
2859  *
2860  * Note that the supplied external functions _must_ be reentrant and aware
2861  * that they are running in parallel and in an unknown lock context.
2862  */
2863 static void (*smp_rv_setup_func)(void *arg);
2864 static void (*smp_rv_action_func)(void *arg);
2865 static void (*smp_rv_teardown_func)(void *arg);
2866 static void *smp_rv_func_arg;
2867 static volatile int smp_rv_waiters[2];
2868 
2869 void
2870 smp_rendezvous_action(void)
2871 {
2872 	/* setup function */
2873 	if (smp_rv_setup_func != NULL)
2874 		smp_rv_setup_func(smp_rv_func_arg);
2875 	/* spin on entry rendezvous */
2876 	atomic_add_int(&smp_rv_waiters[0], 1);
2877 	while (smp_rv_waiters[0] < mp_ncpus)
2878 		;
2879 	/* action function */
2880 	if (smp_rv_action_func != NULL)
2881 		smp_rv_action_func(smp_rv_func_arg);
2882 	/* spin on exit rendezvous */
2883 	atomic_add_int(&smp_rv_waiters[1], 1);
2884 	while (smp_rv_waiters[1] < mp_ncpus)
2885 		;
2886 	/* teardown function */
2887 	if (smp_rv_teardown_func != NULL)
2888 		smp_rv_teardown_func(smp_rv_func_arg);
2889 }
2890 
2891 void
2892 smp_rendezvous(void (* setup_func)(void *),
2893 	       void (* action_func)(void *),
2894 	       void (* teardown_func)(void *),
2895 	       void *arg)
2896 {
2897 	u_int	efl;
2898 
2899 	/* obtain rendezvous lock */
2900 	s_lock(&smp_rv_lock);		/* XXX sleep here? NOWAIT flag? */
2901 
2902 	/* set static function pointers */
2903 	smp_rv_setup_func = setup_func;
2904 	smp_rv_action_func = action_func;
2905 	smp_rv_teardown_func = teardown_func;
2906 	smp_rv_func_arg = arg;
2907 	smp_rv_waiters[0] = 0;
2908 	smp_rv_waiters[1] = 0;
2909 
2910 	/* disable interrupts on this CPU, save interrupt status */
2911 	efl = read_eflags();
2912 	write_eflags(efl & ~PSL_I);
2913 
2914 	/* signal other processors, which will enter the IPI with interrupts off */
2915 	all_but_self_ipi(XRENDEZVOUS_OFFSET);
2916 
2917 	/* call executor function */
2918 	smp_rendezvous_action();
2919 
2920 	/* restore interrupt flag */
2921 	write_eflags(efl);
2922 
2923 	/* release lock */
2924 	s_unlock(&smp_rv_lock);
2925 }
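
/*
 * Illustrative sketch (not compiled): a typical smp_rendezvous() consumer.
 * Every CPU executes the action function in lockstep with interrupts
 * disabled, which suits operations that must happen on all CPUs "at once"
 * (e.g. cache or memory-range attribute updates).  The functions below are
 * hypothetical.
 */
#if 0
static void
example_wbinvd_action(void *arg __unused)
{
	wbinvd();		/* flush and invalidate caches on this CPU */
}

static void
example_flush_all_caches(void)
{
	smp_rendezvous(NULL, example_wbinvd_action, NULL, NULL);
}
#endif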
2926 
2927 void
2928 release_aps(void *dummy __unused)
2929 {
2930 	s_unlock(&ap_boot_lock);
2931 }
2932 
2933 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
2934