xref: /illumos-gate/usr/src/uts/i86pc/io/pcplusmp/apic.c (revision fe3e2633be44d2f5361a7bba26abeb80fcc04fbc)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PSMI 1.1 extensions are supported only in 2.6 and later versions.
 * PSMI 1.2 extensions are supported only in 2.7 and later versions.
 * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
 * PSMI 1.5 extensions are supported in Solaris Nevada.
 * PSMI 1.6 extensions are supported in Solaris Nevada.
 */
#define	PSMI_1_6

#include <sys/processor.h>
#include <sys/time.h>
#include <sys/psm.h>
#include <sys/smp_impldefs.h>
#include <sys/cram.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/psm_common.h>
#include <sys/apic.h>
#include <sys/pit.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/pci.h>
#include <sys/promif.h>
#include <sys/x86_archext.h>
#include <sys/cpc_impl.h>
#include <sys/uadmin.h>
#include <sys/panic.h>
#include <sys/debug.h>
#include <sys/archsystm.h>
#include <sys/trap.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/rm_platter.h>
#include <sys/privregs.h>
#include <sys/note.h>
#include <sys/pci_intr_lib.h>
#include <sys/spl.h>
#include <sys/clock.h>
#include <sys/dditypes.h>
#include <sys/sunddi.h>

/*
 *	Local Function Prototypes
 */
static void apic_init_intr();
static void apic_nmi_intr(caddr_t arg, struct regs *rp);

/*
 *	standard MP entries
 */
static int	apic_probe();
static int	apic_clkinit();
static int	apic_getclkirq(int ipl);
static uint_t	apic_calibrate(volatile uint32_t *addr,
    uint16_t *pit_ticks_adj);
static hrtime_t apic_gettime();
static hrtime_t apic_gethrtime();
static void	apic_init();
static void	apic_picinit(void);
static int	apic_cpu_start(processorid_t, caddr_t);
static int	apic_post_cpu_start(void);
static void	apic_send_ipi(int cpun, int ipl);
static void	apic_set_idlecpu(processorid_t cpun);
static void	apic_unset_idlecpu(processorid_t cpun);
static int	apic_intr_enter(int ipl, int *vect);
static void	apic_setspl(int ipl);
static void	x2apic_setspl(int ipl);
static int	apic_addspl(int ipl, int vector, int min_ipl, int max_ipl);
static int	apic_delspl(int ipl, int vector, int min_ipl, int max_ipl);
static void	apic_shutdown(int cmd, int fcn);
static void	apic_preshutdown(int cmd, int fcn);
static int	apic_disable_intr(processorid_t cpun);
static void	apic_enable_intr(processorid_t cpun);
static processorid_t	apic_get_next_processorid(processorid_t cpun);
static int		apic_get_ipivect(int ipl, int type);
static void	apic_timer_reprogram(hrtime_t time);
static void	apic_timer_enable(void);
static void	apic_timer_disable(void);
static void	apic_post_cyclic_setup(void *arg);

static int	apic_oneshot = 0;
int	apic_oneshot_enable = 1; /* to allow disabling one-shot capability */

/* Now the ones for Dynamic Interrupt distribution */
int	apic_enable_dynamic_migration = 0;


/*
 * These variables are frequently accessed in apic_intr_enter(),
 * apic_intr_exit() and apic_setspl(), so they are grouped together.
 */
volatile uint32_t *apicadr =  NULL;	/* virtual addr of local APIC	*/
int apic_setspl_delay = 1;		/* apic_setspl - delay enable	*/
int apic_clkvect;

/* vector at which error interrupts come in */
int apic_errvect;
int apic_enable_error_intr = 1;
int apic_error_display_delay = 100;

/* vector at which performance counter overflow interrupts come in */
int apic_cpcovf_vect;
int apic_enable_cpcovf_intr = 1;

/*
 * The following vector assignments influence the value of ipltopri and
 * vectortoipl. Note that vectors 0 - 0x1f are not used. We could program
 * idle to 0 and IPL 0 to 0xf to differentiate idle, should we care to do
 * so in the future. Note that some rarely used IPLs share vector ranges,
 * while the heavily used IPLs (5 and 6) have a wide range.
 *
 * This array is used to initialize apic_ipls[] (in apic_init()).
 *
 *	IPL		Vector range.		as passed to intr_enter
 *	0		none.
 *	1,2,3		0x20-0x2f		0x0-0xf
 *	4		0x30-0x3f		0x10-0x1f
 *	5		0x40-0x5f		0x20-0x3f
 *	6		0x60-0x7f		0x40-0x5f
 *	7,8,9		0x80-0x8f		0x60-0x6f
 *	10		0x90-0x9f		0x70-0x7f
 *	11		0xa0-0xaf		0x80-0x8f
 *	...		...
 *	15		0xe0-0xef		0xc0-0xcf
 *	15		0xf0-0xff		0xd0-0xdf
 */
uchar_t apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = {
	3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
};
	/*
	 * The ipl of an ISR at vector X is apic_vectortoipl[X>>4]
	 * NOTE that this is vector as passed into intr_enter which is
	 * programmed vector - 0x20 (APIC_BASE_VECT)
	 */
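	/*
	 * Worked example (illustrative): a programmed vector of 0x62
	 * arrives at intr_enter as 0x62 - 0x20 == 0x42, and
	 * apic_vectortoipl[0x42 >> 4] == apic_vectortoipl[4] == 6,
	 * matching the IPL 6 row (vectors 0x60-0x7f) in the table above.
	 */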

uchar_t	apic_ipltopri[MAXIPL + 1];	/* unix ipl to apic pri	*/
	/* The taskpri to be programmed into apic to mask given ipl */

#if defined(__amd64)
uchar_t	apic_cr8pri[MAXIPL + 1];	/* unix ipl to cr8 pri	*/
#endif

/*
 * Correlation of the hardware vector to the IPL in use, initialized
 * from apic_vectortoipl[] in apic_init().  The final IPLs may not correlate
 * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
 * connected to errata-stricken IOAPICs.
 */
uchar_t apic_ipls[APIC_AVAIL_VECTOR];

/*
 * Patchable global variables.
 */
int	apic_forceload = 0;

int	apic_coarse_hrtime = 1;		/* 0 - use accurate slow gethrtime() */
					/* 1 - use gettime() for performance */
int	apic_flat_model = 0;		/* 0 - clustered. 1 - flat */
int	apic_enable_hwsoftint = 0;	/* 0 - disable, 1 - enable	*/
int	apic_enable_bind_log = 1;	/* 1 - display interrupt binding log */
int	apic_panic_on_nmi = 0;
int	apic_panic_on_apic_error = 0;

int	apic_verbose = 0;

/* minimum number of timer ticks to program to */
int apic_min_timer_ticks = 1;
/*
 *	Local static data
 */
static struct	psm_ops apic_ops = {
	apic_probe,

	apic_init,
	apic_picinit,
	apic_intr_enter,
	apic_intr_exit,
	apic_setspl,
	apic_addspl,
	apic_delspl,
	apic_disable_intr,
	apic_enable_intr,
	(int (*)(int))NULL,		/* psm_softlvl_to_irq */
	(void (*)(int))NULL,		/* psm_set_softintr */

	apic_set_idlecpu,
	apic_unset_idlecpu,

	apic_clkinit,
	apic_getclkirq,
	(void (*)(void))NULL,		/* psm_hrtimeinit */
	apic_gethrtime,

	apic_get_next_processorid,
	apic_cpu_start,
	apic_post_cpu_start,
	apic_shutdown,
	apic_get_ipivect,
	apic_send_ipi,

	(int (*)(dev_info_t *, int))NULL,	/* psm_translate_irq */
	(void (*)(int, char *))NULL,	/* psm_notify_error */
	(void (*)(int))NULL,		/* psm_notify_func */
	apic_timer_reprogram,
	apic_timer_enable,
	apic_timer_disable,
	apic_post_cyclic_setup,
	apic_preshutdown,
	apic_intr_ops,			/* Advanced DDI Interrupt framework */
	apic_state,			/* save, restore apic state for S3 */
};


static struct	psm_info apic_psm_info = {
	PSM_INFO_VER01_6,			/* version */
	PSM_OWN_EXCLUSIVE,			/* ownership */
	(struct psm_ops *)&apic_ops,		/* operation */
	APIC_PCPLUSMP_NAME,			/* machine name */
	"pcplusmp v1.4 compatible",
};

static void *apic_hdlp;

#ifdef DEBUG
int	apic_debug = 0;
int	apic_restrict_vector = 0;

int	apic_debug_msgbuf[APIC_DEBUG_MSGBUFSIZE];
int	apic_debug_msgbufindex = 0;

#endif /* DEBUG */

apic_cpus_info_t	*apic_cpus;

cpuset_t	apic_cpumask;
uint_t	apic_picinit_called;

/* Flag to indicate that we need to shut down all processors */
static uint_t	apic_shutdown_processors;

uint_t apic_nsec_per_intr = 0;

/*
 * apic_let_idle_redistribute can have the following values:
 * 0 - if the clock handler decremented it from 1 to 0, the clock handler
 *     has to call the redistribution code.
 * apic_redistribute_lock prevents multiple idle CPUs from redistributing
 * at the same time.
 */
int	apic_num_idle_redistributions = 0;
static	int apic_let_idle_redistribute = 0;
static	uint_t apic_nticks = 0;
static	uint_t apic_skipped_redistribute = 0;

/* to gather intr data and redistribute */
static void apic_redistribute_compute(void);

static	uint_t last_count_read = 0;
static	lock_t	apic_gethrtime_lock;
volatile int	apic_hrtime_stamp = 0;
volatile hrtime_t apic_nsec_since_boot = 0;
static uint_t apic_hertz_count;

uint64_t apic_ticks_per_SFnsecs;	/* # of ticks in SF nsecs */

static hrtime_t apic_nsec_max;

static	hrtime_t	apic_last_hrtime = 0;
int		apic_hrtime_error = 0;
int		apic_remote_hrterr = 0;
int		apic_num_nmis = 0;
int		apic_apic_error = 0;
int		apic_num_apic_errors = 0;
int		apic_num_cksum_errors = 0;

int	apic_error = 0;
static	int	apic_cmos_ssb_set = 0;

/* used to make sure only one CPU handles the NMI */
static	lock_t	apic_nmi_lock;
/* used to make sure only one CPU handles the error interrupt */
static	lock_t	apic_error_lock;

static	struct {
	uchar_t	cntl;
	uchar_t	data;
} aspen_bmc[] = {
	{ CC_SMS_WR_START,	0x18 },		/* NetFn/LUN */
	{ CC_SMS_WR_NEXT,	0x24 },		/* Cmd SET_WATCHDOG_TIMER */
	{ CC_SMS_WR_NEXT,	0x84 },		/* DataByte 1: SMS/OS no log */
	{ CC_SMS_WR_NEXT,	0x2 },		/* DataByte 2: Power Down */
	{ CC_SMS_WR_NEXT,	0x0 },		/* DataByte 3: no pre-timeout */
	{ CC_SMS_WR_NEXT,	0x0 },		/* DataByte 4: timer expir. */
	{ CC_SMS_WR_NEXT,	0xa },		/* DataByte 5: init countdown */
	{ CC_SMS_WR_END,	0x0 },		/* DataByte 6: init countdown */

	{ CC_SMS_WR_START,	0x18 },		/* NetFn/LUN */
	{ CC_SMS_WR_END,	0x22 }		/* Cmd RESET_WATCHDOG_TIMER */
};

static	struct {
	int	port;
	uchar_t	data;
} sitka_bmc[] = {
	{ SMS_COMMAND_REGISTER,	SMS_WRITE_START },
	{ SMS_DATA_REGISTER,	0x18 },		/* NetFn/LUN */
	{ SMS_DATA_REGISTER,	0x24 },		/* Cmd SET_WATCHDOG_TIMER */
	{ SMS_DATA_REGISTER,	0x84 },		/* DataByte 1: SMS/OS no log */
	{ SMS_DATA_REGISTER,	0x2 },		/* DataByte 2: Power Down */
	{ SMS_DATA_REGISTER,	0x0 },		/* DataByte 3: no pre-timeout */
	{ SMS_DATA_REGISTER,	0x0 },		/* DataByte 4: timer expir. */
	{ SMS_DATA_REGISTER,	0xa },		/* DataByte 5: init countdown */
	{ SMS_COMMAND_REGISTER,	SMS_WRITE_END },
	{ SMS_DATA_REGISTER,	0x0 },		/* DataByte 6: init countdown */

	{ SMS_COMMAND_REGISTER,	SMS_WRITE_START },
	{ SMS_DATA_REGISTER,	0x18 },		/* NetFn/LUN */
	{ SMS_COMMAND_REGISTER,	SMS_WRITE_END },
	{ SMS_DATA_REGISTER,	0x22 }		/* Cmd RESET_WATCHDOG_TIMER */
};

/* Patchable global variables. */
int		apic_kmdb_on_nmi = 0;		/* 0 - no, 1 - yes enter kmdb */
uint32_t	apic_divide_reg_init = 0;	/* 0 - divide by 2 */

/*
 *	This is the loadable module wrapper
 */

int
_init(void)
{
	if (apic_coarse_hrtime)
		apic_ops.psm_gethrtime = &apic_gettime;
	return (psm_mod_init(&apic_hdlp, &apic_psm_info));
}

int
_fini(void)
{
	return (psm_mod_fini(&apic_hdlp, &apic_psm_info));
}

int
_info(struct modinfo *modinfop)
{
	return (psm_mod_info(&apic_hdlp, &apic_psm_info, modinfop));
}


static int
apic_probe()
{
	return (apic_probe_common(apic_psm_info.p_mach_idstring));
}

void
apic_init()
{
	int i;
	int	j = 1;

	apic_ipltopri[0] = APIC_VECTOR_PER_IPL; /* leave 0 for idle */
	for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) {
		if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) &&
		    (apic_vectortoipl[i + 1] == apic_vectortoipl[i]))
			/* get to highest vector at the same ipl */
			continue;
		for (; j <= apic_vectortoipl[i]; j++) {
			apic_ipltopri[j] = (i << APIC_IPL_SHIFT) +
			    APIC_BASE_VECT;
		}
	}
	for (; j < MAXIPL + 1; j++)
		/* fill up any empty ipltopri slots */
		apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
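	/*
	 * Illustrative example, assuming APIC_BASE_VECT == 0x20 and
	 * APIC_IPL_SHIFT == 4 (as the X>>4 note above implies): the loops
	 * above yield apic_ipltopri[5] == 0x50, and programming that value
	 * into the TPR masks vectors 0x00-0x5f, i.e. everything at IPL 5
	 * (vectors 0x40-0x5f) and below.
	 */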
	apic_init_common();
#if defined(__amd64)
	/*
	 * Make cpu-specific interrupt info point to cr8pri vector
	 */
	for (i = 0; i <= MAXIPL; i++)
		apic_cr8pri[i] = apic_ipltopri[i] >> APIC_IPL_SHIFT;
	CPU->cpu_pri_data = apic_cr8pri;
#endif	/* __amd64 */
}

/*
 * handler for APIC Error interrupt. Just print a warning and continue
 */
static int
apic_error_intr()
{
	uint_t	error0, error1, error;
	uint_t	i;

	/*
	 * We need to write before read, as per section 7.4.17 of the
	 * system programming manual.  We do both and OR the results
	 * to be safe.
	 */
	error0 = apic_reg_ops->apic_read(APIC_ERROR_STATUS);
	apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
	error1 = apic_reg_ops->apic_read(APIC_ERROR_STATUS);
	error = error0 | error1;

	/*
	 * Clear the APIC error status (do this on all cpus that enter here)
	 * (two writes are required due to the semantics of accessing the
	 * error status register.)
	 */
	apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
	apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);

	/*
	 * Prevent more than one CPU from handling the error interrupt, which
	 * could cause double printing (interleaved characters from multiple
	 * CPUs when using prom_printf).
	 */
	if (lock_try(&apic_error_lock) == 0)
		return (error ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
	if (error) {
#if	DEBUG
		if (apic_debug)
			debug_enter("pcplusmp: APIC Error interrupt received");
#endif /* DEBUG */
		if (apic_panic_on_apic_error)
			cmn_err(CE_PANIC,
			    "APIC Error interrupt on CPU %d. Status = %x\n",
			    psm_get_cpu_id(), error);
		else {
			if ((error & ~APIC_CS_ERRORS) == 0) {
				/* cksum error only */
				apic_error |= APIC_ERR_APIC_ERROR;
				apic_apic_error |= error;
				apic_num_apic_errors++;
				apic_num_cksum_errors++;
			} else {
				/*
				 * prom_printf is the best shot we have at
				 * something that is problem-free from
				 * high-level/NMI type of interrupts
				 */
				prom_printf("APIC Error interrupt on CPU %d. "
				    "Status 0 = %x, Status 1 = %x\n",
				    psm_get_cpu_id(), error0, error1);
				apic_error |= APIC_ERR_APIC_ERROR;
				apic_apic_error |= error;
				apic_num_apic_errors++;
				for (i = 0; i < apic_error_display_delay; i++) {
					tenmicrosec();
				}
				/*
				 * provide more delay next time limited to
				 * roughly 1 clock tick time
				 */
				if (apic_error_display_delay < 500)
					apic_error_display_delay *= 2;
			}
		}
		lock_clear(&apic_error_lock);
		return (DDI_INTR_CLAIMED);
	} else {
		lock_clear(&apic_error_lock);
		return (DDI_INTR_UNCLAIMED);
	}
	/* NOTREACHED */
}

/*
 * Turn off the mask bit in the performance counter Local Vector Table entry.
 */
static void
apic_cpcovf_mask_clear(void)
{
	apic_reg_ops->apic_write(APIC_PCINT_VECT,
	    (apic_reg_ops->apic_read(APIC_PCINT_VECT) & ~APIC_LVT_MASK));
}

static void
apic_init_intr()
{
	processorid_t	cpun = psm_get_cpu_id();
	uint_t nlvt;
	uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;

	/*
	 * On the BSP we would have enabled x2apic, if supported by the
	 * processor, in acpi_probe(); on an AP we do it here.
	 */
	if (apic_detect_x2apic()) {
		apic_enable_x2apic();
	}

	apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);

	if (apic_mode == LOCAL_APIC) {
		/*
		 * We are running APIC in MMIO mode.
		 */
		if (apic_flat_model) {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_FLAT_MODEL);
		} else {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_CLUSTER_MODEL);
		}

		apic_reg_ops->apic_write(APIC_DEST_REG,
		    AV_HIGH_ORDER >> cpun);
	}

	if (apic_direct_EOI) {
		/*
		 * Set 12th bit in Spurious Interrupt Vector
		 * Register to support level triggered interrupt
		 * directed EOI.
		 */
		svr |= (0x1 << APIC_SVR);
	}

	/* need to enable APIC before unmasking NMI */
	apic_reg_ops->apic_write(APIC_SPUR_INT_REG, svr);

	/*
	 * Presence of an invalid vector with delivery mode AV_FIXED can
	 * cause an error interrupt, even if the entry is masked...so
	 * write a valid vector to LVT entries along with the mask bit
	 */

	/* All APICs have timer and LINT0/1 */
	apic_reg_ops->apic_write(APIC_LOCAL_TIMER, AV_MASK|APIC_RESV_IRQ);
	apic_reg_ops->apic_write(APIC_INT_VECT0, AV_MASK|APIC_RESV_IRQ);
	apic_reg_ops->apic_write(APIC_INT_VECT1, AV_NMI);	/* enable NMI */

	/*
	 * On integrated APICs, the number of LVT entries is
	 * 'Max LVT entry' + 1; on 82489DX's (non-integrated
	 * APICs), nlvt is "3" (LINT0, LINT1, and timer)
	 */

	if (apic_cpus[cpun].aci_local_ver < APIC_INTEGRATED_VERS) {
		nlvt = 3;
	} else {
		nlvt = ((apicadr[APIC_VERS_REG] >> 16) & 0xFF) + 1;
	}
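	/*
	 * Example (illustrative): a version register value of 0x50014
	 * carries a "Max LVT entry" of 0x5 in bits 16-23, giving
	 * nlvt == 6, so both the perf counter and thermal LVT entries
	 * handled below exist.
	 */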

	if (nlvt >= 5) {
		/* Enable performance counter overflow interrupt */

		if ((x86_feature & X86_MSR) != X86_MSR)
			apic_enable_cpcovf_intr = 0;
		if (apic_enable_cpcovf_intr) {
			if (apic_cpcovf_vect == 0) {
				int ipl = APIC_PCINT_IPL;
				int irq = apic_get_ipivect(ipl, -1);

				ASSERT(irq != -1);
				apic_cpcovf_vect =
				    apic_irq_table[irq]->airq_vector;
				ASSERT(apic_cpcovf_vect);
				(void) add_avintr(NULL, ipl,
				    (avfunc)kcpc_hw_overflow_intr,
				    "apic pcint", irq, NULL, NULL, NULL, NULL);
				kcpc_hw_overflow_intr_installed = 1;
				kcpc_hw_enable_cpc_intr =
				    apic_cpcovf_mask_clear;
			}
			apic_reg_ops->apic_write(APIC_PCINT_VECT,
			    apic_cpcovf_vect);
		}
	}

	if (nlvt >= 6) {
		/* Only mask TM intr if the BIOS apparently doesn't use it */

		uint32_t lvtval;

		lvtval = apic_reg_ops->apic_read(APIC_THERM_VECT);
		if (((lvtval & AV_MASK) == AV_MASK) ||
		    ((lvtval & AV_DELIV_MODE) != AV_SMI)) {
			apic_reg_ops->apic_write(APIC_THERM_VECT,
			    AV_MASK|APIC_RESV_IRQ);
		}
	}

	/* Enable error interrupt */

	if (nlvt >= 4 && apic_enable_error_intr) {
		if (apic_errvect == 0) {
			int ipl = 0xf;	/* get highest priority intr */
			int irq = apic_get_ipivect(ipl, -1);

			ASSERT(irq != -1);
			apic_errvect = apic_irq_table[irq]->airq_vector;
			ASSERT(apic_errvect);
			/*
			 * Not PSMI compliant, but we are going to merge
			 * with ON anyway
			 */
			(void) add_avintr((void *)NULL, ipl,
			    (avfunc)apic_error_intr, "apic error intr",
			    irq, NULL, NULL, NULL, NULL);
		}
		apic_reg_ops->apic_write(APIC_ERR_VECT, apic_errvect);
		apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
		apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
	}

}

static void
apic_disable_local_apic()
{
	apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);
	apic_reg_ops->apic_write(APIC_LOCAL_TIMER, AV_MASK);

	/* local intr reg 0 */
	apic_reg_ops->apic_write(APIC_INT_VECT0, AV_MASK);

	/* disable NMI */
	apic_reg_ops->apic_write(APIC_INT_VECT1, AV_MASK);

	/* and error interrupt */
	apic_reg_ops->apic_write(APIC_ERR_VECT, AV_MASK);

	/* and perf counter intr */
	apic_reg_ops->apic_write(APIC_PCINT_VECT, AV_MASK);

	apic_reg_ops->apic_write(APIC_SPUR_INT_REG, APIC_SPUR_INTR);
}

static void
apic_picinit(void)
{
	int i, j;
	uint_t isr;
	uint32_t ver;

	/*
	 * On the UniSys Model 6520, the BIOS leaves the vector 0x20 ISR
	 * bit on without clearing it with an EOI.  Since softint uses
	 * vector 0x20 to interrupt itself, softint will not work on this
	 * machine.  To fix this problem, we verify that all the ISR bits
	 * are clear, and issue EOIs to clear any that are not.
	 */
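	/*
	 * Background (illustrative): the in-service state lives in eight
	 * 32-bit ISR registers covering vectors 0-255; scanning i from 7
	 * down to 1 covers vectors 255 down to 32 and skips the unused
	 * 0-0x1f range.
	 */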
	for (i = 7; i >= 1; i--) {
		isr = apic_reg_ops->apic_read(APIC_ISR_REG + (i * 4));
		if (isr != 0)
			for (j = 0; ((j < 32) && (isr != 0)); j++)
				if (isr & (1 << j)) {
					apic_reg_ops->apic_write(
					    APIC_EOI_REG, 0);
					isr &= ~(1 << j);
					apic_error |= APIC_ERR_BOOT_EOI;
				}
	}

	/* set a flag so we know we have run apic_picinit() */
	apic_picinit_called = 1;
	LOCK_INIT_CLEAR(&apic_gethrtime_lock);
	LOCK_INIT_CLEAR(&apic_ioapic_lock);
	LOCK_INIT_CLEAR(&apic_error_lock);

	picsetup();	 /* initialise the 8259 */

	/* add nmi handler - least priority nmi handler */
	LOCK_INIT_CLEAR(&apic_nmi_lock);

	if (!psm_add_nmintr(0, (avfunc) apic_nmi_intr,
	    "pcplusmp NMI handler", (caddr_t)NULL))
		cmn_err(CE_WARN, "pcplusmp: Unable to add nmi handler");

	ver = apic_reg_ops->apic_read(APIC_VERS_REG);
	/*
	 * In order to determine support for Directed EOI capability,
	 * we check for 24th bit in Local APIC Version Register.
	 */
	if (ver & (0x1 << APIC_DIRECTED_EOI)) {
		apic_direct_EOI = 1;
		apic_change_eoi();
	}

	apic_init_intr();

	/* enable apic mode if imcr present */
	if (apic_imcrp) {
		outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT);
		outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_APIC);
	}

	ioapic_init_intr(IOAPIC_MASK);
}


/*ARGSUSED1*/
static int
apic_cpu_start(processorid_t cpun, caddr_t arg)
{
	int		loop_count;
	uint32_t	vector;
	uint_t		cpu_id;
	ulong_t		iflag;

	cpu_id =  apic_cpus[cpun].aci_local_id;

	apic_cmos_ssb_set = 1;

	/*
	 * Interrupts on the BSP will be disabled during these startup
	 * steps in order to avoid unwanted side effects from
	 * executing interrupt handlers on a problematic BIOS.
	 */

	iflag = intr_clear();
	outb(CMOS_ADDR, SSB);
	outb(CMOS_DATA, BIOS_SHUTDOWN);

	while (apic_reg_ops->apic_read(APIC_INT_CMD1) & AV_PENDING)
		apic_ret();

	/* for integrated - make sure there is one INIT IPI in buffer */
	/* for external - it will wake up the cpu */
	apic_reg_ops->apic_write_int_cmd(cpu_id, AV_ASSERT | AV_RESET);

	/* If only 1 CPU is installed, the PENDING bit will not go low */
	for (loop_count = 0x1000; loop_count; loop_count--)
		if (apic_reg_ops->apic_read(APIC_INT_CMD1) & AV_PENDING)
			apic_ret();
		else
			break;

	apic_reg_ops->apic_write_int_cmd(cpu_id, AV_DEASSERT | AV_RESET);

	drv_usecwait(20000);		/* 20 milli sec */

	if (apic_cpus[cpun].aci_local_ver >= APIC_INTEGRATED_VERS) {
		/* integrated apic */

		vector = (rm_platter_pa >> MMU_PAGESHIFT) &
		    (APIC_VECTOR_MASK | APIC_IPL_MASK);
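		/*
		 * Note (illustrative): this is the conventional
		 * INIT/SIPI/SIPI startup sequence; the SIPI vector is the
		 * page number of the real-mode startup code, so the target
		 * CPU begins execution at vector << 12, i.e. rm_platter_pa.
		 */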

		/* to offset the INIT IPI queue up in the buffer */
		apic_reg_ops->apic_write_int_cmd(cpu_id, vector | AV_STARTUP);

		drv_usecwait(200);		/* 20 micro sec */

		apic_reg_ops->apic_write_int_cmd(cpu_id, vector | AV_STARTUP);

		drv_usecwait(200);		/* 20 micro sec */
	}
	intr_restore(iflag);
	return (0);
}


#ifdef	DEBUG
int	apic_break_on_cpu = 9;
int	apic_stretch_interrupts = 0;
int	apic_stretch_ISR = 1 << 3;	/* IPL of 3 matches nothing now */

void
apic_break()
{
}
#endif /* DEBUG */

/*
 * platform_intr_enter
 *
 *	Called at the beginning of the interrupt service routine to
 *	mask all level equal to and below the interrupt priority
 *	of the interrupting vector.  An EOI should be given to
 *	the interrupt controller to enable other HW interrupts.
 *
 *	Return -1 for spurious interrupts
 *
 */
/*ARGSUSED*/
static int
apic_intr_enter(int ipl, int *vectorp)
{
	uchar_t vector;
	int nipl;
	int irq;
	ulong_t iflag;
	apic_cpus_info_t *cpu_infop;

	/*
	 * The real vector delivered is (*vectorp + 0x20), but our caller
	 * subtracts 0x20 from the vector before passing it to us.
	 * (That's why APIC_BASE_VECT is 0x20.)
	 */
	vector = (uchar_t)*vectorp;

	/* if interrupted by the clock, increment apic_nsec_since_boot */
	if (vector == apic_clkvect) {
		if (!apic_oneshot) {
			/* NOTE: this is not MT aware */
			apic_hrtime_stamp++;
			apic_nsec_since_boot += apic_nsec_per_intr;
			apic_hrtime_stamp++;
			last_count_read = apic_hertz_count;
			apic_redistribute_compute();
		}

		/* We will avoid all the book keeping overhead for clock */
		nipl = apic_ipls[vector];

		*vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT];
		if (apic_mode == LOCAL_APIC) {
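			/*
			 * Note: on amd64, %cr8 is an architectural alias
			 * of TPR bits 7:4, which is why the cr8 path
			 * shifts the priority right by APIC_IPL_SHIFT.
			 */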
#if defined(__amd64)
			setcr8((ulong_t)(apic_ipltopri[nipl] >>
			    APIC_IPL_SHIFT));
#else
			LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
			    (uint32_t)apic_ipltopri[nipl]);
#endif
			LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
		} else {
			X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
			X2APIC_WRITE(APIC_EOI_REG, 0);
		}

		return (nipl);
	}

	cpu_infop = &apic_cpus[psm_get_cpu_id()];

	if (vector == (APIC_SPUR_INTR - APIC_BASE_VECT)) {
		cpu_infop->aci_spur_cnt++;
		return (APIC_INT_SPURIOUS);
	}

	/* Check if the vector we got is really what we need */
	if (apic_revector_pending) {
		/*
		 * Disable interrupts for the duration of
		 * the vector translation to prevent a self-race for
		 * the apic_revector_lock.  This cannot be done
		 * in apic_xlate_vector because it is recursive and
		 * we want the vector translation to be atomic with
		 * respect to other (higher-priority) interrupts.
		 */
		iflag = intr_clear();
		vector = apic_xlate_vector(vector + APIC_BASE_VECT) -
		    APIC_BASE_VECT;
		intr_restore(iflag);
	}

	nipl = apic_ipls[vector];
	*vectorp = irq = apic_vector_to_irq[vector + APIC_BASE_VECT];

	if (apic_mode == LOCAL_APIC) {
#if defined(__amd64)
		setcr8((ulong_t)(apic_ipltopri[nipl] >> APIC_IPL_SHIFT));
#else
		LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
		    (uint32_t)apic_ipltopri[nipl]);
#endif
	} else {
		X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
	}

	cpu_infop->aci_current[nipl] = (uchar_t)irq;
	cpu_infop->aci_curipl = (uchar_t)nipl;
	cpu_infop->aci_ISR_in_progress |= 1 << nipl;

	/*
	 * apic_level_intr could have been assimilated into the irq struct,
	 * but having it as a separate character array is more efficient in
	 * terms of cache usage.  So we leave it as is.
	 */
	if (!apic_level_intr[irq]) {
		if (apic_mode == LOCAL_APIC)
			LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
		else
			X2APIC_WRITE(APIC_EOI_REG, 0);
	}

#ifdef	DEBUG
	APIC_DEBUG_BUF_PUT(vector);
	APIC_DEBUG_BUF_PUT(irq);
	APIC_DEBUG_BUF_PUT(nipl);
	APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
	if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
		drv_usecwait(apic_stretch_interrupts);

	if (apic_break_on_cpu == psm_get_cpu_id())
		apic_break();
#endif /* DEBUG */
	return (nipl);
}

/*
 * This macro is common code used by both the MMIO local apic and the
 * x2apic local apic.
 */
#define	APIC_INTR_EXIT() \
{ \
	cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
	if (apic_level_intr[irq]) \
		apic_reg_ops->apic_send_eoi(irq); \
	cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
	/* ISR above current pri could not be in progress */ \
	cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
}
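
/*
 * Mask arithmetic (illustrative): (2 << prev_ipl) - 1 keeps bits
 * 0..prev_ipl set and clears the rest; e.g. prev_ipl == 4 gives
 * (2 << 4) - 1 == 0x1f, so only ISRs at IPL 4 and below stay marked
 * in progress.  apic_setspl() below uses the same expression.
 */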

/*
 * Any changes made to this function must also change x2apic
 * version of intr_exit.
 */
void
apic_intr_exit(int prev_ipl, int irq)
{
	apic_cpus_info_t *cpu_infop;

#if defined(__amd64)
	setcr8((ulong_t)apic_cr8pri[prev_ipl]);
#else
	apicadr[APIC_TASK_REG] = apic_ipltopri[prev_ipl];
#endif

	APIC_INTR_EXIT();
}

/*
 * Same as apic_intr_exit() except it uses MSR rather than MMIO
 * to access local apic registers.
 */
void
x2apic_intr_exit(int prev_ipl, int irq)
{
	apic_cpus_info_t *cpu_infop;

	X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[prev_ipl]);
	APIC_INTR_EXIT();
}

intr_exit_fn_t
psm_intr_exit_fn(void)
{
	if (apic_mode == LOCAL_X2APIC)
		return (x2apic_intr_exit);

	return (apic_intr_exit);
}

/*
 * Mask all interrupts below or equal to the given IPL.
 * Any changes made to this function must also change x2apic
 * version of setspl.
 */
static void
apic_setspl(int ipl)
{

#if defined(__amd64)
	setcr8((ulong_t)apic_cr8pri[ipl]);
#else
	apicadr[APIC_TASK_REG] = apic_ipltopri[ipl];
#endif

	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
	/*
	 * This is a workaround for the ALR QSMP P5 machine: the dummy read
	 * below gives interrupts enough time to come in before the priority
	 * is raised again during the idle() loop.
	 */
	if (apic_setspl_delay)
		(void) apic_reg_ops->apic_get_pri();
}

/*
 * x2apic version of setspl.
 * Mask all interrupts below or equal to the given IPL
 */
static void
x2apic_setspl(int ipl)
{
	X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[ipl]);

	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
}

/*
 * generates an interprocessor interrupt to another CPU
 */
static void
apic_send_ipi(int cpun, int ipl)
{
	int vector;
	ulong_t flag;

	vector = apic_resv_vector[ipl];

	ASSERT((vector >= APIC_BASE_VECT) && (vector <= APIC_SPUR_INTR));

	flag = intr_clear();

	while (apic_reg_ops->apic_read(APIC_INT_CMD1) & AV_PENDING)
		apic_ret();

	apic_reg_ops->apic_write_int_cmd(apic_cpus[cpun].aci_local_id,
	    vector);

	intr_restore(flag);
}


/*ARGSUSED*/
static void
apic_set_idlecpu(processorid_t cpun)
{
}

/*ARGSUSED*/
static void
apic_unset_idlecpu(processorid_t cpun)
{
}


void
apic_ret()
{
}

/*
 * If apic_coarse_hrtime == 1, then apic_gettime() is used instead of
 * apic_gethrtime().  This is used for performance instead of accuracy.
 */

static hrtime_t
apic_gettime()
{
	int old_hrtime_stamp;
	hrtime_t temp;

	/*
	 * In one-shot mode, we do not keep time, so if anyone
	 * calls psm_gettime() directly, we vector over to
	 * gethrtime().
	 * one-shot mode MUST NOT be enabled if this psm is the source of
	 * hrtime.
	 */

	if (apic_oneshot)
		return (gethrtime());


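	/*
	 * Lockless read protocol (illustrative): the clock handler
	 * increments apic_hrtime_stamp before and after updating
	 * apic_nsec_since_boot, so an odd stamp means an update is in
	 * progress, and an unchanged stamp across the read means the
	 * value is consistent (a seqlock-style scheme).
	 */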
gettime_again:
	while ((old_hrtime_stamp = apic_hrtime_stamp) & 1)
		apic_ret();

	temp = apic_nsec_since_boot;

	if (apic_hrtime_stamp != old_hrtime_stamp) {	/* got an interrupt */
		goto gettime_again;
	}
	return (temp);
}

/*
 * Here we return the number of nanoseconds since booting.  Note every
 * clock interrupt increments apic_nsec_since_boot by the appropriate
 * amount.
 */
static hrtime_t
apic_gethrtime()
{
	int curr_timeval, countval, elapsed_ticks;
	int old_hrtime_stamp, status;
	hrtime_t temp;
	uint32_t cpun;
	ulong_t oflags;

	/*
	 * In one-shot mode, we do not keep time, so if anyone
	 * calls psm_gethrtime() directly, we vector over to
	 * gethrtime().
	 * one-shot mode MUST NOT be enabled if this psm is the source of
	 * hrtime.
	 */

	if (apic_oneshot)
		return (gethrtime());

	oflags = intr_clear();	/* prevent migration */

	cpun = apic_reg_ops->apic_read(APIC_LID_REG);
	if (apic_mode == LOCAL_APIC)
		cpun >>= APIC_ID_BIT_OFFSET;

	lock_set(&apic_gethrtime_lock);

gethrtime_again:
	while ((old_hrtime_stamp = apic_hrtime_stamp) & 1)
		apic_ret();

	/*
	 * Check to see which CPU we are on.  Note the time is kept on
	 * the local APIC of CPU 0.  If on CPU 0, simply read the current
	 * counter.  If on another CPU, issue a remote read command to CPU 0.
	 */
	if (cpun == apic_cpus[0].aci_local_id) {
		countval = apic_reg_ops->apic_read(APIC_CURR_COUNT);
	} else {
		while (apic_reg_ops->apic_read(APIC_INT_CMD1) & AV_PENDING)
			apic_ret();

		apic_reg_ops->apic_write_int_cmd(
		    apic_cpus[0].aci_local_id, APIC_CURR_ADD | AV_REMOTE);

		while ((status = apic_reg_ops->apic_read(APIC_INT_CMD1))
		    & AV_READ_PENDING) {
			apic_ret();
		}

		if (status & AV_REMOTE_STATUS)	/* 1 = valid */
			countval = apic_reg_ops->apic_read(APIC_REMOTE_READ);
		else {	/* 0 = invalid */
			apic_remote_hrterr++;
			/*
			 * Return the last hrtime right now; changing this
			 * to retry would need more testing.
			 */
			temp = apic_last_hrtime;

			lock_clear(&apic_gethrtime_lock);

			intr_restore(oflags);

			return (temp);
		}
	}
	if (countval > last_count_read)
		countval = 0;
	else
		last_count_read = countval;

	elapsed_ticks = apic_hertz_count - countval;

	curr_timeval = APIC_TICKS_TO_NSECS(elapsed_ticks);
	temp = apic_nsec_since_boot + curr_timeval;

	if (apic_hrtime_stamp != old_hrtime_stamp) {	/* got an interrupt */
		/* we might have clobbered last_count_read. Restore it */
		last_count_read = apic_hertz_count;
		goto gethrtime_again;
	}

	if (temp < apic_last_hrtime) {
		/* return last hrtime if error occurs */
		apic_hrtime_error++;
		temp = apic_last_hrtime;
	} else
		apic_last_hrtime = temp;

	lock_clear(&apic_gethrtime_lock);
	intr_restore(oflags);

	return (temp);
}

/* apic NMI handler */
/*ARGSUSED*/
static void
apic_nmi_intr(caddr_t arg, struct regs *rp)
{
	if (apic_shutdown_processors) {
		apic_disable_local_apic();
		return;
	}

	apic_error |= APIC_ERR_NMI;

	if (!lock_try(&apic_nmi_lock))
		return;
	apic_num_nmis++;

	if (apic_kmdb_on_nmi && psm_debugger()) {
		debug_enter("NMI received: entering kmdb\n");
	} else if (apic_panic_on_nmi) {
		/* Keep panic from entering kmdb. */
		nopanicdebug = 1;
		panic("NMI received\n");
	} else {
		/*
		 * prom_printf is the best shot we have at something that is
		 * problem-free from high-level/NMI type of interrupts
		 */
		prom_printf("NMI received\n");
	}

	lock_clear(&apic_nmi_lock);
}

/*ARGSUSED*/
static int
apic_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	return (apic_addspl_common(irqno, ipl, min_ipl, max_ipl));
}

static int
apic_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	return (apic_delspl_common(irqno, ipl, min_ipl,  max_ipl));
}

static int
apic_post_cpu_start()
{
	int cpun;

	apic_init_intr();

	/*
	 * Some systems don't enable the internal cache on the non-boot
	 * cpus, so we have to enable it here.
	 */
	setcr0(getcr0() & ~(CR0_CD | CR0_NW));

	while (apic_reg_ops->apic_read(APIC_INT_CMD1) & AV_PENDING)
		apic_ret();

	/*
	 * We may be booting, or resuming from suspend; aci_status will
	 * be APIC_CPU_INTR_ENABLE if coming from suspend, so we add the
	 * APIC_CPU_ONLINE flag here rather than setting aci_status completely.
	 */
	cpun = psm_get_cpu_id();
	apic_cpus[cpun].aci_status |= APIC_CPU_ONLINE;

	apic_reg_ops->apic_write(APIC_DIVIDE_REG, apic_divide_reg_init);
	return (PSM_SUCCESS);
}

processorid_t
apic_get_next_processorid(processorid_t cpu_id)
{

	int i;

	if (cpu_id == -1)
		return ((processorid_t)0);

	for (i = cpu_id + 1; i < NCPU; i++) {
		if (CPU_IN_SET(apic_cpumask, i))
			return (i);
	}

	return ((processorid_t)-1);
}


/*
 * type == -1 indicates it is an internal request. Do not change
 * resv_vector for these requests
 */
static int
apic_get_ipivect(int ipl, int type)
{
	uchar_t vector;
	int irq;

	if (irq = apic_allocate_irq(APIC_VECTOR(ipl))) {
		if (vector = apic_allocate_vector(ipl, irq, 1)) {
			apic_irq_table[irq]->airq_mps_intr_index =
			    RESERVE_INDEX;
			apic_irq_table[irq]->airq_vector = vector;
			if (type != -1) {
				apic_resv_vector[ipl] = vector;
			}
			return (irq);
		}
	}
	apic_error |= APIC_ERR_GET_IPIVECT_FAIL;
	return (-1);	/* shouldn't happen */
}

static int
apic_getclkirq(int ipl)
{
	int	irq;

	if ((irq = apic_get_ipivect(ipl, -1)) == -1)
		return (-1);
	/*
	 * Note the vector in apic_clkvect for per clock handling.
	 */
	apic_clkvect = apic_irq_table[irq]->airq_vector - APIC_BASE_VECT;
	APIC_VERBOSE_IOAPIC((CE_NOTE, "get_clkirq: vector = %x\n",
	    apic_clkvect));
	return (irq);
}

/*
 * Return the number of APIC clock ticks elapsed for the 8254 to decrement
 * (APIC_TIME_COUNT + pit_ticks_adj) ticks.
 */
static uint_t
apic_calibrate(volatile uint32_t *addr, uint16_t *pit_ticks_adj)
{
	uint8_t		pit_tick_lo;
	uint16_t	pit_tick, target_pit_tick;
	uint32_t	start_apic_tick, end_apic_tick;
	ulong_t		iflag;
	uint32_t	reg;

	reg = addr + APIC_CURR_COUNT - apicadr;
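	/*
	 * Note (illustrative): the pointer arithmetic above yields a
	 * register offset in uint32_t units relative to the local APIC
	 * base; when addr == apicadr this is simply APIC_CURR_COUNT.
	 */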

	iflag = intr_clear();

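	/*
	 * Reading the 8254 (illustrative): two back-to-back inb()s of
	 * counter 0 return the low byte and then the high byte, assuming
	 * the counter is in low/high access mode; the loop below retries
	 * until the read is safely away from a low-byte wrap.
	 */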
	do {
		pit_tick_lo = inb(PITCTR0_PORT);
		pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo;
	} while (pit_tick < APIC_TIME_MIN ||
	    pit_tick_lo <= APIC_LB_MIN || pit_tick_lo >= APIC_LB_MAX);

	/*
	 * Wait for the 8254 to decrement by 5 ticks to ensure
	 * we didn't start in the middle of a tick.
	 * Compare with 0x10 for the wrap around case.
	 */
	target_pit_tick = pit_tick - 5;
	do {
		pit_tick_lo = inb(PITCTR0_PORT);
		pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo;
	} while (pit_tick > target_pit_tick || pit_tick_lo < 0x10);

	start_apic_tick = apic_reg_ops->apic_read(reg);

	/*
	 * Wait for the 8254 to decrement by
	 * (APIC_TIME_COUNT + pit_ticks_adj) ticks
	 */
	target_pit_tick = pit_tick - APIC_TIME_COUNT;
	do {
		pit_tick_lo = inb(PITCTR0_PORT);
		pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo;
	} while (pit_tick > target_pit_tick || pit_tick_lo < 0x10);

	end_apic_tick = apic_reg_ops->apic_read(reg);

	*pit_ticks_adj = target_pit_tick - pit_tick;

	intr_restore(iflag);

	return (start_apic_tick - end_apic_tick);
}

/*
 * Initialise the APIC timer on the local APIC of CPU 0 to the desired
 * frequency.  Note at this stage in the boot sequence, the boot processor
 * is the only active processor.
 * A hertz value of 0 indicates a one-shot mode request.  In this case
 * the function returns the resolution (in nanoseconds) of the hardware
 * timer interrupt.  If one-shot mode capability is not available,
 * the return value will be 0.  apic_oneshot_enable is a global switch
 * for disabling the functionality.
 * A positive value for hertz indicates a periodic mode request.
 * In this case the hardware will be programmed to generate clock interrupts
 * at hertz frequency and the function returns the resolution of interrupts
 * in nanoseconds.
 */

static int
apic_clkinit(int hertz)
{
	uint_t		apic_ticks = 0;
	uint_t		pit_ticks;
	int		ret;
	uint16_t	pit_ticks_adj;
	static int	firsttime = 1;

	if (firsttime) {
		/* first time calibrate on CPU0 only */

		apic_reg_ops->apic_write(APIC_DIVIDE_REG, apic_divide_reg_init);
		apic_reg_ops->apic_write(APIC_INIT_COUNT, APIC_MAXVAL);
		apic_ticks = apic_calibrate(apicadr, &pit_ticks_adj);

		/* total number of PIT ticks corresponding to apic_ticks */
		pit_ticks = APIC_TIME_COUNT + pit_ticks_adj;

		/*
		 * Determine the number of nanoseconds per APIC clock tick
		 * and then determine how many APIC ticks to interrupt at the
		 * desired frequency:
		 * apic_ticks / (pit_ticks / PIT_HZ) = apic_ticks_per_s
		 * (apic_ticks * PIT_HZ) / pit_ticks = apic_ticks_per_s
		 * apic_ticks_per_ns = (apic_ticks * PIT_HZ) / (pit_ticks * 10^9)
		 * apic_ticks_per_SFnsecs =
		 *   (SF * apic_ticks * PIT_HZ) / (pit_ticks * 10^9)
		 */
		apic_ticks_per_SFnsecs =
		    ((SF * apic_ticks * PIT_HZ) /
		    ((uint64_t)pit_ticks * NANOSEC));
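		/*
		 * Example with hypothetical numbers: a 100 MHz APIC timer
		 * clock is 0.1 ticks/ns, so apic_ticks_per_SFnsecs comes
		 * out near SF / 10; APIC_TICKS_TO_NSECS() and
		 * APIC_NSECS_TO_TICKS() then divide SF back out, keeping
		 * precision despite integer arithmetic.
		 */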

		/* the interval timer initial count is 32 bit max */
		apic_nsec_max = APIC_TICKS_TO_NSECS(APIC_MAXVAL);
		firsttime = 0;
	}

	if (hertz != 0) {
		/* periodic */
		apic_nsec_per_intr = NANOSEC / hertz;
		apic_hertz_count = APIC_NSECS_TO_TICKS(apic_nsec_per_intr);
	}

	apic_int_busy_mark = (apic_int_busy_mark *
	    apic_sample_factor_redistribution) / 100;
	apic_int_free_mark = (apic_int_free_mark *
	    apic_sample_factor_redistribution) / 100;
	apic_diff_for_redistribution = (apic_diff_for_redistribution *
	    apic_sample_factor_redistribution) / 100;

	if (hertz == 0) {
		/* requested one_shot */
		if (!tsc_gethrtime_enable || !apic_oneshot_enable)
			return (0);
		apic_oneshot = 1;
		ret = (int)APIC_TICKS_TO_NSECS(1);
	} else {
		/* program the local APIC to interrupt at the given frequency */
		apic_reg_ops->apic_write(APIC_INIT_COUNT, apic_hertz_count);
		apic_reg_ops->apic_write(APIC_LOCAL_TIMER,
		    (apic_clkvect + APIC_BASE_VECT) | AV_TIME);
		apic_oneshot = 0;
		ret = NANOSEC / hertz;
	}

	return (ret);

}

/*
 * apic_preshutdown:
 * Called early in shutdown whilst we can still access filesystems to do
 * things like loading modules which will be required to complete shutdown
 * after filesystems are all unmounted.
 */
static void
apic_preshutdown(int cmd, int fcn)
{
	APIC_VERBOSE_POWEROFF(("apic_preshutdown(%d,%d); m=%d a=%d\n",
	    cmd, fcn, apic_poweroff_method, apic_enable_acpi));

	if ((cmd != A_SHUTDOWN) || (fcn != AD_POWEROFF)) {
		return;
	}
}

static void
apic_shutdown(int cmd, int fcn)
{
	int restarts, attempts;
	int i;
	uchar_t	byte;
	ulong_t iflag;

	/* Send NMI to all CPUs except self to do per processor shutdown */
	iflag = intr_clear();
	while (apic_reg_ops->apic_read(APIC_INT_CMD1) & AV_PENDING)
		apic_ret();
	apic_shutdown_processors = 1;
	apic_reg_ops->apic_write(APIC_INT_CMD1,
	    AV_NMI | AV_LEVEL | AV_SH_ALL_EXCSELF);

	/* restore cmos shutdown byte before reboot */
	if (apic_cmos_ssb_set) {
		outb(CMOS_ADDR, SSB);
		outb(CMOS_DATA, 0);
	}

	ioapic_disable_redirection();

	/*	disable apic mode if imcr present	*/
	if (apic_imcrp) {
		outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT);
		outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_PIC);
	}

	apic_disable_local_apic();

	intr_restore(iflag);

	/* remainder of function is for shutdown cases only */
	if (cmd != A_SHUTDOWN)
		return;

	/*
	 * Switch system back into Legacy-Mode if using ACPI and
	 * not powering-off.  Some BIOSes need to remain in ACPI-mode
	 * for power-off to succeed (Dell Dimension 4600)
	 */
	if (apic_enable_acpi && (fcn != AD_POWEROFF))
		(void) AcpiDisable();

	/* remainder of function is for shutdown+poweroff case only */
	if (fcn != AD_POWEROFF)
		return;

	switch (apic_poweroff_method) {
		case APIC_POWEROFF_VIA_RTC:

			/* select the extended NVRAM bank in the RTC */
			outb(CMOS_ADDR, RTC_REGA);
			byte = inb(CMOS_DATA);
			outb(CMOS_DATA, (byte | EXT_BANK));

			outb(CMOS_ADDR, PFR_REG);

			/* for Predator must toggle the PAB bit */
			byte = inb(CMOS_DATA);

			/*
			 * clear power active bar, wakeup alarm and
			 * kickstart
			 */
			byte &= ~(PAB_CBIT | WF_FLAG | KS_FLAG);
			outb(CMOS_DATA, byte);

			/* delay before next write */
			drv_usecwait(1000);

			/* for S40 the following would suffice */
			byte = inb(CMOS_DATA);

			/* power active bar control bit */
			byte |= PAB_CBIT;
			outb(CMOS_DATA, byte);

			break;

		case APIC_POWEROFF_VIA_ASPEN_BMC:
			restarts = 0;
restart_aspen_bmc:
			if (++restarts == 3)
				break;
			attempts = 0;
			do {
				byte = inb(MISMIC_FLAG_REGISTER);
				byte &= MISMIC_BUSY_MASK;
				if (byte != 0) {
					drv_usecwait(1000);
					if (attempts >= 3)
						goto restart_aspen_bmc;
					++attempts;
				}
			} while (byte != 0);
			outb(MISMIC_CNTL_REGISTER, CC_SMS_GET_STATUS);
			byte = inb(MISMIC_FLAG_REGISTER);
			byte |= 0x1;
			outb(MISMIC_FLAG_REGISTER, byte);
			i = 0;
			for (; i < (sizeof (aspen_bmc)/sizeof (aspen_bmc[0]));
			    i++) {
				attempts = 0;
				do {
					byte = inb(MISMIC_FLAG_REGISTER);
					byte &= MISMIC_BUSY_MASK;
					if (byte != 0) {
						drv_usecwait(1000);
						if (attempts >= 3)
							goto restart_aspen_bmc;
						++attempts;
					}
				} while (byte != 0);
				outb(MISMIC_CNTL_REGISTER, aspen_bmc[i].cntl);
				outb(MISMIC_DATA_REGISTER, aspen_bmc[i].data);
				byte = inb(MISMIC_FLAG_REGISTER);
				byte |= 0x1;
				outb(MISMIC_FLAG_REGISTER, byte);
			}
			break;

		case APIC_POWEROFF_VIA_SITKA_BMC:
			restarts = 0;
restart_sitka_bmc:
			if (++restarts == 3)
				break;
			attempts = 0;
			do {
				byte = inb(SMS_STATUS_REGISTER);
				byte &= SMS_STATE_MASK;
				if ((byte == SMS_READ_STATE) ||
				    (byte == SMS_WRITE_STATE)) {
					drv_usecwait(1000);
					if (attempts >= 3)
						goto restart_sitka_bmc;
					++attempts;
				}
			} while ((byte == SMS_READ_STATE) ||
			    (byte == SMS_WRITE_STATE));
			outb(SMS_COMMAND_REGISTER, SMS_GET_STATUS);
			i = 0;
			for (; i < (sizeof (sitka_bmc)/sizeof (sitka_bmc[0]));
			    i++) {
				attempts = 0;
				do {
					byte = inb(SMS_STATUS_REGISTER);
					byte &= SMS_IBF_MASK;
					if (byte != 0) {
						drv_usecwait(1000);
						if (attempts >= 3)
							goto restart_sitka_bmc;
						++attempts;
					}
				} while (byte != 0);
				outb(sitka_bmc[i].port, sitka_bmc[i].data);
			}
			break;

		case APIC_POWEROFF_NONE:

			/* If no APIC direct method, we will try using ACPI */
			if (apic_enable_acpi) {
				if (acpi_poweroff() == 1)
					return;
			} else
				return;

			break;
	}
	/*
	 * Wait a limited time here for power to go off.
	 * If the power does not go off, then there was a
	 * problem and we should continue to the halt which
	 * prints a message for the user to press a key to
	 * reboot.
	 */
	drv_usecwait(7000000); /* wait seven seconds */

}

/*
 * Try to disable all interrupts.  We just assign interrupts to other
 * processors based on policy.  If any were bound by user request, we
 * let them continue and return failure.  We do not bother to check
 * for cache affinity while rebinding.
 */

static int
apic_disable_intr(processorid_t cpun)
{
	int bind_cpu = 0, i, hardbound = 0;
	apic_irq_t *irq_ptr;
	ulong_t iflag;

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	for (i = 0; i <= APIC_MAX_VECTOR; i++) {
		if (apic_reprogram_info[i].done == B_FALSE) {
			if (apic_reprogram_info[i].bindcpu == cpun) {
				/*
				 * CPU is busy -- it's the target of
				 * a pending reprogramming attempt
				 */
				lock_clear(&apic_ioapic_lock);
				intr_restore(iflag);
				return (PSM_FAILURE);
			}
		}
	}

	apic_cpus[cpun].aci_status &= ~APIC_CPU_INTR_ENABLE;

	apic_cpus[cpun].aci_curipl = 0;

	i = apic_min_device_irq;
	for (; i <= apic_max_device_irq; i++) {
		/*
		 * If there are bound interrupts on this cpu, then
		 * rebind them to other processors.
		 */
		if ((irq_ptr = apic_irq_table[i]) != NULL) {
			ASSERT((irq_ptr->airq_temp_cpu == IRQ_UNBOUND) ||
			    (irq_ptr->airq_temp_cpu == IRQ_UNINIT) ||
			    ((irq_ptr->airq_temp_cpu & ~IRQ_USER_BOUND) <
			    apic_nproc));

			if (irq_ptr->airq_temp_cpu == (cpun | IRQ_USER_BOUND)) {
				hardbound = 1;
				continue;
			}

			if (irq_ptr->airq_temp_cpu == cpun) {
				do {
					bind_cpu = apic_next_bind_cpu++;
					if (bind_cpu >= apic_nproc) {
						apic_next_bind_cpu = 1;
						bind_cpu = 0;
					}
				} while (apic_rebind_all(irq_ptr, bind_cpu));
			}
		}
	}

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);

	if (hardbound) {
		cmn_err(CE_WARN, "Could not disable interrupts on %d "
		    "due to user bound interrupts", cpun);
		return (PSM_FAILURE);
	} else
		return (PSM_SUCCESS);
}

/*
 * Bind interrupts to the CPU's local APIC.
 * Interrupts should not be bound to a CPU's local APIC until the CPU
 * is ready to receive interrupts.
 */
static void
apic_enable_intr(processorid_t cpun)
{
	int	i;
	apic_irq_t *irq_ptr;
	ulong_t iflag;

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	apic_cpus[cpun].aci_status |= APIC_CPU_INTR_ENABLE;

	for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
		if ((irq_ptr = apic_irq_table[i]) != NULL) {
			if ((irq_ptr->airq_cpu & ~IRQ_USER_BOUND) == cpun) {
				(void) apic_rebind_all(irq_ptr,
				    irq_ptr->airq_cpu);
			}
		}
	}

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}


/*
 * This function will reprogram the timer.
 *
 * When in oneshot mode the argument is the absolute time in future to
 * generate the interrupt at.
 *
 * When in periodic mode, the argument is the interval at which the
 * interrupts should be generated. There is no need to support the periodic
 * mode timer change at this time.
 */
static void
apic_timer_reprogram(hrtime_t time)
{
	hrtime_t now;
	uint_t ticks;
	int64_t delta;

	/*
	 * We should be called from high PIL context (CBE_HIGH_PIL),
	 * so kpreempt is disabled.
	 */

	if (!apic_oneshot) {
		/* time is the interval for periodic mode */
		ticks = APIC_NSECS_TO_TICKS(time);
	} else {
		/* one shot mode */

		now = gethrtime();
		delta = time - now;

		if (delta <= 0) {
			/*
			 * We were asked to generate an interrupt in the
			 * past; generate one as soon as possible.
			 */
			ticks = apic_min_timer_ticks;
		} else if (delta > apic_nsec_max) {
			/*
			 * We were asked to generate an interrupt further in
			 * the future than the hardware is capable of; set
			 * it to the maximum the hardware can handle.
			 */

			ticks = APIC_MAXVAL;
#ifdef DEBUG
			cmn_err(CE_CONT, "apic_timer_reprogram, request at"
			    "  %lld  too far in future, current time"
			    "  %lld \n", time, now);
#endif
		} else
			ticks = APIC_NSECS_TO_TICKS(delta);
	}

	if (ticks < apic_min_timer_ticks)
		ticks = apic_min_timer_ticks;

	apic_reg_ops->apic_write(APIC_INIT_COUNT, ticks);
}

/*
 * This function will enable timer interrupts.
 */
static void
apic_timer_enable(void)
{
	/*
	 * We should be called from high PIL context (CBE_HIGH_PIL),
	 * so kpreempt is disabled.
	 */

	if (!apic_oneshot) {
		apic_reg_ops->apic_write(APIC_LOCAL_TIMER,
		    (apic_clkvect + APIC_BASE_VECT) | AV_TIME);
	} else {
		/* one shot */
		apic_reg_ops->apic_write(APIC_LOCAL_TIMER,
		    (apic_clkvect + APIC_BASE_VECT));
	}
}

/*
 * This function will disable timer interrupts.
 */
static void
apic_timer_disable(void)
{
	/*
	 * We should be called from high PIL context (CBE_HIGH_PIL),
	 * so kpreempt is disabled.
	 */
	apic_reg_ops->apic_write(APIC_LOCAL_TIMER,
	    (apic_clkvect + APIC_BASE_VECT) | AV_MASK);
}


ddi_periodic_t apic_periodic_id;

/*
 * If this module needs a periodic handler for the interrupt distribution, it
 * can be added here. The argument to the periodic handler is not currently
 * used, but is reserved for future use.
 */
static void
apic_post_cyclic_setup(void *arg)
{
_NOTE(ARGUNUSED(arg))
	/* cpu_lock is held */
	/* set up a periodic handler for intr redistribution */

	/*
	 * In periodic mode, intr redistribution processing is done in
	 * apic_intr_enter() during clock intr processing.
	 */
	if (!apic_oneshot)
		return;
	/*
	 * Register a periodic handler for the redistribution processing.
	 * On X86, CY_LOW_LEVEL is mapped to the level 2 interrupt, so
	 * DDI_IPL_2 should be passed to ddi_periodic_add() here.
	 */
1902 	apic_periodic_id = ddi_periodic_add(
1903 	    (void (*)(void *))apic_redistribute_compute, NULL,
1904 	    apic_redistribute_sample_interval, DDI_IPL_2);
1905 }
1906 
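/*
 * Illustrative sketch (hypothetical; this driver has no unload path):
 * a handler registered with ddi_periodic_add() above would be torn
 * down with the matching DDI routine, e.g.:
 *
 *	if (apic_periodic_id != 0)
 *		ddi_periodic_delete(apic_periodic_id);
 */
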
1907 static void
1908 apic_redistribute_compute(void)
1909 {
1910 	int	i, j, max_busy;
1911 
1912 	if (apic_enable_dynamic_migration) {
1913 		if (++apic_nticks == apic_sample_factor_redistribution) {
1914 			/*
1915 			 * Time to check whether apic_intr_redistribute()
1916 			 * should run: reset apic_nticks, which causes
1917 			 * max_busy to be computed below; if it exceeds
1918 			 * apic_int_busy_mark, we redistribute.
1919 			 */
1920 			apic_nticks = 0;
1921 		}
1922 		max_busy = 0;
1923 		for (i = 0; i < apic_nproc; i++) {
1924 
1925 			/*
1926 			 * Check whether aci_curipl is non-zero and an ISR
1927 			 * at that IPL is in progress.
1928 			 */
1929 			if (((j = apic_cpus[i].aci_curipl) != 0) &&
1930 			    (apic_cpus[i].aci_ISR_in_progress & (1 << j))) {
1931 
1932 				int	irq;
1933 				apic_cpus[i].aci_busy++;
1934 				irq = apic_cpus[i].aci_current[j];
1935 				apic_irq_table[irq]->airq_busy++;
1936 			}
1937 
1938 			if (!apic_nticks &&
1939 			    (apic_cpus[i].aci_busy > max_busy))
1940 				max_busy = apic_cpus[i].aci_busy;
1941 		}
1942 		if (!apic_nticks) {
1943 			if (max_busy > apic_int_busy_mark) {
1944 			/*
1945 			 * We could let this check be skipped more than
1946 			 * once before redistributing, which would give a
1947 			 * redistribution at half the busy mark (due to
1948 			 * the doubled interval). We need more empirical
1949 			 * data to decide whether that is a good strategy;
1950 			 * punt for now.
1951 			 */
1952 				if (apic_skipped_redistribute) {
1953 					apic_cleanup_busy();
1954 					apic_skipped_redistribute = 0;
1955 				} else {
1956 					apic_intr_redistribute();
1957 				}
1958 			} else
1959 				apic_skipped_redistribute++;
1960 		}
1961 	}
1962 }
1963 
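/*
 * Worked example of the heuristic above (values hypothetical): over a
 * window of apic_sample_factor_redistribution clock ticks, each CPU's
 * aci_busy counts the ticks on which it was found inside an ISR.  If
 * the busiest CPU's count exceeds apic_int_busy_mark at the end of a
 * window, interrupts are redistributed; otherwise the window is merely
 * counted in apic_skipped_redistribute.
 */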
1964 
1965 /*
1966  * The following functions are in the platform specific file so that they
1967  * can be different functions depending on whether we are running on
1968  * bare metal or a hypervisor.
1969  */
1970 
1971 /*
1972  * map an apic for memory-mapped access
1973  */
1974 uint32_t *
1975 mapin_apic(uint32_t addr, size_t len, int flags)
1976 {
1977 	/*LINTED: pointer cast may result in improper alignment */
1978 	return ((uint32_t *)psm_map_phys(addr, len, flags));
1979 }
1980 
1981 uint32_t *
1982 mapin_ioapic(uint32_t addr, size_t len, int flags)
1983 {
1984 	return (mapin_apic(addr, len, flags));
1985 }
1986 
1987 /*
1988  * unmap an apic
1989  */
1990 void
1991 mapout_apic(caddr_t addr, size_t len)
1992 {
1993 	psm_unmap_phys(addr, len);
1994 }
1995 
1996 void
1997 mapout_ioapic(caddr_t addr, size_t len)
1998 {
1999 	mapout_apic(addr, len);
2000 }
2001 
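/*
 * Illustrative sketch (hypothetical; the helper and its guard are not
 * part of the driver): mapping an I/O APIC register window, reading one
 * register through the select/window pair, and unmapping it again.  The
 * length would normally come from the ACPI/MP tables, and the PROT_*
 * flags are those this PSM passes elsewhere.
 */
#ifdef APIC_MAP_EXAMPLE
static uint32_t
ioapic_peek(uint32_t physaddr, size_t len, uint32_t reg)
{
	volatile uint32_t *io;
	uint32_t val;

	io = mapin_ioapic(physaddr, len, PROT_READ | PROT_WRITE);
	io[APIC_IO_REG] = reg;		/* select the register */
	val = io[APIC_IO_DATA];		/* read it through the window */
	mapout_ioapic((caddr_t)io, len);
	return (val);
}
#endif	/* APIC_MAP_EXAMPLE */
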
2002 /*
2003  * Check to make sure there are enough irq slots
2004  */
2005 int
2006 apic_check_free_irqs(int count)
2007 {
2008 	int i, avail;
2009 
2010 	avail = 0;
2011 	for (i = APIC_FIRST_FREE_IRQ; i < APIC_RESV_IRQ; i++) {
2012 		if ((apic_irq_table[i] == NULL) ||
2013 		    apic_irq_table[i]->airq_mps_intr_index == FREE_INDEX) {
2014 			if (++avail >= count)
2015 				return (PSM_SUCCESS);
2016 		}
2017 	}
2018 	return (PSM_FAILURE);
2019 }
2020 
2021 /*
2022  * This function allocates "count" MSI vector(s) for the given "dip/pri/type"
2023  */
2024 int
2025 apic_alloc_msi_vectors(dev_info_t *dip, int inum, int count, int pri,
2026     int behavior)
2027 {
2028 	int	rcount, i;
2029 	uchar_t	start, irqno;
2030 	uint32_t cpu;
2031 	major_t	major;
2032 	apic_irq_t	*irqptr;
2033 
2034 	DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: dip=0x%p "
2035 	    "inum=0x%x  pri=0x%x count=0x%x behavior=%d\n",
2036 	    (void *)dip, inum, pri, count, behavior));
2037 
2038 	if (count > 1) {
2039 		if (behavior == DDI_INTR_ALLOC_STRICT &&
2040 		    (apic_multi_msi_enable == 0 || count > apic_multi_msi_max))
2041 			return (0);
2042 
2043 		if (apic_multi_msi_enable == 0)
2044 			count = 1;
2045 		else if (count > apic_multi_msi_max)
2046 			count = apic_multi_msi_max;
2047 	}
2048 
2049 	if ((rcount = apic_navail_vector(dip, pri)) > count)
2050 		rcount = count;
2051 	else if (rcount == 0 || (rcount < count &&
2052 	    behavior == DDI_INTR_ALLOC_STRICT))
2053 		return (0);
2054 
2055 	/* MSI grants vector counts in powers of two; if not ISP2, round down */
2056 	if (!ISP2(rcount))
2057 		rcount = 1 << (highbit(rcount) - 1);
2058 
2059 	mutex_enter(&airq_mutex);
2060 
2061 	for (start = 0; rcount > 0; rcount >>= 1) {
2062 		if ((start = apic_find_multi_vectors(pri, rcount)) != 0 ||
2063 		    behavior == DDI_INTR_ALLOC_STRICT)
2064 			break;
2065 	}
2066 
2067 	if (start == 0) {
2068 		/* no vector available */
2069 		mutex_exit(&airq_mutex);
2070 		return (0);
2071 	}
2072 
2073 	if (apic_check_free_irqs(rcount) == PSM_FAILURE) {
2074 		/* not enough free irq slots available */
2075 		mutex_exit(&airq_mutex);
2076 		return (0);
2077 	}
2078 
2079 	major = (dip != NULL) ? ddi_name_to_major(ddi_get_name(dip)) : 0;
2080 	for (i = 0; i < rcount; i++) {
2081 		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
2082 		    (uchar_t)-1) {
2083 			/*
2084 			 * shouldn't happen because of the
2085 			 * apic_check_free_irqs() check earlier
2086 			 */
2087 			mutex_exit(&airq_mutex);
2088 			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: "
2089 			    "apic_allocate_irq failed\n"));
2090 			return (i);
2091 		}
2092 		apic_max_device_irq = max(irqno, apic_max_device_irq);
2093 		apic_min_device_irq = min(irqno, apic_min_device_irq);
2094 		irqptr = apic_irq_table[irqno];
2095 #ifdef	DEBUG
2096 		if (apic_vector_to_irq[start + i] != APIC_RESV_IRQ)
2097 			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: "
2098 			    "apic_vector_to_irq is not APIC_RESV_IRQ\n"));
2099 #endif
2100 		apic_vector_to_irq[start + i] = (uchar_t)irqno;
2101 
2102 		irqptr->airq_vector = (uchar_t)(start + i);
2103 		irqptr->airq_ioapicindex = (uchar_t)inum;	/* start inum */
2104 		irqptr->airq_intin_no = (uchar_t)rcount;	/* vector count */
2105 		irqptr->airq_ipl = pri;
2107 		irqptr->airq_origirq = (uchar_t)(inum + i);
2108 		irqptr->airq_share_id = 0;
2109 		irqptr->airq_mps_intr_index = MSI_INDEX;
2110 		irqptr->airq_dip = dip;
2111 		irqptr->airq_major = major;
2112 		if (i == 0) /* they are all bound to the same CPU */
2113 			cpu = irqptr->airq_cpu = apic_bind_intr(dip, irqno,
2114 			    0xff, 0xff);
2115 		else
2116 			irqptr->airq_cpu = cpu;
2117 		DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: irq=0x%x "
2118 		    "dip=0x%p vector=0x%x origirq=0x%x pri=0x%x\n", irqno,
2119 		    (void *)irqptr->airq_dip, irqptr->airq_vector,
2120 		    irqptr->airq_origirq, pri));
2121 	}
2122 	mutex_exit(&airq_mutex);
2123 	return (rcount);
2124 }
2125 
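/*
 * Illustrative sketch (hypothetical; not part of the driver): the
 * power-of-two rounding used above, shown standalone.  PCI MSI can
 * only grant 1, 2, 4, 8, 16 or 32 vectors, so a request for e.g. 5
 * is rounded down to 4.
 */
#ifdef APIC_MSI_EXAMPLE
static int
apic_msi_round_down(int rcount)
{
	if (!ISP2(rcount))
		rcount = 1 << (highbit(rcount) - 1);	/* 5 -> 4, 9 -> 8 */
	return (rcount);
}
#endif	/* APIC_MSI_EXAMPLE */
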
2126 /*
2127  * This function allocates "count" MSI-X vector(s) for the given "dip/pri/type"
2128  */
2129 int
2130 apic_alloc_msix_vectors(dev_info_t *dip, int inum, int count, int pri,
2131     int behavior)
2132 {
2133 	int	rcount, i;
2134 	major_t	major;
2135 
2136 	if (count > 1) {
2137 		if (behavior == DDI_INTR_ALLOC_STRICT) {
2138 			if (count > apic_msix_max)
2139 				return (0);
2140 		} else if (count > apic_msix_max)
2141 			count = apic_msix_max;
2142 	}
2143 
2144 	mutex_enter(&airq_mutex);
2145 
2146 	if ((rcount = apic_navail_vector(dip, pri)) > count)
2147 		rcount = count;
2148 	else if (rcount == 0 || (rcount < count &&
2149 	    behavior == DDI_INTR_ALLOC_STRICT)) {
2150 		rcount = 0;
2151 		goto out;
2152 	}
2153 
2154 	if (apic_check_free_irqs(rcount) == PSM_FAILURE) {
2155 		/* not enough free irq slots available */
2156 		rcount = 0;
2157 		goto out;
2158 	}
2159 
2160 	major = (dip != NULL) ? ddi_name_to_major(ddi_get_name(dip)) : 0;
2161 	for (i = 0; i < rcount; i++) {
2162 		uchar_t	vector, irqno;
2163 		apic_irq_t	*irqptr;
2164 
2165 		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
2166 		    (uchar_t)-1) {
2167 			/*
2168 			 * shouldn't happen because of the
2169 			 * apic_check_free_irqs() check earlier
2170 			 */
2171 			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msix_vectors: "
2172 			    "apic_allocate_irq failed\n"));
2173 			rcount = i;
2174 			goto out;
2175 		}
2176 		if ((vector = apic_allocate_vector(pri, irqno, 1)) == 0) {
2177 			/*
2178 			 * shouldn't happen because of the
2179 			 * apic_navail_vector() call earlier
2180 			 */
2181 			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msix_vectors: "
2182 			    "apic_allocate_vector failed\n"));
2183 			rcount = i;
2184 			goto out;
2185 		}
2186 		apic_max_device_irq = max(irqno, apic_max_device_irq);
2187 		apic_min_device_irq = min(irqno, apic_min_device_irq);
2188 		irqptr = apic_irq_table[irqno];
2189 		irqptr->airq_vector = (uchar_t)vector;
2190 		irqptr->airq_ipl = pri;
2191 		irqptr->airq_origirq = (uchar_t)(inum + i);
2192 		irqptr->airq_share_id = 0;
2193 		irqptr->airq_mps_intr_index = MSIX_INDEX;
2194 		irqptr->airq_dip = dip;
2195 		irqptr->airq_major = major;
2196 		irqptr->airq_cpu = apic_bind_intr(dip, irqno, 0xff, 0xff);
2197 	}
2198 out:
2199 	mutex_exit(&airq_mutex);
2200 	return (rcount);
2201 }
2202 
2203 /*
2204  * Allocate a free vector for irq at ipl. Takes care of merging of multiple
2205  * IPLs into a single APIC level as well as stretching some IPLs onto multiple
2206  * levels. APIC_HI_PRI_VECTS interrupts are reserved for high priority
2207  * requests and allocated only when pri is set.
2208  */
2209 uchar_t
2210 apic_allocate_vector(int ipl, int irq, int pri)
2211 {
2212 	int	lowest, highest, i;
2213 
2214 	highest = apic_ipltopri[ipl] + APIC_VECTOR_MASK;
2215 	lowest = apic_ipltopri[ipl - 1] + APIC_VECTOR_PER_IPL;
2216 
2217 	if (highest < lowest) /* both ipl and ipl - 1 map to the same pri */
2218 		lowest -= APIC_VECTOR_PER_IPL;
2219 
2220 #ifdef	DEBUG
2221 	if (apic_restrict_vector)	/* for testing shared interrupt logic */
2222 		highest = lowest + apic_restrict_vector + APIC_HI_PRI_VECTS;
2223 #endif /* DEBUG */
2224 	if (pri == 0)
2225 		highest -= APIC_HI_PRI_VECTS;
2226 
2227 	for (i = lowest; i < highest; i++) {
2228 		if (APIC_CHECK_RESERVE_VECTORS(i))
2229 			continue;
2230 		if (apic_vector_to_irq[i] == APIC_RESV_IRQ) {
2231 			apic_vector_to_irq[i] = (uchar_t)irq;
2232 			return (i);
2233 		}
2234 	}
2235 
2236 	return (0);
2237 }
2238 
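/*
 * Worked example (assuming APIC_VECTOR_PER_IPL is 0x10, i.e. one APIC
 * priority class of 16 vectors per IPL): for an ipl whose range is not
 * shared with ipl - 1, the search spans the 16 vectors of that class,
 * minus APIC_HI_PRI_VECTS at the top when pri is 0, skipping anything
 * APIC_CHECK_RESERVE_VECTORS excludes.
 */
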
2239 /* Mark vector as not being used by any irq */
2240 void
2241 apic_free_vector(uchar_t vector)
2242 {
2243 	apic_vector_to_irq[vector] = APIC_RESV_IRQ;
2244 }
2245 
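/*
 * I/O APIC registers are accessed indirectly: write the register index
 * to the select register (APIC_IO_REG), then read or write the data
 * through the window register (APIC_IO_DATA).  The accessors below
 * implement that protocol.
 */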
2246 uint32_t
2247 ioapic_read(int ioapic_ix, uint32_t reg)
2248 {
2249 	volatile uint32_t *ioapic;
2250 
2251 	ioapic = apicioadr[ioapic_ix];
2252 	ioapic[APIC_IO_REG] = reg;
2253 	return (ioapic[APIC_IO_DATA]);
2254 }
2255 
2256 void
2257 ioapic_write(int ioapic_ix, uint32_t reg, uint32_t value)
2258 {
2259 	volatile uint32_t *ioapic;
2260 
2261 	ioapic = apicioadr[ioapic_ix];
2262 	ioapic[APIC_IO_REG] = reg;
2263 	ioapic[APIC_IO_DATA] = value;
2264 }
2265 
2266 void
2267 ioapic_write_eoi(int ioapic_ix, uint32_t value)
2268 {
2269 	volatile uint32_t *ioapic;
2270 
2271 	ioapic = apicioadr[ioapic_ix];
2272 	ioapic[APIC_IO_EOI] = value;
2273 }
2274 
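/*
 * Illustrative sketch (hypothetical; the helper and its guard are not
 * part of the driver): reading a 64-bit redirection table entry with
 * the accessors above.  Redirection entries start at I/O APIC register
 * 0x10, two 32-bit registers per entry.
 */
#ifdef APIC_RDT_EXAMPLE
static uint64_t
ioapic_read_rdt(int ioapic_ix, int intin)
{
	uint32_t lo = ioapic_read(ioapic_ix, 0x10 + 2 * intin);
	uint32_t hi = ioapic_read(ioapic_ix, 0x10 + 2 * intin + 1);

	return (((uint64_t)hi << 32) | lo);
}
#endif	/* APIC_RDT_EXAMPLE */
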
2275 static processorid_t
2276 apic_find_cpu(int flag)
2277 {
2278 	processorid_t acid = 0;
2279 	int i;
2280 
2281 	/* Find the first CPU with the passed-in flag set */
2282 	for (i = 0; i < apic_nproc; i++) {
2283 		if (apic_cpus[i].aci_status & flag) {
2284 			acid = i;
2285 			break;
2286 		}
2287 	}
2288 
2289 	ASSERT((apic_cpus[acid].aci_status & flag) != 0);
2290 	return (acid);
2291 }
2292 
2293 /*
2294  * Call rebind to do the actual programming.
2295  * Must be called with interrupts disabled and apic_ioapic_lock held.
2296  * 'p' is polymorphic -- if this function is called to process a deferred
2297  * reprogramming, p is of type 'struct ioapic_reprogram_data *', from which
2298  * the irq pointer is retrieved.  If not doing deferred reprogramming,
2299  * p is of type 'apic_irq_t *'.
2300  *
2301  * apic_ioapic_lock must be held across this call, as it protects apic_rebind
2302  * and protects apic_find_cpu() from a race in which a CPU could be taken
2303  * offline after a CPU is selected, but before apic_rebind() is called to
2304  * bind interrupts to it.
2305  */
2306 int
2307 apic_setup_io_intr(void *p, int irq, boolean_t deferred)
2308 {
2309 	apic_irq_t *irqptr;
2310 	struct ioapic_reprogram_data *drep = NULL;
2311 	int rv;
2312 
2313 	if (deferred) {
2314 		drep = (struct ioapic_reprogram_data *)p;
2315 		ASSERT(drep != NULL);
2316 		irqptr = drep->irqp;
2317 	} else
2318 		irqptr = (apic_irq_t *)p;
2319 
2320 	ASSERT(irqptr != NULL);
2321 
2322 	rv = apic_rebind(irqptr, apic_irq_table[irq]->airq_cpu, drep);
2323 	if (rv) {
2324 		/*
2325 		 * CPU is not up or interrupts are disabled. Fall back to
2326 		 * the first available CPU
2327 		 */
2328 		rv = apic_rebind(irqptr, apic_find_cpu(APIC_CPU_INTR_ENABLE),
2329 		    drep);
2330 	}
2331 
2332 	return (rv);
2333 }
2334 
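/*
 * Illustrative call shapes (hypothetical) matching the polymorphism
 * described above:
 *
 *	(void) apic_setup_io_intr(irqptr, irq, B_FALSE);	immediate
 *	(void) apic_setup_io_intr(drep, irq, B_TRUE);		deferred
 */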
2335 
2336 uchar_t
2337 apic_modify_vector(uchar_t vector, int irq)
2338 {
2339 	apic_vector_to_irq[vector] = (uchar_t)irq;
2340 	return (vector);
2341 }
2342 
2343 char *
2344 apic_get_apic_type()
2345 {
2346 	return (apic_psm_info.p_mach_idstring);
2347 }
2348 
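/*
 * Switch the PSM ops that touch local APIC registers to their x2APIC
 * variants.  In x2APIC mode the local APIC is accessed via MSRs rather
 * than the memory-mapped window, so the IPI, interrupt-exit and setspl
 * paths need different register accessors.
 */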
2349 void
2350 x2apic_update_psm()
2351 {
2352 	struct psm_ops *pops = &apic_ops;
2353 
2354 	ASSERT(pops != NULL);
2355 
2356 	pops->psm_send_ipi =  x2apic_send_ipi;
2357 	pops->psm_intr_exit = x2apic_intr_exit;
2358 	pops->psm_setspl = x2apic_setspl;
2359 
2360 	/* global functions */
2361 	send_dirintf = pops->psm_send_ipi;
2362 }
2363